-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio3
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu1
-rw-r--r--Documentation/admin-guide/hw-vuln/index.rst1
-rw-r--r--Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst149
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt38
-rw-r--r--Documentation/arm64/silicon-errata.txt1
-rw-r--r--Documentation/device-mapper/dm-integrity.txt7
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt6
-rw-r--r--Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt2
-rw-r--r--Documentation/devicetree/bindings/net/btusb.txt2
-rw-r--r--Documentation/devicetree/bindings/net/nfc/nxp-nci.txt2
-rw-r--r--Documentation/devicetree/bindings/net/nfc/pn544.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/wm8994.txt18
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3.txt2
-rw-r--r--Documentation/driver-api/libata.rst2
-rw-r--r--Documentation/driver-api/mtdnand.rst4
-rw-r--r--Documentation/filesystems/affs.txt16
-rw-r--r--Documentation/filesystems/fscrypt.rst12
-rw-r--r--Documentation/filesystems/seq_file.txt6
-rw-r--r--Documentation/filesystems/sysfs.txt8
-rw-r--r--Documentation/kbuild/llvm.rst87
-rw-r--r--Documentation/media/uapi/v4l/colorspaces-defs.rst9
-rw-r--r--Documentation/media/uapi/v4l/colorspaces-details.rst5
-rw-r--r--Documentation/networking/ip-sysctl.txt4
-rw-r--r--Documentation/sound/hd-audio/index.rst1
-rw-r--r--Documentation/sound/hd-audio/models.rst2
-rw-r--r--Documentation/sound/hd-audio/realtek-pc-beep.rst129
-rwxr-xr-xDocumentation/sphinx/parse-headers.pl2
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py2
-rw-r--r--Documentation/trace/postprocess/decode_msr.py2
-rw-r--r--Documentation/trace/postprocess/trace-pagealloc-postprocess.pl2
-rw-r--r--Documentation/trace/postprocess/trace-vmscan-postprocess.pl2
-rw-r--r--Documentation/virtual/kvm/api.txt2
-rw-r--r--MAINTAINERS9
-rw-r--r--Makefile87
-rw-r--r--arch/Kconfig7
-rw-r--r--arch/alpha/defconfig1
-rw-r--r--arch/alpha/include/asm/io.h82
-rw-r--r--arch/alpha/include/asm/uaccess.h8
-rw-r--r--arch/alpha/kernel/io.c60
-rw-r--r--arch/arc/Makefile9
-rw-r--r--arch/arc/boot/dts/hsdk.dts6
-rw-r--r--arch/arc/include/asm/elf.h2
-rw-r--r--arch/arc/include/asm/page.h1
-rw-r--r--arch/arc/kernel/entry.S4
-rw-r--r--arch/arc/kernel/setup.c5
-rw-r--r--arch/arc/kernel/signal.c4
-rw-r--r--arch/arc/kernel/stacktrace.c30
-rw-r--r--arch/arc/plat-eznps/Kconfig1
-rw-r--r--arch/arc/plat-eznps/include/plat/ctop.h1
-rw-r--r--arch/arc/plat-hsdk/Kconfig1
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/boot/compressed/Makefile8
-rw-r--r--arch/arm/boot/compressed/head.S4
-rw-r--r--arch/arm/boot/compressed/vmlinux.lds.S2
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi3
-rw-r--r--arch/arm/boot/dts/armada-385-turris-omnia.dts1
-rw-r--r--arch/arm/boot/dts/armada-388-helios4.dts28
-rw-r--r--arch/arm/boot/dts/armada-xp-98dx3236.dtsi5
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts4
-rw-r--r--arch/arm/boot/dts/aspeed-g4.dtsi1
-rw-r--r--arch/arm/boot/dts/aspeed-g5.dtsi1
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_som1.dtsi4
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts4
-rw-r--r--arch/arm/boot/dts/at91-sama5d3_xplained.dts7
-rw-r--r--arch/arm/boot/dts/at91-sama5d4_xplained.dts7
-rw-r--r--arch/arm/boot/dts/at91sam9rl.dtsi19
-rw-r--r--arch/arm/boot/dts/bcm-hr2.dtsi8
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi8
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-zero-w.dts2
-rw-r--r--arch/arm/boot/dts/bcm283x.dtsi1
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi2
-rw-r--r--arch/arm/boot/dts/dra7.dtsi4
-rw-r--r--arch/arm/boot/dts/dra76x.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos3250-artik5.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos3250-monk.dts2
-rw-r--r--arch/arm/boot/dts/exynos3250-rinato.dts2
-rw-r--r--arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos4412-midas.dtsi6
-rw-r--r--arch/arm/boot/dts/exynos4412-odroid-common.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5250-smdk5250.dts2
-rw-r--r--arch/arm/boot/dts/exynos5250-snow-common.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5250-spring.dts2
-rw-r--r--arch/arm/boot/dts/exynos5410-odroidxu.dts6
-rw-r--r--arch/arm/boot/dts/exynos5410-pinctrl.dtsi28
-rw-r--r--arch/arm/boot/dts/exynos5410.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos5420-arndale-octa.dts2
-rw-r--r--arch/arm/boot/dts/exynos5422-odroid-core.dtsi2
-rw-r--r--arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts4
-rw-r--r--arch/arm/boot/dts/imx50-evk.dts2
-rw-r--r--arch/arm/boot/dts/imx6q-b450v3.dts7
-rw-r--r--arch/arm/boot/dts/imx6q-b650v3.dts7
-rw-r--r--arch/arm/boot/dts/imx6q-b850v3.dts11
-rw-r--r--arch/arm/boot/dts/imx6q-bx50v3.dtsi15
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw52xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-udoo.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi5
-rw-r--r--arch/arm/boot/dts/imx6qp.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6sl.dtsi2
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi2
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi2
-rw-r--r--arch/arm/boot/dts/lpc32xx.dtsi3
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi4
-rw-r--r--arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi4
-rw-r--r--arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts1
-rw-r--r--arch/arm/boot/dts/omap3.dtsi3
-rw-r--r--arch/arm/boot/dts/omap4-duovero-parlor.dts2
-rw-r--r--arch/arm/boot/dts/omap4-panda-es.dts2
-rw-r--r--arch/arm/boot/dts/omap4.dtsi7
-rw-r--r--arch/arm/boot/dts/omap443x.dtsi12
-rw-r--r--arch/arm/boot/dts/omap44xx-clocks.dtsi8
-rw-r--r--arch/arm/boot/dts/omap5.dtsi5
-rw-r--r--arch/arm/boot/dts/owl-s500.dtsi6
-rw-r--r--arch/arm/boot/dts/picoxcell-pc3x2.dtsi4
-rw-r--r--arch/arm/boot/dts/r8a73a4.dtsi9
-rw-r--r--arch/arm/boot/dts/r8a7740.dtsi2
-rw-r--r--arch/arm/boot/dts/r8a7793-gose.dts4
-rw-r--r--arch/arm/boot/dts/rk3036.dtsi2
-rw-r--r--arch/arm/boot/dts/rk3228-evb.dts2
-rw-r--r--arch/arm/boot/dts/rk322x.dtsi6
-rw-r--r--arch/arm/boot/dts/rk3xxx.dtsi2
-rw-r--r--arch/arm/boot/dts/s5pv210-aries.dtsi1
-rw-r--r--arch/arm/boot/dts/s5pv210.dtsi127
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi7
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi2
-rw-r--r--arch/arm/boot/dts/socfpga_arria10.dtsi4
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi2
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts4
-rw-r--r--arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts5
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts12
-rw-r--r--arch/arm/boot/dts/sun8i-v3s.dtsi2
-rw-r--r--arch/arm/boot/dts/uniphier-pxs2.dtsi2
-rw-r--r--arch/arm/boot/dts/vfxxx.dtsi2
-rw-r--r--arch/arm/configs/rpc_defconfig1
-rw-r--r--arch/arm/configs/s3c2410_defconfig1
-rw-r--r--arch/arm/include/asm/assembler.h83
-rw-r--r--arch/arm/include/asm/futex.h9
-rw-r--r--arch/arm/include/asm/kexec-internal.h12
-rw-r--r--arch/arm/include/asm/kprobes.h22
-rw-r--r--arch/arm/include/asm/kvm_emulate.h11
-rw-r--r--arch/arm/include/asm/kvm_host.h5
-rw-r--r--arch/arm/include/asm/percpu.h2
-rw-r--r--arch/arm/include/asm/uaccess-asm.h117
-rw-r--r--arch/arm/include/asm/vfpmacros.h8
-rw-r--r--arch/arm/kernel/asm-offsets.c8
-rw-r--r--arch/arm/kernel/entry-armv.S11
-rw-r--r--arch/arm/kernel/entry-header.S9
-rw-r--r--arch/arm/kernel/head.S6
-rw-r--r--arch/arm/kernel/hw_breakpoint.c123
-rw-r--r--arch/arm/kernel/machine_kexec.c20
-rw-r--r--arch/arm/kernel/ptrace.c4
-rw-r--r--arch/arm/kernel/relocate_kernel.S38
-rw-r--r--arch/arm/kernel/signal.c14
-rw-r--r--arch/arm/kernel/smccc-call.S11
-rw-r--r--arch/arm/kernel/stacktrace.c26
-rw-r--r--arch/arm/kernel/suspend.c19
-rw-r--r--arch/arm/kernel/traps.c6
-rw-r--r--arch/arm/lib/bitops.h8
-rw-r--r--arch/arm/mach-at91/pm.c11
-rw-r--r--arch/arm/mach-at91/pm_suspend.S4
-rw-r--r--arch/arm/mach-footbridge/cats-pci.c4
-rw-r--r--arch/arm/mach-footbridge/dc21285.c12
-rw-r--r--arch/arm/mach-footbridge/ebsa285-pci.c4
-rw-r--r--arch/arm/mach-footbridge/netwinder-pci.c2
-rw-r--r--arch/arm/mach-footbridge/personal-pci.c5
-rw-r--r--arch/arm/mach-imx/Makefile2
-rw-r--r--arch/arm/mach-imx/pm-imx5.c6
-rw-r--r--arch/arm/mach-imx/pm-imx6.c10
-rw-r--r--arch/arm/mach-imx/suspend-imx6.S1
-rw-r--r--arch/arm/mach-integrator/Kconfig7
-rw-r--r--arch/arm/mach-keystone/keystone.c4
-rw-r--r--arch/arm/mach-omap2/omap_device.c8
-rw-r--r--arch/arm/mach-shmobile/pm-rmobile.c1
-rw-r--r--arch/arm/mach-socfpga/pm.c8
-rw-r--r--arch/arm/mach-sunxi/sunxi.c1
-rw-r--r--arch/arm/mach-tegra/tegra.c4
-rw-r--r--arch/arm/mm/cache-l2x0.c16
-rw-r--r--arch/arm/mm/proc-macros.S3
-rw-r--r--arch/arm/net/bpf_jit_32.c52
-rw-r--r--arch/arm/plat-samsung/Kconfig1
-rw-r--r--arch/arm/probes/kprobes/opt-arm.c18
-rw-r--r--arch/arm/probes/uprobes/core.c4
-rw-r--r--arch/arm/vdso/Makefile22
-rw-r--r--arch/arm/xen/enlighten.c2
-rw-r--r--arch/arm/xen/p2m.c33
-rw-r--r--arch/arm64/Kconfig18
-rw-r--r--arch/arm64/Kconfig.platforms1
-rw-r--r--arch/arm64/Makefile4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi6
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi3
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi5
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts2
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi2
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi2
-rw-r--r--arch/arm64/boot/dts/exynos/exynos7-espresso.dts3
-rw-r--r--arch/arm64/boot/dts/exynos/exynos7.dtsi12
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi3
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts11
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts18
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi3
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi24
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-pins.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/pm8916.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/ulcb.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-evb.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi16
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi17
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi2
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi4
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi4
-rw-r--r--arch/arm64/crypto/aes-modes.S16
-rw-r--r--arch/arm64/crypto/sha1-ce-glue.c1
-rw-r--r--arch/arm64/crypto/sha2-ce-glue.c2
-rw-r--r--arch/arm64/crypto/sha3-ce-glue.c4
-rw-r--r--arch/arm64/crypto/sha512-ce-glue.c2
-rw-r--r--arch/arm64/include/asm/alternative.h24
-rw-r--r--arch/arm64/include/asm/atomic_ll_sc.h108
-rw-r--r--arch/arm64/include/asm/atomic_lse.h122
-rw-r--r--arch/arm64/include/asm/cache.h3
-rw-r--r--arch/arm64/include/asm/cacheflush.h6
-rw-r--r--arch/arm64/include/asm/checksum.h5
-rw-r--r--arch/arm64/include/asm/cmpxchg.h116
-rw-r--r--arch/arm64/include/asm/cpucaps.h3
-rw-r--r--arch/arm64/include/asm/cputype.h2
-rw-r--r--arch/arm64/include/asm/debug-monitors.h2
-rw-r--r--arch/arm64/include/asm/kvm_arm.h4
-rw-r--r--arch/arm64/include/asm/kvm_asm.h43
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h9
-rw-r--r--arch/arm64/include/asm/kvm_host.h13
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h3
-rw-r--r--arch/arm64/include/asm/lse.h6
-rw-r--r--arch/arm64/include/asm/numa.h3
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h34
-rw-r--r--arch/arm64/include/asm/syscall.h12
-rw-r--r--arch/arm64/include/asm/sysreg.h4
-rw-r--r--arch/arm64/include/asm/thread_info.h1
-rw-r--r--arch/arm64/include/asm/word-at-a-time.h10
-rw-r--r--arch/arm64/kernel/alternative.c16
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c2
-rw-r--r--arch/arm64/kernel/cpu_errata.c30
-rw-r--r--arch/arm64/kernel/cpufeature.c15
-rw-r--r--arch/arm64/kernel/crash_dump.c2
-rw-r--r--arch/arm64/kernel/debug-monitors.c24
-rw-r--r--arch/arm64/kernel/fpsimd.c6
-rw-r--r--arch/arm64/kernel/head.S1
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c44
-rw-r--r--arch/arm64/kernel/insn.c14
-rw-r--r--arch/arm64/kernel/kgdb.c2
-rw-r--r--arch/arm64/kernel/machine_kexec.c1
-rw-r--r--arch/arm64/kernel/module.lds6
-rw-r--r--arch/arm64/kernel/perf_regs.c25
-rw-r--r--arch/arm64/kernel/probes/uprobes.c2
-rw-r--r--arch/arm64/kernel/psci.c5
-rw-r--r--arch/arm64/kernel/ptrace.c29
-rw-r--r--arch/arm64/kernel/signal.c11
-rw-r--r--arch/arm64/kernel/sys_compat.c11
-rw-r--r--arch/arm64/kernel/syscall.c7
-rw-r--r--arch/arm64/kernel/topology.c32
-rw-r--r--arch/arm64/kernel/traps.c9
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S6
-rw-r--r--arch/arm64/kernel/vdso/vdso.lds.S8
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S11
-rw-r--r--arch/arm64/kvm/debug.c86
-rw-r--r--arch/arm64/kvm/guest.c7
-rw-r--r--arch/arm64/kvm/hyp-init.S11
-rw-r--r--arch/arm64/kvm/hyp/debug-sr.c24
-rw-r--r--arch/arm64/kvm/hyp/entry.S31
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S63
-rw-r--r--arch/arm64/kvm/hyp/switch.c47
-rw-r--r--arch/arm64/kvm/sys_regs.c11
-rw-r--r--arch/arm64/lib/memcpy.S3
-rw-r--r--arch/arm64/lib/memmove.S3
-rw-r--r--arch/arm64/lib/memset.S3
-rw-r--r--arch/arm64/mm/hugetlbpage.c2
-rw-r--r--arch/arm64/mm/numa.c6
-rw-r--r--arch/h8300/kernel/asm-offsets.c3
-rw-r--r--arch/hexagon/include/asm/io.h12
-rw-r--r--arch/hexagon/kernel/hexagon_ksyms.c2
-rw-r--r--arch/hexagon/mm/ioremap.c2
-rw-r--r--arch/ia64/configs/zx1_defconfig1
-rw-r--r--arch/ia64/include/asm/module.h6
-rw-r--r--arch/ia64/include/asm/ptrace.h8
-rw-r--r--arch/ia64/include/asm/syscall.h2
-rw-r--r--arch/ia64/kernel/Makefile2
-rw-r--r--arch/ia64/kernel/err_inject.c22
-rw-r--r--arch/ia64/kernel/kprobes.c77
-rw-r--r--arch/ia64/kernel/mca.c2
-rw-r--r--arch/ia64/kernel/module.c29
-rw-r--r--arch/ia64/kernel/ptrace.c24
-rw-r--r--arch/ia64/mm/discontig.c6
-rw-r--r--arch/ia64/mm/init.c6
-rw-r--r--arch/ia64/scripts/unwcheck.py2
-rw-r--r--arch/m68k/coldfire/pci.c4
-rw-r--r--arch/m68k/configs/amiga_defconfig1
-rw-r--r--arch/m68k/configs/apollo_defconfig1
-rw-r--r--arch/m68k/configs/atari_defconfig1
-rw-r--r--arch/m68k/configs/bvme6000_defconfig1
-rw-r--r--arch/m68k/configs/hp300_defconfig1
-rw-r--r--arch/m68k/configs/mac_defconfig1
-rw-r--r--arch/m68k/configs/multi_defconfig1
-rw-r--r--arch/m68k/configs/mvme147_defconfig1
-rw-r--r--arch/m68k/configs/mvme16x_defconfig1
-rw-r--r--arch/m68k/configs/q40_defconfig1
-rw-r--r--arch/m68k/configs/sun3_defconfig1
-rw-r--r--arch/m68k/configs/sun3x_defconfig1
-rw-r--r--arch/m68k/include/asm/m53xxacr.h6
-rw-r--r--arch/m68k/include/asm/mac_via.h1
-rw-r--r--arch/m68k/kernel/setup_no.c3
-rw-r--r--arch/m68k/mac/config.c21
-rw-r--r--arch/m68k/mac/iop.c21
-rw-r--r--arch/m68k/mac/via.c6
-rw-r--r--arch/m68k/mm/mcfmmu.c2
-rw-r--r--arch/m68k/q40/config.c1
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/Makefile13
-rw-r--r--arch/mips/alchemy/board-xxs1500.c1
-rw-r--r--arch/mips/alchemy/common/clock.c9
-rw-r--r--arch/mips/bcm47xx/Kconfig1
-rw-r--r--arch/mips/boot/compressed/Makefile2
-rw-r--r--arch/mips/boot/compressed/decompress.c3
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c3
-rw-r--r--arch/mips/cavium-octeon/octeon-usb.c5
-rw-r--r--arch/mips/configs/bigsur_defconfig1
-rw-r--r--arch/mips/configs/fuloong2e_defconfig1
-rw-r--r--arch/mips/configs/ip27_defconfig1
-rw-r--r--arch/mips/configs/ip32_defconfig1
-rw-r--r--arch/mips/configs/jazz_defconfig1
-rw-r--r--arch/mips/configs/loongson3_defconfig2
-rw-r--r--arch/mips/configs/malta_defconfig1
-rw-r--r--arch/mips/configs/malta_kvm_defconfig1
-rw-r--r--arch/mips/configs/malta_kvm_guest_defconfig1
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig1
-rw-r--r--arch/mips/configs/rm200_defconfig1
-rw-r--r--arch/mips/include/asm/cpu-type.h1
-rw-r--r--arch/mips/include/asm/div64.h55
-rw-r--r--arch/mips/include/asm/kvm_host.h8
-rw-r--r--arch/mips/include/asm/mipsregs.h2
-rw-r--r--arch/mips/kernel/genex.S6
-rw-r--r--arch/mips/kernel/mips-cm.c6
-rw-r--r--arch/mips/kernel/relocate.c10
-rw-r--r--arch/mips/kernel/setup.c10
-rw-r--r--arch/mips/kernel/smp-bmips.c2
-rw-r--r--arch/mips/kernel/time.c65
-rw-r--r--arch/mips/kernel/topology.c2
-rw-r--r--arch/mips/kernel/traps.c1
-rw-r--r--arch/mips/kernel/vmlinux.lds.S3
-rw-r--r--arch/mips/kvm/mips.c2
-rw-r--r--arch/mips/kvm/mmu.c3
-rw-r--r--arch/mips/lantiq/irq.c2
-rw-r--r--arch/mips/mm/c-r4k.c6
-rw-r--r--arch/mips/mm/tlb-r4k.c1
-rw-r--r--arch/mips/mm/tlbex.c5
-rw-r--r--arch/mips/pci/pci-legacy.c9
-rw-r--r--arch/mips/pci/pci-mt7620.c5
-rw-r--r--arch/mips/pci/pci-rt2880.c37
-rw-r--r--arch/mips/ralink/of.c2
-rw-r--r--arch/mips/sni/a20r.c9
-rw-r--r--arch/mips/vdso/Makefile5
-rw-r--r--arch/mips/vdso/genvdso.c10
-rw-r--r--arch/mips/vdso/gettimeofday.c14
-rw-r--r--arch/nds32/mm/cacheflush.c2
-rw-r--r--arch/openrisc/include/asm/barrier.h9
-rw-r--r--arch/openrisc/include/asm/uaccess.h8
-rw-r--r--arch/openrisc/kernel/entry.S4
-rw-r--r--arch/openrisc/kernel/setup.c2
-rw-r--r--arch/openrisc/kernel/stacktrace.c18
-rw-r--r--arch/openrisc/mm/cache.c2
-rw-r--r--arch/parisc/include/asm/atomic.h2
-rw-r--r--arch/parisc/include/asm/barrier.h61
-rw-r--r--arch/parisc/include/asm/cmpxchg.h2
-rw-r--r--arch/parisc/kernel/irq.c4
-rw-r--r--arch/parisc/lib/bitops.c12
-rw-r--r--arch/parisc/mm/init.c2
-rw-r--r--arch/powerpc/Kconfig20
-rw-r--r--arch/powerpc/Kconfig.debug1
-rw-r--r--arch/powerpc/Makefile1
-rw-r--r--arch/powerpc/boot/Makefile2
-rw-r--r--arch/powerpc/boot/serial.c2
-rw-r--r--arch/powerpc/configs/85xx-hw.config1
-rw-r--r--arch/powerpc/configs/amigaone_defconfig1
-rw-r--r--arch/powerpc/configs/chrp32_defconfig1
-rw-r--r--arch/powerpc/configs/g5_defconfig1
-rw-r--r--arch/powerpc/configs/maple_defconfig1
-rw-r--r--arch/powerpc/configs/pasemi_defconfig2
-rw-r--r--arch/powerpc/configs/pmac32_defconfig1
-rw-r--r--arch/powerpc/configs/powernv_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig1
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig2
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/configs/skiroot_defconfig1
-rw-r--r--arch/powerpc/include/asm/bitops.h23
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h4
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-64k.h8
-rw-r--r--arch/powerpc/include/asm/book3s/64/kup-radix.h22
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h27
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h11
-rw-r--r--arch/powerpc/include/asm/code-patching.h2
-rw-r--r--arch/powerpc/include/asm/cpu_has_feature.h4
-rw-r--r--arch/powerpc/include/asm/cputable.h5
-rw-r--r--arch/powerpc/include/asm/dcr-native.h8
-rw-r--r--arch/powerpc/include/asm/drmem.h24
-rw-r--r--arch/powerpc/include/asm/exception-64s.h9
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h19
-rw-r--r--arch/powerpc/include/asm/futex.h4
-rw-r--r--arch/powerpc/include/asm/kup.h40
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h3
-rw-r--r--arch/powerpc/include/asm/kvm_host.h3
-rw-r--r--arch/powerpc/include/asm/machdep.h3
-rw-r--r--arch/powerpc/include/asm/mmu_context.h2
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h4
-rw-r--r--arch/powerpc/include/asm/percpu.h4
-rw-r--r--arch/powerpc/include/asm/processor.h1
-rw-r--r--arch/powerpc/include/asm/reg.h2
-rw-r--r--arch/powerpc/include/asm/security_features.h7
-rw-r--r--arch/powerpc/include/asm/setjmp.h6
-rw-r--r--arch/powerpc/include/asm/setup.h4
-rw-r--r--arch/powerpc/include/asm/tlb.h13
-rw-r--r--arch/powerpc/include/asm/uaccess.h147
-rw-r--r--arch/powerpc/include/uapi/asm/errno.h1
-rw-r--r--arch/powerpc/kernel/Makefile3
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S2
-rw-r--r--arch/powerpc/kernel/dma-iommu.c3
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c8
-rw-r--r--arch/powerpc/kernel/eeh.c13
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S96
-rw-r--r--arch/powerpc/kernel/head_64.S17
-rw-r--r--arch/powerpc/kernel/head_8xx.S10
-rw-r--r--arch/powerpc/kernel/idle_book3s.S27
-rw-r--r--arch/powerpc/kernel/iommu.c4
-rw-r--r--arch/powerpc/kernel/kprobes.c3
-rw-r--r--arch/powerpc/kernel/machine_kexec.c8
-rw-r--r--arch/powerpc/kernel/pci-common.c10
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c12
-rw-r--r--arch/powerpc/kernel/prom.c21
-rw-r--r--arch/powerpc/kernel/rtas.c153
-rw-r--r--arch/powerpc/kernel/setup-common.c4
-rw-r--r--arch/powerpc/kernel/setup_64.c124
-rw-r--r--arch/powerpc/kernel/signal_64.c4
-rw-r--r--arch/powerpc/kernel/smp.c6
-rw-r--r--arch/powerpc/kernel/sysfs.c42
-rw-r--r--arch/powerpc/kernel/tau_6xx.c147
-rw-r--r--arch/powerpc/kernel/time.c44
-rw-r--r--arch/powerpc/kernel/traps.c13
-rw-r--r--arch/powerpc/kernel/vdso.c2
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S15
-rw-r--r--arch/powerpc/kvm/book3s.c3
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c16
-rw-r--r--arch/powerpc/kvm/book3s_hv_tm.c28
-rw-r--r--arch/powerpc/kvm/book3s_hv_tm_builtin.c16
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c3
-rw-r--r--arch/powerpc/kvm/powerpc.c8
-rw-r--r--arch/powerpc/lib/checksum_wrappers.c4
-rw-r--r--arch/powerpc/lib/feature-fixups.c137
-rw-r--r--arch/powerpc/lib/string_32.S4
-rw-r--r--arch/powerpc/lib/string_64.S6
-rw-r--r--arch/powerpc/mm/fault.c7
-rw-r--r--arch/powerpc/mm/pgtable-radix.c4
-rw-r--r--arch/powerpc/mm/pkeys.c28
-rw-r--r--arch/powerpc/mm/tlb-radix.c23
-rw-r--r--arch/powerpc/mm/tlb_nohash_low.S12
-rw-r--r--arch/powerpc/perf/core-book3s.c27
-rw-r--r--arch/powerpc/perf/hv-24x7.c10
-rw-r--r--arch/powerpc/perf/hv-gpci-requests.h6
-rw-r--r--arch/powerpc/perf/isa207-common.c14
-rw-r--r--arch/powerpc/platforms/4xx/pci.c4
-rw-r--r--arch/powerpc/platforms/52xx/lite5200_sleep.S2
-rw-r--r--arch/powerpc/platforms/Kconfig14
-rw-r--r--arch/powerpc/platforms/cell/Kconfig1
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c113
-rw-r--r--arch/powerpc/platforms/maple/setup.c34
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c44
-rw-r--r--arch/powerpc/platforms/powernv/opal-dump.c41
-rw-r--r--arch/powerpc/platforms/powernv/opal-elog.c33
-rw-r--r--arch/powerpc/platforms/powernv/opal-imc.c39
-rw-r--r--arch/powerpc/platforms/powernv/setup.c17
-rw-r--r--arch/powerpc/platforms/powernv/smp.c3
-rw-r--r--arch/powerpc/platforms/ps3/mm.c22
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c7
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c3
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c10
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c2
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c4
-rw-r--r--arch/powerpc/platforms/pseries/ras.c6
-rw-r--r--arch/powerpc/platforms/pseries/rng.c1
-rw-r--r--arch/powerpc/platforms/pseries/setup.c8
-rw-r--r--arch/powerpc/platforms/pseries/suspend.c4
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c2
-rw-r--r--arch/powerpc/sysdev/xics/icp-hv.c1
-rw-r--r--arch/powerpc/sysdev/xive/common.c17
-rw-r--r--arch/powerpc/sysdev/xive/native.c6
-rw-r--r--arch/powerpc/sysdev/xive/spapr.c4
-rw-r--r--arch/powerpc/sysdev/xive/xive-internal.h7
-rw-r--r--arch/powerpc/xmon/Makefile3
-rw-r--r--arch/powerpc/xmon/nonstdio.c2
-rw-r--r--arch/riscv/include/asm/barrier.h10
-rw-r--r--arch/riscv/include/asm/cmpxchg.h8
-rw-r--r--arch/riscv/include/asm/ftrace.h21
-rw-r--r--arch/riscv/include/asm/page.h5
-rw-r--r--arch/riscv/include/asm/thread_info.h4
-rw-r--r--arch/riscv/include/uapi/asm/auxvec.h3
-rw-r--r--arch/riscv/kernel/entry.S1
-rw-r--r--arch/riscv/kernel/ftrace.c19
-rw-r--r--arch/riscv/kernel/mcount.S10
-rw-r--r--arch/riscv/kernel/setup.c2
-rw-r--r--arch/riscv/kernel/stacktrace.c2
-rw-r--r--arch/riscv/kernel/sys_riscv.c6
-rw-r--r--arch/riscv/kernel/time.c3
-rw-r--r--arch/riscv/kernel/vdso/Makefile6
-rw-r--r--arch/s390/crypto/arch_random.c4
-rw-r--r--arch/s390/include/asm/kvm_host.h8
-rw-r--r--arch/s390/include/asm/percpu.h28
-rw-r--r--arch/s390/include/asm/syscall.h12
-rw-r--r--arch/s390/include/asm/vdso.h1
-rw-r--r--arch/s390/kernel/asm-offsets.c2
-rw-r--r--arch/s390/kernel/cpcmd.c6
-rw-r--r--arch/s390/kernel/debug.c3
-rw-r--r--arch/s390/kernel/diag.c4
-rw-r--r--arch/s390/kernel/dis.c2
-rw-r--r--arch/s390/kernel/early.c2
-rw-r--r--arch/s390/kernel/entry.S1
-rw-r--r--arch/s390/kernel/mcount.S1
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c12
-rw-r--r--arch/s390/kernel/processor.c5
-rw-r--r--arch/s390/kernel/ptrace.c38
-rw-r--r--arch/s390/kernel/runtime_instr.c2
-rw-r--r--arch/s390/kernel/setup.c6
-rw-r--r--arch/s390/kernel/smp.c27
-rw-r--r--arch/s390/kernel/time.c119
-rw-r--r--arch/s390/kernel/trace.c2
-rw-r--r--arch/s390/kernel/vdso64/clock_getres.S10
-rw-r--r--arch/s390/kvm/gaccess.h54
-rw-r--r--arch/s390/kvm/kvm-s390.c7
-rw-r--r--arch/s390/kvm/vsie.c1
-rw-r--r--arch/s390/lib/uaccess.c4
-rw-r--r--arch/s390/mm/gmap.c7
-rw-r--r--arch/s390/mm/hugetlbpage.c11
-rw-r--r--arch/s390/mm/pgalloc.c16
-rw-r--r--arch/s390/purgatory/head.S9
-rw-r--r--arch/sh/boards/mach-landisk/setup.c3
-rw-r--r--arch/sh/configs/sh03_defconfig1
-rw-r--r--arch/sh/drivers/dma/Kconfig3
-rw-r--r--arch/sh/include/asm/uaccess.h7
-rw-r--r--arch/sh/kernel/entry-common.S6
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/configs/sparc64_defconfig1
-rw-r--r--arch/sparc/include/asm/mman.h54
-rw-r--r--arch/sparc/kernel/ptrace_32.c233
-rw-r--r--arch/sparc/kernel/ptrace_64.c17
-rw-r--r--arch/sparc/kernel/smp_64.c65
-rw-r--r--arch/sparc/kernel/traps_64.c13
-rw-r--r--arch/sparc/lib/memset.S1
-rw-r--r--arch/sparc/mm/init_32.c3
-rw-r--r--arch/um/drivers/chan_user.c4
-rw-r--r--arch/um/drivers/ubd_kern.c6
-rw-r--r--arch/um/drivers/xterm.c5
-rw-r--r--arch/um/kernel/dyn.lds.S6
-rw-r--r--arch/um/kernel/sigio.c6
-rw-r--r--arch/um/kernel/uml.lds.S6
-rw-r--r--arch/um/os-Linux/irq.c2
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/Makefile6
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/boot/compressed/Makefile4
-rw-r--r--arch/x86/boot/compressed/head_32.S7
-rw-r--r--arch/x86/boot/compressed/head_64.S5
-rw-r--r--arch/x86/configs/i386_defconfig3
-rw-r--r--arch/x86/configs/x86_64_defconfig3
-rw-r--r--arch/x86/crypto/aes_ctrby8_avx-x86_64.S14
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S6
-rw-r--r--arch/x86/crypto/crc32c-pcl-intel-asm_64.S2
-rw-r--r--arch/x86/entry/calling.h40
-rw-r--r--arch/x86/entry/entry_32.S1
-rw-r--r--arch/x86/entry/entry_64.S9
-rw-r--r--arch/x86/entry/vdso/vma.c2
-rw-r--r--arch/x86/events/amd/ibs.c53
-rw-r--r--arch/x86/events/amd/iommu.c8
-rw-r--r--arch/x86/events/intel/cstate.c6
-rw-r--r--arch/x86/events/intel/ds.c2
-rw-r--r--arch/x86/events/intel/rapl.c14
-rw-r--r--arch/x86/events/intel/uncore.c4
-rw-r--r--arch/x86/events/intel/uncore.h12
-rw-r--r--arch/x86/events/intel/uncore_snbep.c61
-rw-r--r--arch/x86/hyperv/hv_init.c6
-rw-r--r--arch/x86/hyperv/mmu.c12
-rw-r--r--arch/x86/include/asm/apic.h11
-rw-r--r--arch/x86/include/asm/barrier.h18
-rw-r--r--arch/x86/include/asm/cpu_device_id.h27
-rw-r--r--arch/x86/include/asm/cpufeatures.h3
-rw-r--r--arch/x86/include/asm/dma.h2
-rw-r--r--arch/x86/include/asm/insn.h15
-rw-r--r--arch/x86/include/asm/kvm_host.h12
-rw-r--r--arch/x86/include/asm/microcode_amd.h2
-rw-r--r--arch/x86/include/asm/mshyperv.h2
-rw-r--r--arch/x86/include/asm/msr-index.h5
-rw-r--r--arch/x86/include/asm/msr.h4
-rw-r--r--arch/x86/include/asm/nospec-branch.h5
-rw-r--r--arch/x86/include/asm/pgtable.h8
-rw-r--r--arch/x86/include/asm/pgtable_types.h3
-rw-r--r--arch/x86/include/asm/pkeys.h5
-rw-r--r--arch/x86/include/asm/processor.h11
-rw-r--r--arch/x86/include/asm/set_memory.h19
-rw-r--r--arch/x86/include/asm/stackprotector.h7
-rw-r--r--arch/x86/include/asm/sync_core.h9
-rw-r--r--arch/x86/include/asm/thread_info.h23
-rw-r--r--arch/x86/include/asm/uaccess.h12
-rw-r--r--arch/x86/include/asm/unwind.h2
-rw-r--r--arch/x86/kernel/acpi/boot.c27
-rw-r--r--arch/x86/kernel/acpi/cstate.c3
-rw-r--r--arch/x86/kernel/amd_nb.c15
-rw-r--r--arch/x86/kernel/apic/apic.c50
-rw-r--r--arch/x86/kernel/apic/io_apic.c26
-rw-r--r--arch/x86/kernel/apic/msi.c18
-rw-r--r--arch/x86/kernel/apic/vector.c51
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c6
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c15
-rw-r--r--arch/x86/kernel/cpu/amd.c3
-rw-r--r--arch/x86/kernel/cpu/bugs.c214
-rw-r--r--arch/x86/kernel/cpu/common.c54
-rw-r--r--arch/x86/kernel/cpu/cpu.h1
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.c2
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.h3
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_monitor.c7
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_rdtgroup.c191
-rw-r--r--arch/x86/kernel/cpu/match.c7
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-inject.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c11
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c8
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c63
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c14
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c6
-rw-r--r--arch/x86/kernel/crash.c2
-rw-r--r--arch/x86/kernel/fpu/init.c30
-rw-r--r--arch/x86/kernel/fpu/xstate.c95
-rw-r--r--arch/x86/kernel/i8259.c2
-rw-r--r--arch/x86/kernel/idt.c6
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c3
-rw-r--r--arch/x86/kernel/kprobes/core.c38
-rw-r--r--arch/x86/kernel/module.c1
-rw-r--r--arch/x86/kernel/nmi.c5
-rw-r--r--arch/x86/kernel/process.c28
-rw-r--r--arch/x86/kernel/process.h2
-rw-r--r--arch/x86/kernel/ptrace.c2
-rw-r--r--arch/x86/kernel/reboot.c46
-rw-r--r--arch/x86/kernel/setup.c7
-rw-r--r--arch/x86/kernel/signal.c24
-rw-r--r--arch/x86/kernel/smpboot.c8
-rw-r--r--arch/x86/kernel/time.c4
-rw-r--r--arch/x86/kernel/unwind_orc.c89
-rw-r--r--arch/x86/kernel/uprobes.c10
-rw-r--r--arch/x86/kernel/vmlinux.lds.S5
-rw-r--r--arch/x86/kvm/cpuid.c3
-rw-r--r--arch/x86/kvm/cpuid.h14
-rw-r--r--arch/x86/kvm/emulate.c10
-rw-r--r--arch/x86/kvm/irq.c85
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h2
-rw-r--r--arch/x86/kvm/lapic.c4
-rw-r--r--arch/x86/kvm/mmu.c98
-rw-r--r--arch/x86/kvm/mmu.h4
-rw-r--r--arch/x86/kvm/mmutrace.h2
-rw-r--r--arch/x86/kvm/paging_tmpl.h7
-rw-r--r--arch/x86/kvm/pmu_intel.c2
-rw-r--r--arch/x86/kvm/svm.c63
-rw-r--r--arch/x86/kvm/vmx.c244
-rw-r--r--arch/x86/kvm/x86.c172
-rw-r--r--arch/x86/lib/insn-eval.c5
-rw-r--r--arch/x86/lib/memcpy_64.S6
-rw-r--r--arch/x86/lib/memmove_64.S4
-rw-r--r--arch/x86/lib/memset_64.S6
-rw-r--r--arch/x86/lib/msr-smp.c4
-rw-r--r--arch/x86/lib/usercopy_64.c3
-rw-r--r--arch/x86/math-emu/wm_sqrt.S2
-rw-r--r--arch/x86/mm/ident_map.c12
-rw-r--r--arch/x86/mm/init.c2
-rw-r--r--arch/x86/mm/mem_encrypt.c2
-rw-r--r--arch/x86/mm/mem_encrypt_identity.c4
-rw-r--r--arch/x86/mm/mmio-mod.c4
-rw-r--r--arch/x86/mm/numa_emulation.c2
-rw-r--r--arch/x86/mm/pat.c3
-rw-r--r--arch/x86/mm/pgtable.c2
-rw-r--r--arch/x86/mm/tlb.c10
-rw-r--r--arch/x86/net/bpf_jit_comp.c29
-rw-r--r--arch/x86/net/bpf_jit_comp32.c35
-rw-r--r--arch/x86/pci/fixup.c4
-rw-r--r--arch/x86/pci/intel_mid_pci.c1
-rw-r--r--arch/x86/pci/xen.c1
-rw-r--r--arch/x86/platform/efi/efi_64.c28
-rw-r--r--arch/x86/platform/uv/uv_irq.c3
-rw-r--r--arch/x86/purgatory/Makefile5
-rw-r--r--arch/x86/tools/relocs.c12
-rw-r--r--arch/x86/xen/efi.c12
-rw-r--r--arch/x86/xen/enlighten_pv.c11
-rw-r--r--arch/x86/xen/enlighten_pvh.c4
-rw-r--r--arch/x86/xen/p2m.c59
-rw-r--r--arch/x86/xen/smp_pv.c1
-rw-r--r--arch/x86/xen/spinlock.c12
-rw-r--r--arch/x86/xen/xen-ops.h4
-rw-r--r--arch/xtensa/include/asm/uaccess.h2
-rw-r--r--arch/xtensa/kernel/perf_event.c2
-rw-r--r--arch/xtensa/kernel/setup.c3
-rw-r--r--arch/xtensa/kernel/xtensa_ksyms.c4
-rw-r--r--arch/xtensa/mm/cache.c14
-rw-r--r--block/bfq-iosched.c17
-rw-r--r--block/bio-integrity.c24
-rw-r--r--block/bio.c2
-rw-r--r--block/blk-cgroup.c15
-rw-r--r--block/blk-core.c3
-rw-r--r--block/blk-ioc.c7
-rw-r--r--block/blk-mq-sched.c9
-rw-r--r--block/blk-mq-sysfs.c12
-rw-r--r--block/blk-mq.c51
-rw-r--r--block/blk-settings.c15
-rw-r--r--block/blk-sysfs.c49
-rw-r--r--block/blk-wbt.c2
-rw-r--r--block/blk.h2
-rw-r--r--block/elevator.c44
-rw-r--r--block/genhd.c42
-rw-r--r--certs/blacklist.c2
-rw-r--r--crypto/af_alg.c36
-rw-r--r--crypto/algboss.c2
-rw-r--r--crypto/algif_aead.c16
-rw-r--r--crypto/algif_hash.c9
-rw-r--r--crypto/algif_skcipher.c17
-rw-r--r--crypto/api.c2
-rw-r--r--crypto/ecdh.c12
-rw-r--r--crypto/ecdh_helper.c3
-rw-r--r--crypto/lrw.c4
-rw-r--r--crypto/tcrypt.c20
-rw-r--r--crypto/xts.c4
-rw-r--r--drivers/acpi/acpi_configfs.c7
-rw-r--r--drivers/acpi/acpi_dbg.c3
-rw-r--r--drivers/acpi/acpi_extlog.c6
-rw-r--r--drivers/acpi/acpi_pnp.c3
-rw-r--r--drivers/acpi/acpica/exprep.c4
-rw-r--r--drivers/acpi/acpica/utdelete.c6
-rw-r--r--drivers/acpi/arm64/gtdt.c10
-rw-r--r--drivers/acpi/button.c26
-rw-r--r--drivers/acpi/cppc_acpi.c15
-rw-r--r--drivers/acpi/custom_method.c4
-rw-r--r--drivers/acpi/device_pm.c47
-rw-r--r--drivers/acpi/device_sysfs.c20
-rw-r--r--drivers/acpi/ec.c16
-rw-r--r--drivers/acpi/evged.c22
-rw-r--r--drivers/acpi/internal.h8
-rw-r--r--drivers/acpi/nfit/core.c19
-rw-r--r--drivers/acpi/nfit/nfit.h6
-rw-r--r--drivers/acpi/numa.c2
-rw-r--r--drivers/acpi/processor_throttling.c7
-rw-r--r--drivers/acpi/property.c44
-rw-r--r--drivers/acpi/resource.c2
-rw-r--r--drivers/acpi/scan.c122
-rw-r--r--drivers/acpi/sysfs.c8
-rw-r--r--drivers/acpi/tables.c42
-rw-r--r--drivers/acpi/thermal.c55
-rw-r--r--drivers/acpi/video_detect.c28
-rw-r--r--drivers/amba/bus.c20
-rw-r--r--drivers/android/binder.c50
-rw-r--r--drivers/android/binder_alloc.c10
-rw-r--r--drivers/ata/acard-ahci.c6
-rw-r--r--drivers/ata/ahci_brcm.c14
-rw-r--r--drivers/ata/libahci.c6
-rw-r--r--drivers/ata/libahci_platform.c4
-rw-r--r--drivers/ata/libata-core.c25
-rw-r--r--drivers/ata/libata-pmp.c1
-rw-r--r--drivers/ata/libata-scsi.c26
-rw-r--r--drivers/ata/libata-sff.c12
-rw-r--r--drivers/ata/pata_arasan_cf.c15
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c6
-rw-r--r--drivers/ata/pata_macio.c6
-rw-r--r--drivers/ata/pata_pxa.c8
-rw-r--r--drivers/ata/pdc_adma.c7
-rw-r--r--drivers/ata/sata_fsl.c4
-rw-r--r--drivers/ata/sata_inic162x.c4
-rw-r--r--drivers/ata/sata_mv.c38
-rw-r--r--drivers/ata/sata_nv.c20
-rw-r--r--drivers/ata/sata_promise.c6
-rw-r--r--drivers/ata/sata_qstor.c8
-rw-r--r--drivers/ata/sata_rcar.c19
-rw-r--r--drivers/ata/sata_sil.c8
-rw-r--r--drivers/ata/sata_sil24.c6
-rw-r--r--drivers/ata/sata_sx4.c6
-rw-r--r--drivers/atm/atmtcp.c10
-rw-r--r--drivers/atm/eni.c5
-rw-r--r--drivers/atm/firestream.c1
-rw-r--r--drivers/atm/idt77105.c4
-rw-r--r--drivers/atm/idt77252.c2
-rw-r--r--drivers/atm/lanai.c5
-rw-r--r--drivers/atm/nicstar.c2
-rw-r--r--drivers/atm/uPD98402.c2
-rw-r--r--drivers/auxdisplay/ht16k33.c3
-rw-r--r--drivers/base/component.c8
-rw-r--r--drivers/base/core.c31
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/dd.c24
-rw-r--r--drivers/base/firmware_loader/fallback.c2
-rw-r--r--drivers/base/node.c84
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/base/power/main.c16
-rw-r--r--drivers/base/power/runtime.c10
-rw-r--r--drivers/base/regmap/regmap-debugfs.c68
-rw-r--r--drivers/base/regmap/regmap-sdw.c4
-rw-r--r--drivers/base/regmap/regmap.c115
-rw-r--r--drivers/block/Kconfig1
-rw-r--r--drivers/block/aoe/aoe.h1
-rw-r--r--drivers/block/aoe/aoeblk.c21
-rw-r--r--drivers/block/aoe/aoedev.c1
-rw-r--r--drivers/block/floppy.c29
-rw-r--r--drivers/block/loop.c55
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c2
-rw-r--r--drivers/block/nbd.c45
-rw-r--r--drivers/block/null_blk_main.c12
-rw-r--r--drivers/block/null_blk_zoned.c25
-rw-r--r--drivers/block/ps3disk.c3
-rw-r--r--drivers/block/ps3vram.c2
-rw-r--r--drivers/block/rbd.c37
-rw-r--r--drivers/block/rsxx/core.c9
-rw-r--r--drivers/block/rsxx/dev.c2
-rw-r--r--drivers/block/skd_main.c2
-rw-r--r--drivers/block/sunvdc.c2
-rw-r--r--drivers/block/virtio_blk.c164
-rw-r--r--drivers/block/xen-blkback/blkback.c52
-rw-r--r--drivers/block/xen-blkback/xenbus.c9
-rw-r--r--drivers/block/xen-blkfront.c39
-rw-r--r--drivers/block/zram/zram_drv.c9
-rw-r--r--drivers/bluetooth/btbcm.c2
-rw-r--r--drivers/bluetooth/btqcomsmd.c27
-rw-r--r--drivers/bluetooth/btrtl.c20
-rw-r--r--drivers/bluetooth/hci_bcm.c5
-rw-r--r--drivers/bluetooth/hci_h5.c10
-rw-r--r--drivers/bluetooth/hci_ldisc.c1
-rw-r--r--drivers/bluetooth/hci_serdev.c5
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-allocator.c4
-rw-r--r--drivers/bus/fsl-mc/mc-io.c7
-rw-r--r--drivers/bus/hisi_lpc.c27
-rw-r--r--drivers/bus/mips_cdmm.c4
-rw-r--r--drivers/bus/omap_l3_noc.c4
-rw-r--r--drivers/bus/qcom-ebi2.c4
-rw-r--r--drivers/bus/sunxi-rsb.c2
-rw-r--r--drivers/bus/ti-sysc.c4
-rw-r--r--drivers/cdrom/gdrom.c13
-rw-r--r--drivers/char/agp/Kconfig2
-rw-r--r--drivers/char/agp/intel-gtt.c8
-rw-r--r--drivers/char/hpet.c4
-rw-r--r--drivers/char/hw_random/ks-sa-rng.c1
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c11
-rw-r--r--drivers/char/ipmi/ipmi_si_pci.c5
-rw-r--r--drivers/char/random.c14
-rw-r--r--drivers/char/tlclk.c17
-rw-r--r--drivers/char/tpm/eventlog/common.c15
-rw-r--r--drivers/char/tpm/eventlog/efi.c5
-rw-r--r--drivers/char/tpm/eventlog/tpm1.c2
-rw-r--r--drivers/char/tpm/eventlog/tpm2.c2
-rw-r--r--drivers/char/tpm/tpm-chip.c13
-rw-r--r--drivers/char/tpm/tpm.h8
-rw-r--r--drivers/char/tpm/tpm2-cmd.c1
-rw-r--r--drivers/char/tpm/tpm2-space.c26
-rw-r--r--drivers/char/tpm/tpm_crb.c123
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c145
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.h1
-rw-r--r--drivers/char/tpm/tpm_tis.c29
-rw-r--r--drivers/char/tpm/tpm_tis_core.c60
-rw-r--r--drivers/char/tpm/tpmrm-dev.c2
-rw-r--r--drivers/char/ttyprintk.c11
-rw-r--r--drivers/char/virtio_console.c3
-rw-r--r--drivers/clk/at91/clk-main.c11
-rw-r--r--drivers/clk/at91/clk-usb.c3
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c14
-rw-r--r--drivers/clk/clk-s2mps11.c1
-rw-r--r--drivers/clk/clk-scmi.c22
-rw-r--r--drivers/clk/clk.c108
-rw-r--r--drivers/clk/davinci/pll.c2
-rw-r--r--drivers/clk/ingenic/jz4770-cgu.c4
-rw-r--r--drivers/clk/meson/clk-pll.c2
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c83
-rw-r--r--drivers/clk/mvebu/armada-37xx-xtal.c4
-rw-r--r--drivers/clk/qcom/a53-pll.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c8
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c100
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c2
-rw-r--r--drivers/clk/rockchip/clk-half-divider.c2
-rw-r--r--drivers/clk/rockchip/clk-rk3228.c19
-rw-r--r--drivers/clk/samsung/clk-exynos4.c4
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c16
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c3
-rw-r--r--drivers/clk/samsung/clk-exynos7.c7
-rw-r--r--drivers/clk/sirf/clk-atlas6.c2
-rw-r--r--drivers/clk/socfpga/clk-gate-a10.c1
-rw-r--r--drivers/clk/socfpga/clk-gate.c2
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c4
-rw-r--r--drivers/clk/socfpga/clk-s10.c2
-rw-r--r--drivers/clk/sprd/pll.c2
-rw-r--r--drivers/clk/st/clk-flexgen.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c1
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c2
-rw-r--r--drivers/clk/tegra/clk-id.h1
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c2
-rw-r--r--drivers/clk/tegra/clk-tegra-pmc.c12
-rw-r--r--drivers/clk/tegra/clk-tegra30.c2
-rw-r--r--drivers/clk/ti/adpll.c11
-rw-r--r--drivers/clk/ti/clockdomain.c2
-rw-r--r--drivers/clk/ti/composite.c1
-rw-r--r--drivers/clk/ti/fapll.c11
-rw-r--r--drivers/clk/uniphier/clk-uniphier-mux.c4
-rw-r--r--drivers/clocksource/arm_arch_timer.c23
-rw-r--r--drivers/clocksource/cadence_ttc_timer.c18
-rw-r--r--drivers/clocksource/dw_apb_timer.c5
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c6
-rw-r--r--drivers/clocksource/h8300_timer8.c2
-rw-r--r--drivers/clocksource/mxs_timer.c5
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c3
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c83
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c24
-rw-r--r--drivers/cpufreq/highbank-cpufreq.c7
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c3
-rw-r--r--drivers/cpufreq/intel_pstate.c22
-rw-r--r--drivers/cpufreq/loongson1-cpufreq.c1
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c1
-rw-r--r--drivers/cpufreq/powernow-k8.c9
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c28
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c1
-rw-r--r--drivers/cpufreq/sti-cpufreq.c13
-rw-r--r--drivers/cpuidle/cpuidle.c3
-rw-r--r--drivers/cpuidle/sysfs.c6
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c2
-rw-r--r--drivers/crypto/bcm/cipher.c17
-rw-r--r--drivers/crypto/bcm/cipher.h4
-rw-r--r--drivers/crypto/bcm/util.c2
-rw-r--r--drivers/crypto/caam/caamalg_desc.c16
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c1
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c12
-rw-r--r--drivers/crypto/cavium/cpt/request_manager.h2
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c4
-rw-r--r--drivers/crypto/ccp/Kconfig3
-rw-r--r--drivers/crypto/ccp/ccp-dev.h1
-rw-r--r--drivers/crypto/ccp/ccp-ops.c42
-rw-r--r--drivers/crypto/ccree/cc_aead.c56
-rw-r--r--drivers/crypto/ccree/cc_aead.h1
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c108
-rw-r--r--drivers/crypto/ccree/cc_cipher.c30
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c7
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c106
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.h3
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_hw.c4
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_io.c24
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c34
-rw-r--r--drivers/crypto/inside-secure/safexcel.c2
-rw-r--r--drivers/crypto/ixp4xx_crypto.c2
-rw-r--r--drivers/crypto/mediatek/mtk-platform.c8
-rw-r--r--drivers/crypto/mxs-dcp.c62
-rw-r--r--drivers/crypto/omap-aes.c3
-rw-r--r--drivers/crypto/omap-sham.c67
-rw-r--r--drivers/crypto/picoxcell_crypto.c9
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_drv.c4
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_drv.c4
-rw-r--r--drivers/crypto/qat/qat_common/adf_isr.c29
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport.c1
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf_isr.c17
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c10
-rw-r--r--drivers/crypto/qat/qat_common/qat_hal.c2
-rw-r--r--drivers/crypto/qat/qat_common/qat_uclo.c9
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_drv.c4
-rw-r--r--drivers/crypto/stm32/stm32_crc32.c144
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-cipher.c125
-rw-r--r--drivers/crypto/talitos.c40
-rw-r--r--drivers/crypto/talitos.h1
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c21
-rw-r--r--drivers/devfreq/tegra-devfreq.c4
-rw-r--r--drivers/dma-buf/dma-fence.c78
-rw-r--r--drivers/dma/acpi-dma.c4
-rw-r--r--drivers/dma/at_hdmac.c13
-rw-r--r--drivers/dma/dma-jz4780.c7
-rw-r--r--drivers/dma/dmatest.c4
-rw-r--r--drivers/dma/dw/Kconfig2
-rw-r--r--drivers/dma/fsl-edma.c7
-rw-r--r--drivers/dma/fsldma.c6
-rw-r--r--drivers/dma/hsu/pci.c21
-rw-r--r--drivers/dma/ioat/dma.c12
-rw-r--r--drivers/dma/ioat/dma.h2
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c5
-rw-r--r--drivers/dma/mmp_tdma.c2
-rw-r--r--drivers/dma/mv_xor_v2.c4
-rw-r--r--drivers/dma/of-dma.c8
-rw-r--r--drivers/dma/owl-dma.c9
-rw-r--r--drivers/dma/pch_dma.c3
-rw-r--r--drivers/dma/pl330.c4
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c14
-rw-r--r--drivers/dma/stm32-dma.c9
-rw-r--r--drivers/dma/stm32-mdma.c9
-rw-r--r--drivers/dma/tegra20-apb-dma.c3
-rw-r--r--drivers/dma/tegra210-adma.c7
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c15
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c24
-rw-r--r--drivers/edac/amd64_edac.c41
-rw-r--r--drivers/edac/amd64_edac.h3
-rw-r--r--drivers/edac/edac_device_sysfs.c1
-rw-r--r--drivers/edac/edac_pci_sysfs.c2
-rw-r--r--drivers/edac/i5100_edac.c11
-rw-r--r--drivers/edac/ie31200_edac.c50
-rw-r--r--drivers/edac/ti_edac.c3
-rw-r--r--drivers/extcon/extcon-adc-jack.c3
-rw-r--r--drivers/extcon/extcon-arizona.c17
-rw-r--r--drivers/extcon/extcon-max77693.c2
-rw-r--r--drivers/extcon/extcon.c1
-rw-r--r--drivers/firewire/nosy.c9
-rw-r--r--drivers/firmware/Kconfig1
-rw-r--r--drivers/firmware/arm_scmi/scmi_pm_domain.c12
-rw-r--r--drivers/firmware/arm_scpi.c4
-rw-r--r--drivers/firmware/arm_sdei.c40
-rw-r--r--drivers/firmware/efi/Kconfig11
-rw-r--r--drivers/firmware/efi/efi.c6
-rw-r--r--drivers/firmware/efi/efivars.c4
-rw-r--r--drivers/firmware/efi/esrt.c2
-rw-r--r--drivers/firmware/efi/libstub/Makefile1
-rw-r--r--drivers/firmware/psci_checker.c5
-rw-r--r--drivers/firmware/qcom_scm.c9
-rw-r--r--drivers/firmware/qemu_fw_cfg.c7
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c4
-rw-r--r--drivers/fpga/dfl-afu-main.c3
-rw-r--r--drivers/gnss/sirf.c8
-rw-r--r--drivers/gpio/gpio-arizona.c7
-rw-r--r--drivers/gpio/gpio-dwapb.c34
-rw-r--r--drivers/gpio/gpio-eic-sprd.c2
-rw-r--r--drivers/gpio/gpio-exar.c7
-rw-r--r--drivers/gpio/gpio-ml-ioh.c2
-rw-r--r--drivers/gpio/gpio-mockup.c2
-rw-r--r--drivers/gpio/gpio-mvebu.c41
-rw-r--r--drivers/gpio/gpio-pcf857x.c2
-rw-r--r--drivers/gpio/gpio-pch.c1
-rw-r--r--drivers/gpio/gpio-pcie-idio-24.c62
-rw-r--r--drivers/gpio/gpio-sprd.c3
-rw-r--r--drivers/gpio/gpio-tc3589x.c2
-rw-r--r--drivers/gpio/gpio-tegra.c1
-rw-r--r--drivers/gpio/gpiolib-acpi.c16
-rw-r--r--drivers/gpio/gpiolib-sysfs.c8
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.h9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c20
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c26
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c19
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c26
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c10
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c12
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c12
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c2
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c9
-rw-r--r--drivers/gpu/drm/drm_atomic.c21
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c23
-rw-r--r--drivers/gpu/drm/drm_connector.c11
-rw-r--r--drivers/gpu/drm/drm_debugfs.c8
-rw-r--r--drivers/gpu/drm/drm_dp_aux_dev.c2
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c84
-rw-r--r--drivers/gpu/drm/drm_edid.c5
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c5
-rw-r--r--drivers/gpu/drm/drm_gem.c10
-rw-r--r--drivers/gpu/drm/drm_ioc32.c11
-rw-r--r--drivers/gpu/drm/drm_ioctl.c7
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c6
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c20
-rw-r--r--drivers/gpu/drm/drm_pci.c25
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c19
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c103
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c4
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c22
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c34
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c49
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c8
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c7
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c30
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c2
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c17
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c20
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c25
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c26
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c12
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c36
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c12
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c3
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c8
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c26
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c4
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c1
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c10
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c13
-rw-r--r--drivers/gpu/drm/qxl/qxl_image.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c5
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c9
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c8
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c3
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c5
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c2
-rw-r--r--drivers/gpu/drm/tegra/dc.c10
-rw-r--r--drivers/gpu/drm/tegra/drm.c2
-rw-r--r--drivers/gpu/drm/tegra/hub.c8
-rw-r--r--drivers/gpu/drm/tegra/sor.c10
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c3
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c22
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c21
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c27
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c16
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h5
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c5
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c8
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c2
-rw-r--r--drivers/gpu/host1x/bus.c9
-rw-r--r--drivers/gpu/host1x/debug.c4
-rw-r--r--drivers/gpu/ipu-v3/ipu-image-convert.c58
-rw-r--r--drivers/hid/hid-alps.c4
-rw-r--r--drivers/hid/hid-apple.c18
-rw-r--r--drivers/hid/hid-core.c24
-rw-r--r--drivers/hid/hid-cypress.c44
-rw-r--r--drivers/hid/hid-elan.c2
-rw-r--r--drivers/hid/hid-google-hammer.c2
-rw-r--r--drivers/hid/hid-ids.h28
-rw-r--r--drivers/hid/hid-input.c19
-rw-r--r--drivers/hid/hid-magicmouse.c6
-rw-r--r--drivers/hid/hid-mf.c2
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-plantronics.c60
-rw-r--r--drivers/hid/hid-quirks.c17
-rw-r--r--drivers/hid/hid-roccat-kone.c23
-rw-r--r--drivers/hid/hid-sensor-hub.c3
-rw-r--r--drivers/hid/hid-sony.c17
-rw-r--r--drivers/hid/hid-steam.c6
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c24
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c24
-rw-r--r--drivers/hid/usbhid/hid-core.c37
-rw-r--r--drivers/hid/usbhid/hiddev.c4
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hid/wacom_sys.c44
-rw-r--r--drivers/hid/wacom_wac.c19
-rw-r--r--drivers/hid/wacom_wac.h2
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c2
-rw-r--r--drivers/hsi/hsi_core.c3
-rw-r--r--drivers/hv/channel_mgmt.c58
-rw-r--r--drivers/hv/hv_balloon.c2
-rw-r--r--drivers/hv/vmbus_drv.c64
-rw-r--r--drivers/hwmon/acpi_power_meter.c4
-rw-r--r--drivers/hwmon/applesmc.c31
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c2
-rw-r--r--drivers/hwmon/da9052-hwmon.c4
-rw-r--r--drivers/hwmon/emc2103.c2
-rw-r--r--drivers/hwmon/jc42.c2
-rw-r--r--drivers/hwmon/k10temp.c9
-rw-r--r--drivers/hwmon/lm80.c11
-rw-r--r--drivers/hwmon/lm90.c42
-rw-r--r--drivers/hwmon/max6697.c7
-rw-r--r--drivers/hwmon/pmbus/adm1275.c10
-rw-r--r--drivers/hwmon/pmbus/max34440.c3
-rw-r--r--drivers/hwmon/scmi-hwmon.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c13
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c2
-rw-r--r--drivers/hwtracing/intel_th/core.c21
-rw-r--r--drivers/hwtracing/intel_th/gth.c4
-rw-r--r--drivers/hwtracing/intel_th/pci.c30
-rw-r--r--drivers/hwtracing/intel_th/sth.c4
-rw-r--r--drivers/hwtracing/stm/heartbeat.c6
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c38
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-altera.c19
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c2
-rw-r--r--drivers/i2c/busses/i2c-cadence.c14
-rw-r--r--drivers/i2c/busses/i2c-cpm.c3
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c2
-rw-r--r--drivers/i2c/busses/i2c-emev2.c5
-rw-r--r--drivers/i2c/busses/i2c-fsi.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c28
-rw-r--r--drivers/i2c/busses/i2c-imx.c66
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c5
-rw-r--r--drivers/i2c/busses/i2c-meson.c42
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c4
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c2
-rw-r--r--drivers/i2c/busses/i2c-owl.c6
-rw-r--r--drivers/i2c/busses/i2c-piix4.c3
-rw-r--r--drivers/i2c/busses/i2c-pxa.c13
-rw-r--r--drivers/i2c/busses/i2c-qup.c3
-rw-r--r--drivers/i2c/busses/i2c-rcar.c21
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c3
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c5
-rw-r--r--drivers/i2c/busses/i2c-sprd.c8
-rw-r--r--drivers/i2c/busses/i2c-st.c1
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c11
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c53
-rw-r--r--drivers/i2c/i2c-core-acpi.c11
-rw-r--r--drivers/i2c/i2c-core-base.c9
-rw-r--r--drivers/i2c/i2c-core-slave.c7
-rw-r--r--drivers/i2c/i2c-core-smbus.c7
-rw-r--r--drivers/i2c/i2c-dev.c57
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c1
-rw-r--r--drivers/ide/ide-atapi.c1
-rw-r--r--drivers/ide/ide-cd.c2
-rw-r--r--drivers/ide/ide-gd.c2
-rw-r--r--drivers/ide/ide-io.c5
-rw-r--r--drivers/iio/accel/adis16201.c2
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c15
-rw-r--r--drivers/iio/accel/kxcjk-1013.c51
-rw-r--r--drivers/iio/accel/kxsd9.c16
-rw-r--r--drivers/iio/accel/mma7455_core.c16
-rw-r--r--drivers/iio/accel/mma8452.c16
-rw-r--r--drivers/iio/accel/sca3000.c2
-rw-r--r--drivers/iio/adc/Kconfig1
-rw-r--r--drivers/iio/adc/ad7793.c3
-rw-r--r--drivers/iio/adc/ina2xx-adc.c11
-rw-r--r--drivers/iio/adc/max1118.c10
-rw-r--r--drivers/iio/adc/mcp3422.c16
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c2
-rw-r--r--drivers/iio/adc/rockchip_saradc.c2
-rw-r--r--drivers/iio/adc/stm32-adc.c51
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c23
-rw-r--r--drivers/iio/adc/ti-adc081c.c11
-rw-r--r--drivers/iio/adc/ti-adc0832.c11
-rw-r--r--drivers/iio/adc/ti-adc084s021.c10
-rw-r--r--drivers/iio/adc/ti-adc12138.c13
-rw-r--r--drivers/iio/adc/ti-ads1015.c10
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c95
-rw-r--r--drivers/iio/chemical/ccs811.c13
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c2
-rw-r--r--drivers/iio/dac/ad5504.c4
-rw-r--r--drivers/iio/dac/ad5592r-base.c4
-rw-r--r--drivers/iio/dac/vf610_dac.c1
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c15
-rw-r--r--drivers/iio/gyro/mpu3050-core.c15
-rw-r--r--drivers/iio/health/afe4403.c13
-rw-r--r--drivers/iio/health/afe4404.c8
-rw-r--r--drivers/iio/humidity/hdc100x.c10
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c12
-rw-r--r--drivers/iio/humidity/hts221.h7
-rw-r--r--drivers/iio/humidity/hts221_buffer.c9
-rw-r--r--drivers/iio/imu/adis16400_buffer.c5
-rw-r--r--drivers/iio/imu/adis16400_core.c3
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c13
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c26
-rw-r--r--drivers/iio/industrialio-buffer.c6
-rw-r--r--drivers/iio/light/hid-sensor-prox.c13
-rw-r--r--drivers/iio/light/ltr501.c15
-rw-r--r--drivers/iio/light/max44000.c12
-rw-r--r--drivers/iio/light/rpr0521.c17
-rw-r--r--drivers/iio/light/si1133.c37
-rw-r--r--drivers/iio/light/si1145.c19
-rw-r--r--drivers/iio/light/st_uvis25.h5
-rw-r--r--drivers/iio/light/st_uvis25_core.c8
-rw-r--r--drivers/iio/light/tsl2583.c8
-rw-r--r--drivers/iio/light/vcnl4000.c6
-rw-r--r--drivers/iio/magnetometer/ak8974.c29
-rw-r--r--drivers/iio/magnetometer/ak8975.c16
-rw-r--r--drivers/iio/magnetometer/mag3110.c13
-rw-r--r--drivers/iio/pressure/bmp280-core.c7
-rw-r--r--drivers/iio/pressure/mpl3115.c9
-rw-r--r--drivers/iio/pressure/ms5611_core.c11
-rw-r--r--drivers/iio/pressure/zpa2326.c4
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c1
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c14
-rw-r--r--drivers/infiniband/core/addr.c18
-rw-r--r--drivers/infiniband/core/cm.c27
-rw-r--r--drivers/infiniband/core/cma.c102
-rw-r--r--drivers/infiniband/core/cma_configfs.c13
-rw-r--r--drivers/infiniband/core/mad.c3
-rw-r--r--drivers/infiniband/core/rdma_core.c23
-rw-r--r--drivers/infiniband/core/ucma.c6
-rw-r--r--drivers/infiniband/core/umem_odp.c3
-rw-r--r--drivers/infiniband/core/user_mad.c17
-rw-r--r--drivers/infiniband/core/uverbs_main.c2
-rw-r--r--drivers/infiniband/core/verbs.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c22
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h1
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c9
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c2
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c10
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_pble.c6
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c36
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c3
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c34
-rw-r--r--drivers/infiniband/hw/mlx4/main.c3
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c14
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c5
-rw-r--r--drivers/infiniband/hw/mlx5/main.c9
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h1
-rw-r--r--drivers/infiniband/hw/qedr/main.c2
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h4
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c16
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c24
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c9
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c4
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig1
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c14
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c17
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c76
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c12
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c1
-rw-r--r--drivers/input/evdev.c19
-rw-r--r--drivers/input/joydev.c7
-rw-r--r--drivers/input/joystick/xpad.c32
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c1
-rw-r--r--drivers/input/keyboard/dlink-dir685-touchkeys.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c4
-rw-r--r--drivers/input/keyboard/nspire-keypad.c56
-rw-r--r--drivers/input/keyboard/omap4-keypad.c95
-rw-r--r--drivers/input/keyboard/sunkbd.c41
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c8
-rw-r--r--drivers/input/misc/adxl34x.c2
-rw-r--r--drivers/input/misc/cm109.c7
-rw-r--r--drivers/input/mouse/cyapa_gen6.c2
-rw-r--r--drivers/input/mouse/psmouse-base.c2
-rw-r--r--drivers/input/mouse/sentelic.c2
-rw-r--r--drivers/input/mouse/synaptics.c2
-rw-r--r--drivers/input/mouse/trackpoint.c12
-rw-r--r--drivers/input/mouse/trackpoint.h10
-rw-r--r--drivers/input/rmi4/rmi_driver.c5
-rw-r--r--drivers/input/serio/hil_mlc.c21
-rw-r--r--drivers/input/serio/hp_sdc_mlc.c8
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h101
-rw-r--r--drivers/input/serio/i8042.c15
-rw-r--r--drivers/input/serio/sun4i-ps2.c9
-rw-r--r--drivers/input/touchscreen/Kconfig1
-rw-r--r--drivers/input/touchscreen/ads7846.c52
-rw-r--r--drivers/input/touchscreen/elants_i2c.c44
-rw-r--r--drivers/input/touchscreen/elo.c4
-rw-r--r--drivers/input/touchscreen/goodix.c12
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c27
-rw-r--r--drivers/input/touchscreen/mms114.c29
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c3
-rw-r--r--drivers/input/touchscreen/s6sy761.c4
-rw-r--r--drivers/input/touchscreen/silead.c44
-rw-r--r--drivers/input/touchscreen/stmfts.c2
-rw-r--r--drivers/input/touchscreen/sur40.c1
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c1
-rw-r--r--drivers/iommu/amd_iommu.c15
-rw-r--r--drivers/iommu/amd_iommu_init.c11
-rw-r--r--drivers/iommu/amd_iommu_types.h10
-rw-r--r--drivers/iommu/amd_iommu_v2.c7
-rw-r--r--drivers/iommu/dmar.c49
-rw-r--r--drivers/iommu/exynos-iommu.c8
-rw-r--r--drivers/iommu/intel-iommu.c6
-rw-r--r--drivers/iommu/intel-svm.c7
-rw-r--r--drivers/iommu/intel_irq_remapping.c22
-rw-r--r--drivers/iommu/iommu.c2
-rw-r--r--drivers/iommu/iova.c4
-rw-r--r--drivers/iommu/omap-iommu-debug.c3
-rw-r--r--drivers/iommu/qcom_iommu.c5
-rw-r--r--drivers/ipack/carriers/tpci200.c1
-rw-r--r--drivers/irqchip/irq-alpine-msi.c3
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c27
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c2
-rw-r--r--drivers/irqchip/irq-gic.c14
-rw-r--r--drivers/irqchip/irq-mbigen.c8
-rw-r--r--drivers/irqchip/irq-mips-cpu.c7
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c8
-rw-r--r--drivers/irqchip/irq-stm32-exti.c14
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c18
-rw-r--r--drivers/isdn/capi/kcapi.c4
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c24
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c2
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c3
-rw-r--r--drivers/isdn/mISDN/Kconfig1
-rw-r--r--drivers/leds/led-class.c1
-rw-r--r--drivers/leds/led-triggers.c10
-rw-r--r--drivers/leds/leds-88pm860x.c14
-rw-r--r--drivers/leds/leds-bcm6328.c2
-rw-r--r--drivers/leds/leds-bcm6358.c2
-rw-r--r--drivers/leds/leds-da903x.c14
-rw-r--r--drivers/leds/leds-lm3533.c12
-rw-r--r--drivers/leds/leds-lm355x.c7
-rw-r--r--drivers/leds/leds-lp5523.c2
-rw-r--r--drivers/leds/leds-mlxreg.c4
-rw-r--r--drivers/leds/leds-wm831x-status.c14
-rw-r--r--drivers/lightnvm/Kconfig1
-rw-r--r--drivers/macintosh/windfarm_pm112.c21
-rw-r--r--drivers/mailbox/mailbox.c12
-rw-r--r--drivers/md/bcache/bcache.h3
-rw-r--r--drivers/md/bcache/bset.c2
-rw-r--r--drivers/md/bcache/btree.c22
-rw-r--r--drivers/md/bcache/journal.c4
-rw-r--r--drivers/md/bcache/super.c19
-rw-r--r--drivers/md/bcache/writeback.c14
-rw-r--r--drivers/md/bcache/writeback.h19
-rw-r--r--drivers/md/dm-bufio.c10
-rw-r--r--drivers/md/dm-cache-metadata.c8
-rw-r--r--drivers/md/dm-core.h4
-rw-r--r--drivers/md/dm-crypt.c3
-rw-r--r--drivers/md/dm-era-target.c93
-rw-r--r--drivers/md/dm-integrity.c84
-rw-r--r--drivers/md/dm-ioctl.c3
-rw-r--r--drivers/md/dm-mpath.c8
-rw-r--r--drivers/md/dm-raid.c34
-rw-r--r--drivers/md/dm-rq.c5
-rw-r--r--drivers/md/dm-snap.c27
-rw-r--r--drivers/md/dm-table.c195
-rw-r--r--drivers/md/dm-thin-metadata.c8
-rw-r--r--drivers/md/dm-verity-fec.c31
-rw-r--r--drivers/md/dm-verity-fec.h1
-rw-r--r--drivers/md/dm-verity-target.c14
-rw-r--r--drivers/md/dm-writecache.c78
-rw-r--r--drivers/md/dm-zoned-metadata.c5
-rw-r--r--drivers/md/dm-zoned-reclaim.c4
-rw-r--r--drivers/md/dm-zoned-target.c2
-rw-r--r--drivers/md/dm.c96
-rw-r--r--drivers/md/md-bitmap.c4
-rw-r--r--drivers/md/md-cluster.c68
-rw-r--r--drivers/md/md.c114
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h4
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c2
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.h8
-rw-r--r--drivers/md/raid0.c3
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/md/raid10.c3
-rw-r--r--drivers/md/raid5.c7
-rw-r--r--drivers/media/cec/cec-adap.c8
-rw-r--r--drivers/media/cec/cec-api.c8
-rw-r--r--drivers/media/common/siano/smsdvb-main.c5
-rw-r--r--drivers/media/dvb-core/dvbdev.c1
-rw-r--r--drivers/media/dvb-frontends/sp8870.c4
-rw-r--r--drivers/media/dvb-frontends/tda10071.c9
-rw-r--r--drivers/media/firewire/firedtv-fw.c4
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c2
-rw-r--r--drivers/media/i2c/adv7604.c2
-rw-r--r--drivers/media/i2c/adv7842.c2
-rw-r--r--drivers/media/i2c/imx274.c8
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c3
-rw-r--r--drivers/media/i2c/max2175.c2
-rw-r--r--drivers/media/i2c/ov5640.c4
-rw-r--r--drivers/media/i2c/ov5670.c3
-rw-r--r--drivers/media/i2c/ov5695.c49
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c3
-rw-r--r--drivers/media/i2c/tc358743.c16
-rw-r--r--drivers/media/i2c/video-i2c.c2
-rw-r--r--drivers/media/media-device.c65
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c13
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c4
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c5
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c4
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c62
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h1
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_spi.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-tvaudio.c3
-rw-r--r--drivers/media/pci/saa7146/mxb.c19
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c20
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c2
-rw-r--r--drivers/media/pci/sta2x11/Kconfig1
-rw-r--r--drivers/media/pci/ttpci/av7110.c5
-rw-r--r--drivers/media/pci/ttpci/budget-core.c11
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c6
-rw-r--r--drivers/media/platform/davinci/vpss.c20
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c5
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c4
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c7
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c1
-rw-r--r--drivers/media/platform/mx2_emmaprp.c7
-rw-r--r--drivers/media/platform/omap3isp/isp.c6
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c4
-rw-r--r--drivers/media/platform/pxa_camera.c3
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c4
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c1
-rw-r--r--drivers/media/platform/qcom/venus/core.c5
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c1
-rw-r--r--drivers/media/platform/rcar-fcp.c9
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c4
-rw-r--r--drivers/media/platform/rcar_drif.c1
-rw-r--r--drivers/media/platform/rcar_fdp1.c2
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c1
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.c29
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.h5
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c5
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c4
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c3
-rw-r--r--drivers/media/platform/sti/delta/delta-v4l2.c4
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c4
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c4
-rw-r--r--drivers/media/platform/ti-vpe/cal.c22
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2
-rw-r--r--drivers/media/platform/vivid/vivid-core.c6
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c6
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c15
-rw-r--r--drivers/media/rc/ati_remote.c4
-rw-r--r--drivers/media/rc/gpio-ir-tx.c7
-rw-r--r--drivers/media/rc/ite-cir.c8
-rw-r--r--drivers/media/rc/mceusb.c9
-rw-r--r--drivers/media/rc/rc-main.c48
-rw-r--r--drivers/media/rc/sunxi-cir.c2
-rw-r--r--drivers/media/tuners/m88rs6000t.c6
-rw-r--r--drivers/media/tuners/qm1d1c0042.c4
-rw-r--r--drivers/media/tuners/si2157.c15
-rw-r--r--drivers/media/tuners/tuner-simple.c5
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mb.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c20
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c1
-rw-r--r--drivers/media/usb/go7007/go7007-usb.c4
-rw-r--r--drivers/media/usb/go7007/snd-go7007.c35
-rw-r--r--drivers/media/usb/gspca/gspca.c3
-rw-r--r--drivers/media/usb/gspca/gspca.h1
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_po1030.c10
-rw-r--r--drivers/media/usb/gspca/sq905.c2
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx.c9
-rw-r--r--drivers/media/usb/msi2500/msi2500.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c5
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c3
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c33
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c7
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c35
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c30
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c19
-rw-r--r--drivers/memory/emif.c33
-rw-r--r--drivers/memory/fsl-corenet-cf.c6
-rw-r--r--drivers/memory/omap-gpmc.c15
-rw-r--r--drivers/memory/ti-aemif.c8
-rw-r--r--drivers/memstick/core/memstick.c1
-rw-r--r--drivers/memstick/core/ms_block.c2
-rw-r--r--drivers/memstick/core/mspro_block.c2
-rw-r--r--drivers/memstick/host/r592.c12
-rw-r--r--drivers/message/fusion/mptscsih.c17
-rw-r--r--drivers/mfd/arizona-core.c18
-rw-r--r--drivers/mfd/bd9571mwv.c6
-rw-r--r--drivers/mfd/dln2.c13
-rw-r--r--drivers/mfd/intel-lpss-pci.c3
-rw-r--r--drivers/mfd/intel-lpss.c2
-rw-r--r--drivers/mfd/mfd-core.c10
-rw-r--r--drivers/mfd/sm501.c8
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c28
-rw-r--r--drivers/mfd/stm32-timers.c7
-rw-r--r--drivers/mfd/wm831x-auxadc.c3
-rw-r--r--drivers/mfd/wm8994-core.c1
-rw-r--r--drivers/misc/aspeed-lpc-snoop.c34
-rw-r--r--drivers/misc/atmel-ssc.c24
-rw-r--r--drivers/misc/cardreader/rts5227.c5
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c7
-rw-r--r--drivers/misc/cxl/pci.c4
-rw-r--r--drivers/misc/cxl/sysfs.c2
-rw-r--r--drivers/misc/echo/echo.c2
-rw-r--r--drivers/misc/eeprom/at25.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c16
-rw-r--r--drivers/misc/kgdbts.c27
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c21
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.h1
-rw-r--r--drivers/misc/lkdtm/Makefile2
-rw-r--r--drivers/misc/lkdtm/rodata.c2
-rw-r--r--drivers/misc/mei/bus.c3
-rw-r--r--drivers/misc/mei/client.c2
-rw-r--r--drivers/misc/mei/client.h4
-rw-r--r--drivers/misc/mei/interrupt.c3
-rw-r--r--drivers/misc/mic/scif/scif_rma.c4
-rw-r--r--drivers/misc/mic/vop/vop_main.c2
-rw-r--r--drivers/misc/mic/vop/vop_vringh.c24
-rw-r--r--drivers/misc/pch_phub.c1
-rw-r--r--drivers/misc/pci_endpoint_test.c20
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c15
-rw-r--r--drivers/mmc/core/block.c25
-rw-r--r--drivers/mmc/core/bus.c11
-rw-r--r--drivers/mmc/core/core.c2
-rw-r--r--drivers/mmc/core/core.h9
-rw-r--r--drivers/mmc/core/mmc.c31
-rw-r--r--drivers/mmc/core/mmc_ops.c4
-rw-r--r--drivers/mmc/core/queue.c22
-rw-r--r--drivers/mmc/core/sd.c6
-rw-r--r--drivers/mmc/core/sdio.c3
-rw-r--r--drivers/mmc/core/sdio_cis.c9
-rw-r--r--drivers/mmc/host/cqhci.c21
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c14
-rw-r--r--drivers/mmc/host/mtk-sd.c18
-rw-r--r--drivers/mmc/host/mxs-mmc.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c1
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c22
-rw-r--r--drivers/mmc/host/sdhci-acpi.c47
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c5
-rw-r--r--drivers/mmc/host/sdhci-msm.c30
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c1
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c33
-rw-r--r--drivers/mmc/host/sdhci-xenon.c17
-rw-r--r--drivers/mmc/host/sdhci.c2
-rw-r--r--drivers/mmc/host/usdhi6rol0.c4
-rw-r--r--drivers/mmc/host/via-sdmmc.c10
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c7
-rw-r--r--drivers/mtd/cmdlinepart.c35
-rw-r--r--drivers/mtd/devices/phram.c15
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c35
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c1
-rw-r--r--drivers/mtd/mtd_blkdevs.c2
-rw-r--r--drivers/mtd/mtdchar.c56
-rw-r--r--drivers/mtd/mtdcore.c3
-rw-r--r--drivers/mtd/mtdoops.c11
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c4
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c8
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c4
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/main.c2
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c26
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c4
-rw-r--r--drivers/mtd/nand/raw/cmx270_nand.c4
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c4
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c4
-rw-r--r--drivers/mtd/nand/raw/denali.c6
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c11
-rw-r--r--drivers/mtd/nand/raw/docg4.c4
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c5
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c5
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c4
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c6
-rw-r--r--drivers/mtd/nand/raw/gpio.c4
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c6
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c5
-rw-r--r--drivers/mtd/nand/raw/jz4740_nand.c4
-rw-r--r--drivers/mtd/nand/raw/jz4780_nand.c6
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c5
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c5
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c29
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c4
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c6
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c4
-rw-r--r--drivers/mtd/nand/raw/nand_base.c29
-rw-r--r--drivers/mtd/nand/raw/nand_timings.c5
-rw-r--r--drivers/mtd/nand/raw/nandsim.c6
-rw-r--r--drivers/mtd/nand/raw/ndfc.c4
-rw-r--r--drivers/mtd/nand/raw/nuc900_nand.c4
-rw-r--r--drivers/mtd/nand/raw/omap2.c4
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c1
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c7
-rw-r--r--drivers/mtd/nand/raw/oxnas_nand.c40
-rw-r--r--drivers/mtd/nand/raw/pasemi_nand.c8
-rw-r--r--drivers/mtd/nand/raw/plat_nand.c6
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c20
-rw-r--r--drivers/mtd/nand/raw/r852.c4
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c4
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c4
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c6
-rw-r--r--drivers/mtd/nand/raw/sm_common.c2
-rw-r--r--drivers/mtd/nand/raw/socrates_nand.c7
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c6
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c4
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c2
-rw-r--r--drivers/mtd/nand/raw/tmio_nand.c6
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c4
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c4
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c6
-rw-r--r--drivers/mtd/nand/spi/core.c24
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c2
-rw-r--r--drivers/mtd/spi-nor/hisi-sfc.c4
-rw-r--r--drivers/mtd/ubi/debug.c12
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c46
-rw-r--r--drivers/mtd/ubi/fastmap.c14
-rw-r--r--drivers/mtd/ubi/ubi.h6
-rw-r--r--drivers/mtd/ubi/wl.c45
-rw-r--r--drivers/mtd/ubi/wl.h1
-rw-r--r--drivers/net/bonding/bond_main.c114
-rw-r--r--drivers/net/bonding/bond_netlink.c3
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c16
-rw-r--r--drivers/net/caif/caif_serial.c1
-rw-r--r--drivers/net/can/c_can/c_can.c24
-rw-r--r--drivers/net/can/c_can/c_can_pci.c3
-rw-r--r--drivers/net/can/c_can/c_can_platform.c6
-rw-r--r--drivers/net/can/dev.c21
-rw-r--r--drivers/net/can/flexcan.c57
-rw-r--r--drivers/net/can/m_can/m_can.c9
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c11
-rw-r--r--drivers/net/can/rx-offload.c4
-rw-r--r--drivers/net/can/softing/softing_main.c9
-rw-r--r--drivers/net/can/ti_hecc.c13
-rw-r--r--drivers/net/can/usb/gs_usb.c131
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c2
-rw-r--r--drivers/net/can/usb/mcba_usb.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c57
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c56
-rw-r--r--drivers/net/can/vxcan.c6
-rw-r--r--drivers/net/dsa/b53/b53_common.c60
-rw-r--r--drivers/net/dsa/b53/b53_regs.h9
-rw-r--r--drivers/net/dsa/bcm_sf2.c21
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c9
-rw-r--r--drivers/net/dsa/dsa_loop.c1
-rw-r--r--drivers/net/dsa/mt7530.c46
-rw-r--r--drivers/net/dsa/mt7530.h14
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c37
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_vtu.c59
-rw-r--r--drivers/net/dsa/realtek-smi.h4
-rw-r--r--drivers/net/dsa/rtl8366.c301
-rw-r--r--drivers/net/dsa/rtl8366rb.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c11
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c41
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c5
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h14
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c39
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h6
-rw-r--r--drivers/net/ethernet/apple/bmac.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c2
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c1
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c16
-rw-r--r--drivers/net/ethernet/broadcom/b44.c3
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c70
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c11
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c85
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c21
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c7
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h2
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c5
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c56
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c76
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h122
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c52
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_api.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c34
-rw-r--r--drivers/net/ethernet/cortina/gemini.c33
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c27
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/media.c5
-rw-r--r--drivers/net/ethernet/ethoc.c3
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c5
-rw-r--r--drivers/net/ethernet/freescale/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/fec.h7
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c199
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c7
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_mac.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c9
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c39
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c16
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h9
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c51
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c17
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c16
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c21
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c107
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c22
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c113
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c90
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c47
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c16
-rw-r--r--drivers/net/ethernet/korina.c5
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c22
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c9
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c48
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h2
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c8
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c3
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c5
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c9
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c31
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h3
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c4
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c6
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c7
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h14
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c32
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c4
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c7
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c38
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c23
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c11
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c3
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c17
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c50
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169.c73
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c36
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c11
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c29
-rw-r--r--drivers/net/ethernet/sis/sis900.c5
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c9
-rw-r--r--drivers/net/ethernet/socionext/netsec.c9
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c60
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c86
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c7
-rw-r--r--drivers/net/ethernet/sun/cassini.c3
-rw-r--r--drivers/net/ethernet/sun/niu.c34
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c23
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c1
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c9
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/ti/tlan.c4
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c4
-rw-r--r--drivers/net/ethernet/via/via-velocity.c13
-rw-r--r--drivers/net/fddi/Kconfig15
-rw-r--r--drivers/net/fddi/defxx.c47
-rw-r--r--drivers/net/geneve.c57
-rw-r--r--drivers/net/gtp.c31
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c11
-rw-r--r--drivers/net/ieee802154/adf7242.c8
-rw-r--r--drivers/net/ieee802154/atusb.c1
-rw-r--r--drivers/net/ieee802154/ca8210.c1
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c27
-rw-r--r--drivers/net/macsec.c19
-rw-r--r--drivers/net/macvlan.c27
-rw-r--r--drivers/net/net_failover.c3
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/bcm-phy-lib.c11
-rw-r--r--drivers/net/phy/dp83640.c6
-rw-r--r--drivers/net/phy/marvell.c28
-rw-r--r--drivers/net/phy/mdio-octeon.c2
-rw-r--r--drivers/net/phy/mdio-thunder.c1
-rw-r--r--drivers/net/phy/phy.c15
-rw-r--r--drivers/net/phy/phy_device.c9
-rw-r--r--drivers/net/phy/sfp-bus.c79
-rw-r--r--drivers/net/phy/sfp.c3
-rw-r--r--drivers/net/ppp/pppoe.c3
-rw-r--r--drivers/net/team/team.c13
-rw-r--r--drivers/net/tun.c76
-rw-r--r--drivers/net/usb/asix_common.c2
-rw-r--r--drivers/net/usb/ax88172a.c1
-rw-r--r--drivers/net/usb/ax88179_178a.c16
-rw-r--r--drivers/net/usb/cdc-phonet.c2
-rw-r--r--drivers/net/usb/cdc_ether.c18
-rw-r--r--drivers/net/usb/cdc_ncm.c11
-rw-r--r--drivers/net/usb/dm9601.c4
-rw-r--r--drivers/net/usb/hso.c47
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/lan78xx.c113
-rw-r--r--drivers/net/usb/qmi_wwan.c27
-rw-r--r--drivers/net/usb/r8152.c37
-rw-r--r--drivers/net/usb/rndis_host.c4
-rw-r--r--drivers/net/usb/rtl8150.c16
-rw-r--r--drivers/net/usb/smsc75xx.c8
-rw-r--r--drivers/net/usb/smsc95xx.c11
-rw-r--r--drivers/net/veth.c11
-rw-r--r--drivers/net/virtio_net.c70
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c2
-rw-r--r--drivers/net/vrf.c102
-rw-r--r--drivers/net/vxlan.c36
-rw-r--r--drivers/net/wan/Kconfig3
-rw-r--r--drivers/net/wan/Makefile12
-rw-r--r--drivers/net/wan/cosa.c1
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c8
-rw-r--r--drivers/net/wan/hdlc.c10
-rw-r--r--drivers/net/wan/hdlc_cisco.c2
-rw-r--r--drivers/net/wan/hdlc_fr.c104
-rw-r--r--drivers/net/wan/hdlc_ppp.c24
-rw-r--r--drivers/net/wan/hdlc_raw_eth.c1
-rw-r--r--drivers/net/wan/lapbether.c55
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/x25_asy.c21
-rw-r--r--drivers/net/wimax/i2400m/op-rfkill.c2
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c77
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c22
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c29
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c58
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c81
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c6
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c21
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig1
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c29
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c12
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/xmit.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c4
-rw-r--r--drivers/net/wireless/cisco/airo.c12
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c12
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c14
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c4
-rw-r--r--drivers/net/wireless/intersil/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c12
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c28
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c24
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c3
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h6
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c5
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c500
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c13
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c52
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h8
-rw-r--r--drivers/net/wireless/st/cw1200/main.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c2
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c19
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h3
-rw-r--r--drivers/net/wireless/wl3501.h47
-rw-r--r--drivers/net/wireless/wl3501_cs.c54
-rw-r--r--drivers/net/xen-netback/common.h15
-rw-r--r--drivers/net/xen-netback/interface.c63
-rw-r--r--drivers/net/xen-netback/netback.c21
-rw-r--r--drivers/net/xen-netback/rx.c22
-rw-r--r--drivers/net/xen-netback/xenbus.c16
-rw-r--r--drivers/net/xen-netfront.c64
-rw-r--r--drivers/nfc/pn533/pn533.c3
-rw-r--r--drivers/nfc/s3fwrn5/core.c1
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c4
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c4
-rw-r--r--drivers/nfc/st21nfca/dep.c4
-rw-r--r--drivers/nfc/st95hf/core.c2
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c1
-rw-r--r--drivers/ntb/ntb.c9
-rw-r--r--drivers/ntb/test/ntb_perf.c30
-rw-r--r--drivers/ntb/test/ntb_pingpong.c14
-rw-r--r--drivers/ntb/test/ntb_tool.c9
-rw-r--r--drivers/nvdimm/blk.c2
-rw-r--r--drivers/nvdimm/btt.c39
-rw-r--r--drivers/nvdimm/btt.h2
-rw-r--r--drivers/nvdimm/btt_devs.c8
-rw-r--r--drivers/nvdimm/bus.c6
-rw-r--r--drivers/nvdimm/dimm_devs.c18
-rw-r--r--drivers/nvdimm/label.c13
-rw-r--r--drivers/nvdimm/namespace_devs.c7
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/nvme/host/core.c80
-rw-r--r--drivers/nvme/host/fabrics.c1
-rw-r--r--drivers/nvme/host/fc.c25
-rw-r--r--drivers/nvme/host/lightnvm.c105
-rw-r--r--drivers/nvme/host/multipath.c77
-rw-r--r--drivers/nvme/host/nvme.h29
-rw-r--r--drivers/nvme/host/pci.c17
-rw-r--r--drivers/nvme/host/rdma.c17
-rw-r--r--drivers/nvme/target/core.c26
-rw-r--r--drivers/nvme/target/fc.c4
-rw-r--r--drivers/nvme/target/fcloop.c1
-rw-r--r--drivers/nvme/target/io-cmd-file.c8
-rw-r--r--drivers/nvme/target/rdma.c30
-rw-r--r--drivers/nvmem/qfprom.c14
-rw-r--r--drivers/of/address.c4
-rw-r--r--drivers/of/kobj.c3
-rw-r--r--drivers/of/of_mdio.c9
-rw-r--r--drivers/of/of_reserved_mem.c13
-rw-r--r--drivers/of/overlay.c2
-rw-r--r--drivers/of/unittest.c16
-rw-r--r--drivers/parisc/sba_iommu.c2
-rw-r--r--drivers/pci/access.c8
-rw-r--r--drivers/pci/bus.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c55
-rw-r--r--drivers/pci/controller/pci-aardvark.c4
-rw-r--r--drivers/pci/controller/pci-tegra.c3
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c2
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c13
-rw-r--r--drivers/pci/controller/pci-v3-semi.c2
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c10
-rw-r--r--drivers/pci/controller/pcie-cadence-host.c9
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c15
-rw-r--r--drivers/pci/controller/pcie-iproc.c10
-rw-r--r--drivers/pci/controller/pcie-mediatek.c25
-rw-r--r--drivers/pci/controller/pcie-rcar.c9
-rw-r--r--drivers/pci/controller/vmd.c16
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c3
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c10
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c15
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c40
-rw-r--r--drivers/pci/hotplug/rpadlpar_sysfs.c14
-rw-r--r--drivers/pci/pci-acpi.c4
-rw-r--r--drivers/pci/pci.c43
-rw-r--r--drivers/pci/pci.h6
-rw-r--r--drivers/pci/pcie/aspm.c33
-rw-r--r--drivers/pci/pcie/ptm.c22
-rw-r--r--drivers/pci/probe.c82
-rw-r--r--drivers/pci/quirks.c381
-rw-r--r--drivers/pci/rom.c17
-rw-r--r--drivers/pci/setup-bus.c45
-rw-r--r--drivers/pci/setup-res.c9
-rw-r--r--drivers/pci/slot.c12
-rw-r--r--drivers/pci/switch/switchtec.c2
-rw-r--r--drivers/pci/syscall.c10
-rw-r--r--drivers/perf/arm_pmu_platform.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c2
-rw-r--r--drivers/perf/xgene_pmu.c32
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c5
-rw-r--r--drivers/phy/marvell/Kconfig4
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c19
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c16
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c4
-rw-r--r--drivers/phy/samsung/phy-s5pv210-usb2.c4
-rw-r--r--drivers/phy/tegra/xusb.c1
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c7
-rw-r--r--drivers/pinctrl/bcm/Kconfig1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c19
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c76
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c8
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c8
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c13
-rw-r--r--drivers/pinctrl/pinctrl-amd.h2
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c14
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c3
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c24
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c22
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c2
-rw-r--r--drivers/pinctrl/pinctrl-single.c11
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c92
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c6
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/acer-wmi.c170
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c24
-rw-r--r--drivers/platform/x86/dell-smbios-base.c1
-rw-r--r--drivers/platform/x86/dell-smbios-wmi.c3
-rw-r--r--drivers/platform/x86/gpd-pocket-fan.c2
-rw-r--r--drivers/platform/x86/hp-wireless.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c24
-rw-r--r--drivers/platform/x86/hp_accel.c22
-rw-r--r--drivers/platform/x86/intel-hid.c16
-rw-r--r--drivers/platform/x86/intel-vbtn.c143
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c1
-rw-r--r--drivers/platform/x86/mlx-platform.c42
-rw-r--r--drivers/platform/x86/pmc_atom.c28
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c56
-rw-r--r--drivers/platform/x86/toshiba_acpi.c3
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c18
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c2
-rw-r--r--drivers/power/reset/vexpress-poweroff.c1
-rw-r--r--drivers/power/supply/88pm860x_battery.c6
-rw-r--r--drivers/power/supply/Kconfig2
-rw-r--r--drivers/power/supply/axp288_charger.c28
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c4
-rw-r--r--drivers/power/supply/bq24190_charger.c20
-rw-r--r--drivers/power/supply/bq27xxx_battery.c62
-rw-r--r--drivers/power/supply/generic-adc-battery.c2
-rw-r--r--drivers/power/supply/lp8788-charger.c20
-rw-r--r--drivers/power/supply/max17040_battery.c2
-rw-r--r--drivers/power/supply/pm2301_charger.c2
-rw-r--r--drivers/power/supply/s3c_adc_battery.c2
-rw-r--r--drivers/power/supply/smb347-charger.c1
-rw-r--r--drivers/power/supply/test_power.c6
-rw-r--r--drivers/power/supply/tps65090-charger.c2
-rw-r--r--drivers/power/supply/tps65217_charger.c2
-rw-r--r--drivers/powercap/powercap_sys.c4
-rw-r--r--drivers/ps3/ps3stor_lib.c2
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c9
-rw-r--r--drivers/pwm/pwm-bcm2835.c1
-rw-r--r--drivers/pwm/pwm-img.c11
-rw-r--r--drivers/pwm/pwm-lp3943.c1
-rw-r--r--drivers/pwm/pwm-lpss.c7
-rw-r--r--drivers/pwm/pwm-pca9685.c85
-rw-r--r--drivers/pwm/pwm-rcar.c10
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c9
-rw-r--r--drivers/pwm/pwm-rockchip.c1
-rw-r--r--drivers/pwm/pwm-zx.c1
-rw-r--r--drivers/rapidio/Kconfig2
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c27
-rw-r--r--drivers/rapidio/rio_cm.c17
-rw-r--r--drivers/regulator/axp20x-regulator.c7
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c4
-rw-r--r--drivers/regulator/core.c185
-rw-r--r--drivers/regulator/pfuze100-regulator.c73
-rw-r--r--drivers/regulator/pwm-regulator.c2
-rw-r--r--drivers/regulator/s5m8767.c8
-rw-r--r--drivers/regulator/ti-abb-regulator.c12
-rw-r--r--drivers/remoteproc/qcom_q6v5.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_pil.c11
-rw-r--r--drivers/remoteproc/remoteproc_core.c5
-rw-r--r--drivers/rpmsg/qcom_glink_native.c7
-rw-r--r--drivers/rpmsg/qcom_smd.c32
-rw-r--r--drivers/rtc/Kconfig1
-rw-r--r--drivers/rtc/rtc-88pm860x.c14
-rw-r--r--drivers/rtc/rtc-ds1307.c12
-rw-r--r--drivers/rtc/rtc-ds1374.c15
-rw-r--r--drivers/rtc/rtc-goldfish.c1
-rw-r--r--drivers/rtc/rtc-omap.c4
-rw-r--r--drivers/rtc/rtc-rx8010.c24
-rw-r--r--drivers/rtc/rtc-sa1100.c18
-rw-r--r--drivers/rtc/rtc-sun6i.c8
-rw-r--r--drivers/s390/block/dasd.c12
-rw-r--r--drivers/s390/block/dasd_alias.c22
-rw-r--r--drivers/s390/block/dasd_fba.c9
-rw-r--r--drivers/s390/block/dasd_genhd.c2
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/block/scm_blk.c2
-rw-r--r--drivers/s390/cio/css.c5
-rw-r--r--drivers/s390/cio/device.c13
-rw-r--r--drivers/s390/cio/qdio.h1
-rw-r--r--drivers/s390/cio/qdio_setup.c1
-rw-r--r--drivers/s390/cio/qdio_thinint.c14
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c6
-rw-r--r--drivers/s390/crypto/zcrypt_api.c3
-rw-r--r--drivers/s390/net/qeth_core_main.c6
-rw-r--r--drivers/s390/net/qeth_l2_main.c4
-rw-r--r--drivers/s390/scsi/zfcp_erp.c15
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c4
-rw-r--r--drivers/s390/virtio/virtio_ccw.c4
-rw-r--r--drivers/scsi/BusLogic.c6
-rw-r--r--drivers/scsi/BusLogic.h2
-rw-r--r--drivers/scsi/Kconfig9
-rw-r--r--drivers/scsi/aacraid/aachba.c8
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/aacraid/linit.c46
-rw-r--r--drivers/scsi/arm/acornscsi.c4
-rw-r--r--drivers/scsi/arm/cumana_2.c2
-rw-r--r--drivers/scsi/arm/eesox.c2
-rw-r--r--drivers/scsi/arm/powertec.c2
-rw-r--r--drivers/scsi/bnx2fc/Kconfig1
-rw-r--r--drivers/scsi/bnx2i/Kconfig1
-rw-r--r--drivers/scsi/csiostor/csio_hw.c2
-rw-r--r--drivers/scsi/cxlflash/main.c1
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c14
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c2
-rw-r--r--drivers/scsi/fnic/fnic_main.c1
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c3
-rw-r--r--drivers/scsi/hpsa.c84
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c62
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c6
-rw-r--r--drivers/scsi/iscsi_boot_sysfs.c2
-rw-r--r--drivers/scsi/jazz_esp.c4
-rw-r--r--drivers/scsi/libfc/fc_disc.c14
-rw-r--r--drivers/scsi/libfc/fc_exch.c16
-rw-r--r--drivers/scsi/libfc/fc_lport.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c13
-rw-r--r--drivers/scsi/libiscsi.c182
-rw-r--r--drivers/scsi/libsas/sas_ata.c6
-rw-r--r--drivers/scsi/libsas/sas_port.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c35
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c137
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h36
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h174
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c103
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c36
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c26
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c18
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c7
-rw-r--r--drivers/scsi/mesh.c8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c12
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c8
-rw-r--r--drivers/scsi/mvumi.c1
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c3
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c52
-rw-r--r--drivers/scsi/qedf/qedf.h1
-rw-r--r--drivers/scsi/qedf/qedf_main.c35
-rw-r--r--drivers/scsi/qedi/qedi_fw.c23
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c12
-rw-r--r--drivers/scsi/qedi/qedi_main.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c30
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c101
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c77
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c98
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_debug.c6
-rw-r--r--drivers/scsi/scsi_lib.c126
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c44
-rw-r--r--drivers/scsi/scsi_transport_spi.c29
-rw-r--r--drivers/scsi/scsi_transport_srp.c9
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sg.c8
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c2
-rw-r--r--drivers/scsi/sni_53c710.c5
-rw-r--r--drivers/scsi/sr.c8
-rw-r--r--drivers/scsi/sr_vendor.c8
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/sun3x_esp.c4
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c15
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c11
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h1
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c34
-rw-r--r--drivers/scsi/ufs/ufshcd.c69
-rw-r--r--drivers/slimbus/core.c7
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c14
-rw-r--r--drivers/soc/atmel/soc.c13
-rw-r--r--drivers/soc/fsl/dpio/dpio-driver.c5
-rw-r--r--drivers/soc/fsl/qbman/qman.c2
-rw-r--r--drivers/soc/imx/gpc.c24
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c5
-rw-r--r--drivers/soc/qcom/mdt_loader.c17
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c17
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c99
-rw-r--r--drivers/soc/qcom/rpmh.c56
-rw-r--r--drivers/soc/qcom/smp2p.c5
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra210.c2
-rw-r--r--drivers/soc/ti/knav_dma.c13
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c4
-rw-r--r--drivers/soundwire/bus.c3
-rw-r--r--drivers/soundwire/stream.c10
-rw-r--r--drivers/spi/Kconfig3
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-bcm-qspi.c42
-rw-r--r--drivers/spi/spi-bcm2835.c26
-rw-r--r--drivers/spi/spi-bcm2835aux.c21
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c4
-rw-r--r--drivers/spi/spi-cadence.c6
-rw-r--r--drivers/spi/spi-davinci.c2
-rw-r--r--drivers/spi/spi-dln2.c2
-rw-r--r--drivers/spi/spi-dw-mid.c16
-rw-r--r--drivers/spi/spi-dw.c15
-rw-r--r--drivers/spi/spi-fsl-dspi.c44
-rw-r--r--drivers/spi/spi-fsl-espi.c5
-rw-r--r--drivers/spi/spi-gpio.c8
-rw-r--r--drivers/spi/spi-img-spfi.c4
-rw-r--r--drivers/spi/spi-lantiq-ssc.c10
-rw-r--r--drivers/spi/spi-loopback-test.c2
-rw-r--r--drivers/spi/spi-mt65xx.c15
-rw-r--r--drivers/spi/spi-mxs.c1
-rw-r--r--drivers/spi/spi-omap-100k.c6
-rw-r--r--drivers/spi/spi-pic32.c1
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c27
-rw-r--r--drivers/spi/spi-pxa2xx.c15
-rw-r--r--drivers/spi/spi-rb4xx.c2
-rw-r--r--drivers/spi/spi-s3c24xx-fiq.S9
-rw-r--r--drivers/spi/spi-s3c64xx.c52
-rw-r--r--drivers/spi/spi-sc18is602.c13
-rw-r--r--drivers/spi/spi-sh.c13
-rw-r--r--drivers/spi/spi-sprd-adi.c2
-rw-r--r--drivers/spi/spi-st-ssc4.c5
-rw-r--r--drivers/spi/spi-stm32.c11
-rw-r--r--drivers/spi/spi-sun6i.c14
-rw-r--r--drivers/spi/spi-tegra114.c2
-rw-r--r--drivers/spi/spi-tegra20-sflash.c1
-rw-r--r--drivers/spi/spi-tegra20-slink.c2
-rw-r--r--drivers/spi/spi-ti-qspi.c21
-rw-r--r--drivers/spi/spi-topcliff-pch.c1
-rw-r--r--drivers/spi/spi.c106
-rw-r--r--drivers/spi/spidev.c45
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/ashmem.c12
-rw-r--r--drivers/staging/android/ion/ion_heap.c4
-rw-r--r--drivers/staging/comedi/comedi_fops.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c24
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c52
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c20
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c10
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c3
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c2
-rw-r--r--drivers/staging/comedi/drivers/das6402.c2
-rw-r--r--drivers/staging/comedi/drivers/das800.c2
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c2
-rw-r--r--drivers/staging/comedi/drivers/dt2815.c3
-rw-r--r--drivers/staging/comedi/drivers/me4000.c2
-rw-r--r--drivers/staging/comedi/drivers/mf6x4.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c2
-rw-r--r--drivers/staging/comedi/drivers/pcl711.c2
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c2
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c3
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c4
-rw-r--r--drivers/staging/erofs/erofs_fs.h3
-rw-r--r--drivers/staging/erofs/inode.c162
-rw-r--r--drivers/staging/erofs/unzip_vle.h20
-rw-r--r--drivers/staging/erofs/utils.c2
-rw-r--r--drivers/staging/fwserial/fwserial.c2
-rw-r--r--drivers/staging/gasket/apex_driver.c7
-rw-r--r--drivers/staging/gasket/gasket_core.c4
-rw-r--r--drivers/staging/gasket/gasket_interrupt.c15
-rw-r--r--drivers/staging/gasket/gasket_sysfs.c2
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c10
-rw-r--r--drivers/staging/greybus/audio_codec.c2
-rw-r--r--drivers/staging/greybus/audio_topology.c29
-rw-r--r--drivers/staging/greybus/light.c3
-rw-r--r--drivers/staging/greybus/sdio.c10
-rw-r--r--drivers/staging/greybus/uart.c6
-rw-r--r--drivers/staging/iio/cdc/ad7746.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c17
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c6
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c2
-rw-r--r--drivers/staging/media/omap4iss/iss.c4
-rw-r--r--drivers/staging/most/core.c2
-rw-r--r--drivers/staging/most/sound/sound.c2
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c2
-rw-r--r--drivers/staging/mt7621-dma/Makefile2
-rw-r--r--drivers/staging/mt7621-dma/hsdma-mt7621.c (renamed from drivers/staging/mt7621-dma/mtk-hsdma.c)6
-rw-r--r--drivers/staging/mt7621-mmc/Kconfig16
-rw-r--r--drivers/staging/mt7621-mmc/Makefile42
-rw-r--r--drivers/staging/mt7621-mmc/TODO8
-rw-r--r--drivers/staging/mt7621-mmc/board.h63
-rw-r--r--drivers/staging/mt7621-mmc/dbg.c307
-rw-r--r--drivers/staging/mt7621-mmc/dbg.h149
-rw-r--r--drivers/staging/mt7621-mmc/mt6575_sd.h488
-rw-r--r--drivers/staging/mt7621-mmc/sd.c2392
-rw-r--r--drivers/staging/mt7621-spi/spi-mt7621.c12
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c6
-rw-r--r--drivers/staging/octeon/ethernet-rx.c34
-rw-r--r--drivers/staging/octeon/ethernet.c9
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c6
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c7
-rw-r--r--drivers/staging/rtl8192e/rtllib.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c4
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8712/wifi.h9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/wifi_regd.c2
-rw-r--r--drivers/staging/sm750fb/sm750.c1
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c2
-rw-r--r--drivers/staging/speakup/spk_ttyio.c25
-rw-r--r--drivers/staging/vt6656/int.c3
-rw-r--r--drivers/staging/vt6656/key.c14
-rw-r--r--drivers/staging/vt6656/main_usb.c31
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c5
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c9
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c3
-rw-r--r--drivers/target/iscsi/iscsi_target.c113
-rw-r--r--drivers/target/iscsi/iscsi_target.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c11
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c3
-rw-r--r--drivers/target/target_core_fabric_lib.c2
-rw-r--r--drivers/target/target_core_iblock.c2
-rw-r--r--drivers/target/target_core_pr.c15
-rw-r--r--drivers/target/target_core_pscsi.c11
-rw-r--r--drivers/target/target_core_transport.c15
-rw-r--r--drivers/target/target_core_user.c187
-rw-r--r--drivers/target/target_core_xcopy.c119
-rw-r--r--drivers/target/target_core_xcopy.h1
-rw-r--r--drivers/tee/optee/call.c3
-rw-r--r--drivers/tee/optee/core.c10
-rw-r--r--drivers/thermal/cpu_cooling.c6
-rw-r--r--drivers/thermal/fair_share.c4
-rw-r--r--drivers/thermal/mtk_thermal.c6
-rw-r--r--drivers/thermal/rcar_thermal.c6
-rw-r--r--drivers/thermal/thermal_sysfs.c3
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4-thermal-data.c23
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h10
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c6
-rw-r--r--drivers/thunderbolt/dma_port.c11
-rw-r--r--drivers/thunderbolt/icm.c12
-rw-r--r--drivers/thunderbolt/nhi.c19
-rw-r--r--drivers/thunderbolt/switch.c18
-rw-r--r--drivers/thunderbolt/tb.c9
-rw-r--r--drivers/thunderbolt/tb.h1
-rw-r--r--drivers/thunderbolt/xdomain.c1
-rw-r--r--drivers/tty/ehv_bytechan.c21
-rw-r--r--drivers/tty/hvc/hvc_console.c46
-rw-r--r--drivers/tty/hvc/hvcs.c14
-rw-r--r--drivers/tty/ipwireless/network.c4
-rw-r--r--drivers/tty/ipwireless/tty.c2
-rw-r--r--drivers/tty/n_gsm.c26
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/rocket.c25
-rw-r--r--drivers/tty/serial/8250/8250_core.c13
-rw-r--r--drivers/tty/serial/8250/8250_exar.c36
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c20
-rw-r--r--drivers/tty/serial/8250/8250_omap.c13
-rw-r--r--drivers/tty/serial/8250/8250_pci.c17
-rw-r--r--drivers/tty/serial/8250/8250_port.c29
-rw-r--r--drivers/tty/serial/Kconfig1
-rw-r--r--drivers/tty/serial/amba-pl011.c28
-rw-r--r--drivers/tty/serial/fsl_lpuart.c2
-rw-r--r--drivers/tty/serial/imx.c20
-rw-r--r--drivers/tty/serial/max310x.c6
-rw-r--r--drivers/tty/serial/mvebu-uart.c13
-rw-r--r--drivers/tty/serial/mxs-auart.c12
-rw-r--r--drivers/tty/serial/pch_uart.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c14
-rw-r--r--drivers/tty/serial/rp2.c52
-rw-r--r--drivers/tty/serial/samsung.c16
-rw-r--r--drivers/tty/serial/serial_core.c4
-rw-r--r--drivers/tty/serial/serial_txx9.c3
-rw-r--r--drivers/tty/serial/sh-sci.c17
-rw-r--r--drivers/tty/serial/stm32-usart.c17
-rw-r--r--drivers/tty/serial/stm32-usart.h3
-rw-r--r--drivers/tty/serial/xilinx_uartps.c8
-rw-r--r--drivers/tty/tty_io.c15
-rw-r--r--drivers/tty/tty_jobctrl.c44
-rw-r--r--drivers/tty/vcc.c1
-rw-r--r--drivers/tty/vt/consolemap.c2
-rw-r--r--drivers/tty/vt/keyboard.c65
-rw-r--r--drivers/tty/vt/vt.c73
-rw-r--r--drivers/tty/vt/vt_ioctl.c46
-rw-r--r--drivers/uio/uio.c12
-rw-r--r--drivers/uio/uio_pdrv_genirq.c2
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c9
-rw-r--r--drivers/usb/chipidea/core.c24
-rw-r--r--drivers/usb/class/cdc-acm.c175
-rw-r--r--drivers/usb/class/cdc-acm.h4
-rw-r--r--drivers/usb/class/cdc-wdm.c116
-rw-r--r--drivers/usb/class/usblp.c66
-rw-r--r--drivers/usb/core/devio.c25
-rw-r--r--drivers/usb/core/hub.c26
-rw-r--r--drivers/usb/core/hub.h6
-rw-r--r--drivers/usb/core/message.c104
-rw-r--r--drivers/usb/core/quirks.c67
-rw-r--r--drivers/usb/core/sysfs.c5
-rw-r--r--drivers/usb/core/urb.c89
-rw-r--r--drivers/usb/dwc2/core.h2
-rw-r--r--drivers/usb/dwc2/core_intr.c169
-rw-r--r--drivers/usb/dwc2/gadget.c57
-rw-r--r--drivers/usb/dwc2/hcd.c27
-rw-r--r--drivers/usb/dwc2/hcd_intr.c14
-rw-r--r--drivers/usb/dwc2/params.c2
-rw-r--r--drivers/usb/dwc2/platform.c18
-rw-r--r--drivers/usb/dwc3/core.c22
-rw-r--r--drivers/usb/dwc3/core.h9
-rw-r--r--drivers/usb/dwc3/dwc3-haps.c4
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c1
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c5
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c6
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c7
-rw-r--r--drivers/usb/dwc3/ep0.c11
-rw-r--r--drivers/usb/dwc3/gadget.c313
-rw-r--r--drivers/usb/dwc3/ulpi.c22
-rw-r--r--drivers/usb/early/xhci-dbc.c8
-rw-r--r--drivers/usb/early/xhci-dbc.h18
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/composite.c101
-rw-r--r--drivers/usb/gadget/config.c4
-rw-r--r--drivers/usb/gadget/configfs.c38
-rw-r--r--drivers/usb/gadget/function/f_acm.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c19
-rw-r--r--drivers/usb/gadget/function/f_midi.c16
-rw-r--r--drivers/usb/gadget/function/f_ncm.c59
-rw-r--r--drivers/usb/gadget/function/f_printer.c17
-rw-r--r--drivers/usb/gadget/function/f_rndis.c4
-rw-r--r--drivers/usb/gadget/function/f_tcm.c7
-rw-r--r--drivers/usb/gadget/function/f_uac1.c44
-rw-r--r--drivers/usb/gadget/function/f_uac1_legacy.c2
-rw-r--r--drivers/usb/gadget/function/f_uac2.c71
-rw-r--r--drivers/usb/gadget/function/f_uvc.c7
-rw-r--r--drivers/usb/gadget/function/u_audio.c17
-rw-r--r--drivers/usb/gadget/function/u_ether.c11
-rw-r--r--drivers/usb/gadget/function/u_ether_configfs.h5
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c4
-rw-r--r--drivers/usb/gadget/legacy/audio.c4
-rw-r--r--drivers/usb/gadget/legacy/cdc2.c4
-rw-r--r--drivers/usb/gadget/legacy/ether.c4
-rw-r--r--drivers/usb/gadget/legacy/inode.c6
-rw-r--r--drivers/usb/gadget/legacy/ncm.c4
-rw-r--r--drivers/usb/gadget/u_f.h38
-rw-r--r--drivers/usb/gadget/udc/amd5536udc_pci.c10
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/core.c3
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c7
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c13
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c18
-rw-r--r--drivers/usb/gadget/udc/core.c13
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c25
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c26
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c2
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c7
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c11
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c3
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/gadget/udc/net2280.c4
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c50
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c5
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c4
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c4
-rw-r--r--drivers/usb/gadget/usbstring.c4
-rw-r--r--drivers/usb/host/ehci-exynos.c5
-rw-r--r--drivers/usb/host/ehci-hcd.c13
-rw-r--r--drivers/usb/host/ehci-hub.c4
-rw-r--r--drivers/usb/host/ehci-mv.c8
-rw-r--r--drivers/usb/host/ehci-mxc.c2
-rw-r--r--drivers/usb/host/ehci-omap.c1
-rw-r--r--drivers/usb/host/ehci-pci.c7
-rw-r--r--drivers/usb/host/ehci-platform.c127
-rw-r--r--drivers/usb/host/fotg210-hcd.c4
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c9
-rw-r--r--drivers/usb/host/max3421-hcd.c3
-rw-r--r--drivers/usb/host/ohci-exynos.c5
-rw-r--r--drivers/usb/host/ohci-hcd.c18
-rw-r--r--drivers/usb/host/ohci-sm501.c1
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c4
-rw-r--r--drivers/usb/host/sl811-hcd.c9
-rw-r--r--drivers/usb/host/xhci-debugfs.c8
-rw-r--r--drivers/usb/host/xhci-ext-caps.h5
-rw-r--r--drivers/usb/host/xhci-histb.c2
-rw-r--r--drivers/usb/host/xhci-hub.c43
-rw-r--r--drivers/usb/host/xhci-mem.c9
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c4
-rw-r--r--drivers/usb/host/xhci-mtk.c18
-rw-r--r--drivers/usb/host/xhci-mtk.h1
-rw-r--r--drivers/usb/host/xhci-pci.c34
-rw-r--r--drivers/usb/host/xhci-plat.c4
-rw-r--r--drivers/usb/host/xhci-ring.c37
-rw-r--r--drivers/usb/host/xhci-tegra.c7
-rw-r--r--drivers/usb/host/xhci.c83
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/misc/adutux.c1
-rw-r--r--drivers/usb/misc/iowarrior.c35
-rw-r--r--drivers/usb/misc/lvstest.c2
-rw-r--r--drivers/usb/misc/sisusbvga/Kconfig2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c22
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.h14
-rw-r--r--drivers/usb/misc/trancevibrator.c4
-rw-r--r--drivers/usb/misc/usbtest.c1
-rw-r--r--drivers/usb/misc/uss720.c1
-rw-r--r--drivers/usb/misc/yurex.c5
-rw-r--r--drivers/usb/mtu3/mtu3_core.c6
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c1
-rw-r--r--drivers/usb/musb/musb_core.c50
-rw-r--r--drivers/usb/musb/musb_debugfs.c10
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c1
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c2
-rw-r--r--drivers/usb/serial/ch341.c7
-rw-r--r--drivers/usb/serial/cp210x.c24
-rw-r--r--drivers/usb/serial/cyberjack.c7
-rw-r--r--drivers/usb/serial/cypress_m8.c2
-rw-r--r--drivers/usb/serial/cypress_m8.h3
-rw-r--r--drivers/usb/serial/digi_acceleport.c45
-rw-r--r--drivers/usb/serial/ftdi_sio.c51
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h15
-rw-r--r--drivers/usb/serial/garmin_gps.c4
-rw-r--r--drivers/usb/serial/io_edgeport.c26
-rw-r--r--drivers/usb/serial/iuu_phoenix.c42
-rw-r--r--drivers/usb/serial/keyspan_pda.c65
-rw-r--r--drivers/usb/serial/kl5kusb105.c10
-rw-r--r--drivers/usb/serial/mos7720.c6
-rw-r--r--drivers/usb/serial/mos7840.c4
-rw-r--r--drivers/usb/serial/option.c82
-rw-r--r--drivers/usb/serial/pl2303.c2
-rw-r--r--drivers/usb/serial/pl2303.h2
-rw-r--r--drivers/usb/serial/qcserial.c3
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c3
-rw-r--r--drivers/usb/serial/usb_wwan.c4
-rw-r--r--drivers/usb/storage/transport.c7
-rw-r--r--drivers/usb/storage/uas.c63
-rw-r--r--drivers/usb/storage/unusual_devs.h19
-rw-r--r--drivers/usb/storage/unusual_uas.h31
-rw-r--r--drivers/usb/storage/usb.c3
-rw-r--r--drivers/usb/typec/tcpci.c21
-rw-r--r--drivers/usb/typec/tcpci_rt1711h.c31
-rw-r--r--drivers/usb/typec/tcpm.c8
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c4
-rw-r--r--drivers/usb/usbip/stub_dev.c49
-rw-r--r--drivers/usb/usbip/usbip_common.h3
-rw-r--r--drivers/usb/usbip/usbip_event.c2
-rw-r--r--drivers/usb/usbip/vhci_hcd.c5
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c63
-rw-r--r--drivers/usb/usbip/vudc_dev.c1
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c57
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c4
-rw-r--r--drivers/vfio/pci/vfio_pci.c365
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c65
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c18
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h16
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c24
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c3
-rw-r--r--drivers/vfio/vfio_iommu_type1.c112
-rw-r--r--drivers/vhost/net.c6
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/vhost/vringh.c9
-rw-r--r--drivers/vhost/vsock.c109
-rw-r--r--drivers/video/backlight/lp855x_bl.c20
-rw-r--r--drivers/video/backlight/sky81452-backlight.c1
-rw-r--r--drivers/video/console/Kconfig46
-rw-r--r--drivers/video/console/newport_con.c19
-rw-r--r--drivers/video/console/vgacon.c273
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c2
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c2
-rw-r--r--drivers/video/fbdev/core/bitblit.c15
-rw-r--r--drivers/video/fbdev/core/fbcmap.c8
-rw-r--r--drivers/video/fbdev/core/fbcon.c376
-rw-r--r--drivers/video/fbdev/core/fbcon.h9
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c15
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c15
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.c1
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c15
-rw-r--r--drivers/video/fbdev/core/fbmem.c2
-rw-r--r--drivers/video/fbdev/core/tileblit.c3
-rw-r--r--drivers/video/fbdev/efifb.c2
-rw-r--r--drivers/video/fbdev/hgafb.c21
-rw-r--r--drivers/video/fbdev/hyperv_fb.c5
-rw-r--r--drivers/video/fbdev/imsttfb.c5
-rw-r--r--drivers/video/fbdev/neofb.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.c9
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c5
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c5
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c7
-rw-r--r--drivers/video/fbdev/pvr2fb.c2
-rw-r--r--drivers/video/fbdev/pxafb.c4
-rw-r--r--drivers/video/fbdev/sis/init.c11
-rw-r--r--drivers/video/fbdev/sis/init301.c4
-rw-r--r--drivers/video/fbdev/sm712fb.c2
-rw-r--r--drivers/video/fbdev/udlfb.c1
-rw-r--r--drivers/video/fbdev/vga16fb.c16
-rw-r--r--drivers/video/fbdev/w100fb.c2
-rw-r--r--drivers/virt/fsl_hypervisor.c17
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c6
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.h15
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c3
-rw-r--r--drivers/virt/vboxguest/vmmdev.h2
-rw-r--r--drivers/virtio/virtio_ring.c3
-rw-r--r--drivers/w1/masters/mxc_w1.c14
-rw-r--r--drivers/w1/masters/omap_hdq.c10
-rw-r--r--drivers/watchdog/Kconfig3
-rw-r--r--drivers/watchdog/da9062_wdt.c5
-rw-r--r--drivers/watchdog/f71808e_wdt.c13
-rw-r--r--drivers/watchdog/mei_wdt.c1
-rw-r--r--drivers/watchdog/qcom-wdt.c2
-rw-r--r--drivers/watchdog/rdc321x_wdt.c5
-rw-r--r--drivers/watchdog/sp5100_tco.h2
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/watchdog/sprd_wdt.c34
-rw-r--r--drivers/watchdog/watchdog_core.c22
-rw-r--r--drivers/watchdog/watchdog_dev.c25
-rw-r--r--drivers/xen/balloon.c12
-rw-r--r--drivers/xen/events/events_2l.c31
-rw-r--r--drivers/xen/events/events_base.c587
-rw-r--r--drivers/xen/events/events_fifo.c88
-rw-r--r--drivers/xen/events/events_internal.h42
-rw-r--r--drivers/xen/evtchn.c7
-rw-r--r--drivers/xen/gntdev-dmabuf.c8
-rw-r--r--drivers/xen/gntdev.c54
-rw-r--r--drivers/xen/platform-pci.c1
-rw-r--r--drivers/xen/preempt.c2
-rw-r--r--drivers/xen/privcmd.c25
-rw-r--r--drivers/xen/pvcalls-back.c79
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c14
-rw-r--r--drivers/xen/xen-pciback/pciback.h12
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c48
-rw-r--r--drivers/xen/xen-pciback/xenbus.c26
-rw-r--r--drivers/xen/xen-scsiback.c27
-rw-r--r--drivers/xen/xenbus/xenbus.h2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c27
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c8
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c111
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c7
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c34
-rw-r--r--fs/9p/v9fs.c5
-rw-r--r--fs/9p/vfs_file.c4
-rw-r--r--fs/affs/amigaffs.c27
-rw-r--r--fs/affs/file.c26
-rw-r--r--fs/affs/namei.c4
-rw-r--r--fs/afs/dynroot.c20
-rw-r--r--fs/afs/main.c6
-rw-r--r--fs/afs/proc.c1
-rw-r--r--fs/afs/write.c5
-rw-r--r--fs/aio.c8
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/binfmt_misc.c29
-rw-r--r--fs/block_dev.c42
-rw-r--r--fs/btrfs/async-thread.c8
-rw-r--r--fs/btrfs/async-thread.h1
-rw-r--r--fs/btrfs/backref.c1
-rw-r--r--fs/btrfs/ctree.c43
-rw-r--r--fs/btrfs/ctree.h17
-rw-r--r--fs/btrfs/delayed-inode.c18
-rw-r--r--fs/btrfs/dev-replace.c36
-rw-r--r--fs/btrfs/disk-io.c51
-rw-r--r--fs/btrfs/export.c8
-rw-r--r--fs/btrfs/export.h5
-rw-r--r--fs/btrfs/extent-tree.c30
-rw-r--r--fs/btrfs/extent_io.c278
-rw-r--r--fs/btrfs/extent_io.h6
-rw-r--r--fs/btrfs/file-item.c6
-rw-r--r--fs/btrfs/file.c22
-rw-r--r--fs/btrfs/free-space-cache.c12
-rw-r--r--fs/btrfs/inode.c123
-rw-r--r--fs/btrfs/ioctl.c82
-rw-r--r--fs/btrfs/print-tree.c12
-rw-r--r--fs/btrfs/qgroup.c40
-rw-r--r--fs/btrfs/raid56.c58
-rw-r--r--fs/btrfs/reada.c2
-rw-r--r--fs/btrfs/ref-verify.c3
-rw-r--r--fs/btrfs/relocation.c49
-rw-r--r--fs/btrfs/scrub.c4
-rw-r--r--fs/btrfs/send.c190
-rw-r--r--fs/btrfs/super.c62
-rw-r--r--fs/btrfs/sysfs.c4
-rw-r--r--fs/btrfs/tests/btrfs-tests.c8
-rw-r--r--fs/btrfs/tests/inode-tests.c1
-rw-r--r--fs/btrfs/transaction.c24
-rw-r--r--fs/btrfs/tree-checker.c346
-rw-r--r--fs/btrfs/tree-checker.h4
-rw-r--r--fs/btrfs/tree-log.c71
-rw-r--r--fs/btrfs/volumes.c300
-rw-r--r--fs/btrfs/volumes.h13
-rw-r--r--fs/buffer.c36
-rw-r--r--fs/cachefiles/rdwr.c5
-rw-r--r--fs/ceph/addr.c2
-rw-r--r--fs/ceph/caps.c35
-rw-r--r--fs/ceph/export.c5
-rw-r--r--fs/ceph/file.c1
-rw-r--r--fs/ceph/inode.c6
-rw-r--r--fs/ceph/mds_client.c17
-rw-r--r--fs/cifs/asn1.c16
-rw-r--r--fs/cifs/cifs_unicode.c8
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/cifsglob.h9
-rw-r--r--fs/cifs/cifssmb.c2
-rw-r--r--fs/cifs/connect.c13
-rw-r--r--fs/cifs/dir.c22
-rw-r--r--fs/cifs/file.c26
-rw-r--r--fs/cifs/inode.c22
-rw-r--r--fs/cifs/misc.c17
-rw-r--r--fs/cifs/smb1ops.c8
-rw-r--r--fs/cifs/smb2misc.c109
-rw-r--r--fs/cifs/smb2ops.c67
-rw-r--r--fs/cifs/smb2pdu.c22
-rw-r--r--fs/cifs/smb2pdu.h18
-rw-r--r--fs/cifs/transport.c35
-rw-r--r--fs/configfs/dir.c1
-rw-r--r--fs/configfs/file.c6
-rw-r--r--fs/coredump.c8
-rw-r--r--fs/crypto/crypto.c58
-rw-r--r--fs/crypto/fname.c1
-rw-r--r--fs/crypto/hooks.c34
-rw-r--r--fs/crypto/policy.c3
-rw-r--r--fs/dcache.c19
-rw-r--r--fs/direct-io.c5
-rw-r--r--fs/dlm/config.c3
-rw-r--r--fs/dlm/debug_fs.c1
-rw-r--r--fs/dlm/dlm_internal.h1
-rw-r--r--fs/dlm/lockspace.c6
-rw-r--r--fs/ecryptfs/crypto.c6
-rw-r--r--fs/ecryptfs/main.c6
-rw-r--r--fs/efivarfs/inode.c2
-rw-r--r--fs/efivarfs/super.c3
-rw-r--r--fs/eventpoll.c94
-rw-r--r--fs/exec.c23
-rw-r--r--fs/ext2/file.c6
-rw-r--r--fs/ext2/ialloc.c3
-rw-r--r--fs/ext2/xattr.c8
-rw-r--r--fs/ext4/block_validity.c87
-rw-r--r--fs/ext4/ext4.h71
-rw-r--r--fs/ext4/ext4_extents.h9
-rw-r--r--fs/ext4/extents.c73
-rw-r--r--fs/ext4/fsmap.c3
-rw-r--r--fs/ext4/fsync.c28
-rw-r--r--fs/ext4/ialloc.c50
-rw-r--r--fs/ext4/indirect.c6
-rw-r--r--fs/ext4/inline.c1
-rw-r--r--fs/ext4/inode.c64
-rw-r--r--fs/ext4/ioctl.c3
-rw-r--r--fs/ext4/mballoc.c22
-rw-r--r--fs/ext4/namei.c165
-rw-r--r--fs/ext4/resize.c4
-rw-r--r--fs/ext4/super.c245
-rw-r--r--fs/ext4/xattr.c13
-rw-r--r--fs/f2fs/checkpoint.c8
-rw-r--r--fs/f2fs/data.c3
-rw-r--r--fs/f2fs/dir.c8
-rw-r--r--fs/f2fs/f2fs.h6
-rw-r--r--fs/f2fs/file.c7
-rw-r--r--fs/f2fs/inline.c26
-rw-r--r--fs/f2fs/namei.c25
-rw-r--r--fs/f2fs/node.c19
-rw-r--r--fs/f2fs/recovery.c10
-rw-r--r--fs/f2fs/segment.h4
-rw-r--r--fs/f2fs/super.c13
-rw-r--r--fs/f2fs/sysfs.c1
-rw-r--r--fs/f2fs/xattr.c15
-rw-r--r--fs/fat/inode.c6
-rw-r--r--fs/fcntl.c10
-rw-r--r--fs/file.c2
-rw-r--r--fs/filesystems.c4
-rw-r--r--fs/fs-writeback.c114
-rw-r--r--fs/fuse/cuse.c2
-rw-r--r--fs/fuse/dev.c32
-rw-r--r--fs/fuse/file.c12
-rw-r--r--fs/gfs2/bmap.c16
-rw-r--r--fs/gfs2/glock.c3
-rw-r--r--fs/gfs2/glops.c2
-rw-r--r--fs/gfs2/inode.c13
-rw-r--r--fs/gfs2/lock_dlm.c8
-rw-r--r--fs/gfs2/log.c13
-rw-r--r--fs/gfs2/ops_fstype.c32
-rw-r--r--fs/gfs2/quota.c3
-rw-r--r--fs/gfs2/quota.h3
-rw-r--r--fs/gfs2/rgrp.c9
-rw-r--r--fs/gfs2/super.c11
-rw-r--r--fs/gfs2/trans.c2
-rw-r--r--fs/hfsplus/attributes.c4
-rw-r--r--fs/hfsplus/extents.c7
-rw-r--r--fs/hugetlbfs/inode.c7
-rw-r--r--fs/iomap.c34
-rw-r--r--fs/isofs/dir.c1
-rw-r--r--fs/isofs/namei.c1
-rw-r--r--fs/jbd2/commit.c7
-rw-r--r--fs/jbd2/journal.c4
-rw-r--r--fs/jbd2/transaction.c26
-rw-r--r--fs/jffs2/compr_rtime.c3
-rw-r--r--fs/jffs2/dir.c6
-rw-r--r--fs/jffs2/readinode.c16
-rw-r--r--fs/jffs2/scan.c2
-rw-r--r--fs/jffs2/summary.c3
-rw-r--r--fs/jfs/jfs_dmap.c2
-rw-r--r--fs/jfs/jfs_dmap.h2
-rw-r--r--fs/jfs/jfs_filsys.h1
-rw-r--r--fs/jfs/jfs_mount.c10
-rw-r--r--fs/libfs.c6
-rw-r--r--fs/lockd/host.c20
-rw-r--r--fs/minix/inode.c42
-rw-r--r--fs/minix/itree_common.c8
-rw-r--r--fs/minix/itree_v1.c12
-rw-r--r--fs/minix/itree_v2.c13
-rw-r--r--fs/minix/minix.h1
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/nfs/Kconfig2
-rw-r--r--fs/nfs/callback_proc.c2
-rw-r--r--fs/nfs/dir.c3
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/filelayout/filelayout.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c13
-rw-r--r--fs/nfs/fscache-index.c6
-rw-r--r--fs/nfs/fscache.c31
-rw-r--r--fs/nfs/fscache.h8
-rw-r--r--fs/nfs/inode.c10
-rw-r--r--fs/nfs/internal.h12
-rw-r--r--fs/nfs/namespace.c12
-rw-r--r--fs/nfs/nfs3acl.c22
-rw-r--r--fs/nfs/nfs3xdr.c3
-rw-r--r--fs/nfs/nfs42proc.c21
-rw-r--r--fs/nfs/nfs4file.c2
-rw-r--r--fs/nfs/nfs4proc.c41
-rw-r--r--fs/nfs/nfs4super.c2
-rw-r--r--fs/nfs/nfs4xdr.c6
-rw-r--r--fs/nfs/pagelist.c96
-rw-r--r--fs/nfs/pnfs.c115
-rw-r--r--fs/nfs/pnfs.h5
-rw-r--r--fs/nfs/write.c11
-rw-r--r--fs/nfs_common/grace.c6
-rw-r--r--fs/nfsd/nfs3xdr.c7
-rw-r--r--fs/nfsd/nfs4callback.c2
-rw-r--r--fs/nfsd/nfs4state.c75
-rw-r--r--fs/nfsd/nfsproc.c16
-rw-r--r--fs/nfsd/nfssvc.c3
-rw-r--r--fs/nfsd/vfs.c6
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/notify/fanotify/fanotify.c5
-rw-r--r--fs/ntfs/inode.c12
-rw-r--r--fs/ocfs2/alloc.c4
-rw-r--r--fs/ocfs2/aops.c11
-rw-r--r--fs/ocfs2/cluster/heartbeat.c8
-rw-r--r--fs/ocfs2/dlmglue.c25
-rw-r--r--fs/ocfs2/file.c8
-rw-r--r--fs/ocfs2/ocfs2.h5
-rw-r--r--fs/ocfs2/ocfs2_fs.h4
-rw-r--r--fs/ocfs2/suballoc.c13
-rw-r--r--fs/ocfs2/super.c5
-rw-r--r--fs/overlayfs/copy_up.c20
-rw-r--r--fs/overlayfs/dir.c2
-rw-r--r--fs/overlayfs/export.c2
-rw-r--r--fs/overlayfs/file.c10
-rw-r--r--fs/overlayfs/inode.c6
-rw-r--r--fs/overlayfs/super.c36
-rw-r--r--fs/pnode.c9
-rw-r--r--fs/proc/base.c7
-rw-r--r--fs/proc/generic.c55
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/internal.h7
-rw-r--r--fs/proc/proc_net.c16
-rw-r--r--fs/proc/self.c9
-rw-r--r--fs/proc/thread_self.c2
-rw-r--r--fs/proc/vmcore.c5
-rw-r--r--fs/pstore/inode.c5
-rw-r--r--fs/pstore/platform.c9
-rw-r--r--fs/quota/quota_tree.c8
-rw-r--r--fs/quota/quota_v2.c25
-rw-r--r--fs/ramfs/file-nommu.c2
-rw-r--r--fs/readdir.c6
-rw-r--r--fs/reiserfs/inode.c9
-rw-r--r--fs/reiserfs/stree.c6
-rw-r--r--fs/reiserfs/super.c8
-rw-r--r--fs/reiserfs/xattr.c7
-rw-r--r--fs/reiserfs/xattr.h2
-rw-r--r--fs/romfs/storage.c4
-rw-r--r--fs/select.c10
-rw-r--r--fs/signalfd.c10
-rw-r--r--fs/squashfs/export.c45
-rw-r--r--fs/squashfs/file.c6
-rw-r--r--fs/squashfs/id.c42
-rw-r--r--fs/squashfs/squashfs_fs.h1
-rw-r--r--fs/squashfs/squashfs_fs_sb.h1
-rw-r--r--fs/squashfs/super.c6
-rw-r--r--fs/squashfs/xattr.h10
-rw-r--r--fs/squashfs/xattr_id.c68
-rw-r--r--fs/super.c33
-rw-r--r--fs/sysfs/file.c55
-rw-r--r--fs/ubifs/debug.c1
-rw-r--r--fs/ubifs/dir.c25
-rw-r--r--fs/ubifs/file.c6
-rw-r--r--fs/ubifs/io.c29
-rw-r--r--fs/ubifs/replay.c3
-rw-r--r--fs/udf/inode.c34
-rw-r--r--fs/udf/super.c27
-rw-r--r--fs/ufs/super.c2
-rw-r--r--fs/xattr.c84
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c17
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c12
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c21
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h2
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c1
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c2
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c11
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c96
-rw-r--r--fs/xfs/scrub/bmap.c32
-rw-r--r--fs/xfs/scrub/btree.c45
-rw-r--r--fs/xfs/scrub/dir.c3
-rw-r--r--fs/xfs/scrub/inode.c3
-rw-r--r--fs/xfs/scrub/refcount.c8
-rw-r--r--fs/xfs/xfs_bmap_util.c2
-rw-r--r--fs/xfs/xfs_buf.c8
-rw-r--r--fs/xfs/xfs_dquot.c9
-rw-r--r--fs/xfs/xfs_file.c12
-rw-r--r--fs/xfs/xfs_fsmap.c3
-rw-r--r--fs/xfs/xfs_icache.c13
-rw-r--r--fs/xfs/xfs_inode.c110
-rw-r--r--fs/xfs/xfs_ioctl.c5
-rw-r--r--fs/xfs/xfs_iops.c12
-rw-r--r--fs/xfs/xfs_log.c9
-rw-r--r--fs/xfs/xfs_pnfs.c2
-rw-r--r--fs/xfs/xfs_reflink.c22
-rw-r--r--fs/xfs/xfs_rtalloc.c21
-rw-r--r--fs/xfs/xfs_sysfs.h6
-rw-r--r--fs/xfs/xfs_trans_ail.c4
-rw-r--r--fs/xfs/xfs_trans_dquot.c2
-rw-r--r--fs/xfs/xfs_trans_inode.c6
-rw-r--r--include/acpi/acexcep.h10
-rw-r--r--include/acpi/acpi_bus.h6
-rw-r--r--include/acpi/processor.h8
-rw-r--r--include/asm-generic/pgtable.h4
-rw-r--r--include/asm-generic/sections.h3
-rw-r--r--include/asm-generic/topology.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h30
-rw-r--r--include/crypto/acompress.h2
-rw-r--r--include/crypto/aead.h2
-rw-r--r--include/crypto/akcipher.h2
-rw-r--r--include/crypto/hash.h4
-rw-r--r--include/crypto/if_alg.h4
-rw-r--r--include/crypto/kpp.h2
-rw-r--r--include/crypto/rng.h2
-rw-r--r--include/crypto/skcipher.h2
-rw-r--r--include/drm/drm_connector.h71
-rw-r--r--include/keys/big_key-type.h2
-rw-r--r--include/keys/user-type.h3
-rw-r--r--include/linux/acpi.h16
-rw-r--r--include/linux/backing-dev.h10
-rw-r--r--include/linux/bitfield.h2
-rw-r--r--include/linux/bitops.h2
-rw-r--r--include/linux/blkdev.h4
-rw-r--r--include/linux/blktrace_api.h18
-rw-r--r--include/linux/bpf.h9
-rw-r--r--include/linux/bpf_verifier.h5
-rw-r--r--include/linux/buffer_head.h8
-rw-r--r--include/linux/build_bug.h5
-rw-r--r--include/linux/bvec.h9
-rw-r--r--include/linux/can/skb.h28
-rw-r--r--include/linux/cgroup-defs.h8
-rw-r--r--include/linux/cgroup.h4
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/compiler-clang.h1
-rw-r--r--include/linux/compiler-gcc.h23
-rw-r--r--include/linux/compiler.h80
-rw-r--r--include/linux/compiler_types.h6
-rw-r--r--include/linux/console_struct.h1
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/debugfs.h5
-rw-r--r--include/linux/devfreq_cooling.h2
-rw-r--r--include/linux/device-mapper.h6
-rw-r--r--include/linux/dm-bufio.h1
-rw-r--r--include/linux/eeprom_93xx46.h2
-rw-r--r--include/linux/efi.h4
-rw-r--r--include/linux/elfcore.h22
-rw-r--r--include/linux/elfnote.h2
-rw-r--r--include/linux/extcon.h23
-rw-r--r--include/linux/filter.h6
-rw-r--r--include/linux/font.h13
-rw-r--r--include/linux/fs.h9
-rw-r--r--include/linux/fscrypt.h34
-rw-r--r--include/linux/fscrypt_notsupp.h14
-rw-r--r--include/linux/fscrypt_supp.h35
-rw-r--r--include/linux/futex.h40
-rw-r--r--include/linux/genhd.h7
-rw-r--r--include/linux/hid.h44
-rw-r--r--include/linux/hil_mlc.h2
-rw-r--r--include/linux/hugetlb.h5
-rw-r--r--include/linux/hyperv.h2
-rw-r--r--include/linux/i2c-algo-pca.h15
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/icmpv6.h48
-rw-r--r--include/linux/ieee80211.h9
-rw-r--r--include/linux/if_macvlan.h3
-rw-r--r--include/linux/if_vlan.h29
-rw-r--r--include/linux/iio/iio.h2
-rw-r--r--include/linux/intel-iommu.h6
-rw-r--r--include/linux/io-mapping.h5
-rw-r--r--include/linux/io.h2
-rw-r--r--include/linux/iocontext.h1
-rw-r--r--include/linux/iomap.h1
-rw-r--r--include/linux/ipv6.h2
-rw-r--r--include/linux/irq.h13
-rw-r--r--include/linux/kallsyms.h5
-rw-r--r--include/linux/kdev_t.h22
-rw-r--r--include/linux/kexec.h5
-rw-r--r--include/linux/key-type.h2
-rw-r--r--include/linux/key.h1
-rw-r--r--include/linux/kgdb.h2
-rw-r--r--include/linux/khugepaged.h5
-rw-r--r--include/linux/kprobes.h6
-rw-r--r--include/linux/kthread.h3
-rw-r--r--include/linux/kvm_host.h39
-rw-r--r--include/linux/kvm_types.h9
-rw-r--r--include/linux/libata.h17
-rw-r--r--include/linux/log2.h2
-rw-r--r--include/linux/marvell_phy.h5
-rw-r--r--include/linux/mlx5/driver.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h17
-rw-r--r--include/linux/mm.h27
-rw-r--r--include/linux/mmc/card.h2
-rw-r--r--include/linux/mmzone.h13
-rw-r--r--include/linux/mod_devicetable.h8
-rw-r--r--include/linux/module.h26
-rw-r--r--include/linux/msi.h6
-rw-r--r--include/linux/mtd/pfow.h2
-rw-r--r--include/linux/mtd/rawnand.h9
-rw-r--r--include/linux/mutex.h2
-rw-r--r--include/linux/netdevice.h52
-rw-r--r--include/linux/netfilter.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_pptp.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_sctp.h2
-rw-r--r--include/linux/netfilter/nfnetlink.h3
-rw-r--r--include/linux/netfilter/x_tables.h2
-rw-r--r--include/linux/netfilter_ipv4.h2
-rw-r--r--include/linux/netfilter_ipv6.h2
-rw-r--r--include/linux/nfs_page.h2
-rw-r--r--include/linux/node.h11
-rw-r--r--include/linux/nvme-fc-driver.h4
-rw-r--r--include/linux/of.h1
-rw-r--r--include/linux/oom.h1
-rw-r--r--include/linux/overflow.h1
-rw-r--r--include/linux/padata.h13
-rw-r--r--include/linux/pci-epc.h3
-rw-r--r--include/linux/pci.h4
-rw-r--r--include/linux/pci_ids.h36
-rw-r--r--include/linux/percpu_counter.h4
-rw-r--r--include/linux/pnp.h29
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/prandom.h110
-rw-r--r--include/linux/printk.h5
-rw-r--r--include/linux/proc_fs.h8
-rw-r--r--include/linux/qed/qed_chain.h50
-rw-r--r--include/linux/random.h63
-rw-r--r--include/linux/rmap.h3
-rw-r--r--include/linux/sched.h7
-rw-r--r--include/linux/sched/coredump.h1
-rw-r--r--include/linux/sched/mm.h11
-rw-r--r--include/linux/security.h2
-rw-r--r--include/linux/seq_buf.h2
-rw-r--r--include/linux/seqlock.h11
-rw-r--r--include/linux/set_memory.h2
-rw-r--r--include/linux/skbuff.h26
-rw-r--r--include/linux/smp.h2
-rw-r--r--include/linux/spi/spi.h22
-rw-r--r--include/linux/stop_machine.h11
-rw-r--r--include/linux/string.h64
-rw-r--r--include/linux/sunrpc/gss_api.h1
-rw-r--r--include/linux/sunrpc/svc_rdma.h1
-rw-r--r--include/linux/sunrpc/svcauth_gss.h3
-rw-r--r--include/linux/sunrpc/xdr.h3
-rw-r--r--include/linux/sunrpc/xprt.h1
-rw-r--r--include/linux/swab.h1
-rw-r--r--include/linux/swapops.h3
-rw-r--r--include/linux/sysfs.h16
-rw-r--r--include/linux/tcp.h4
-rw-r--r--include/linux/thread_info.h13
-rw-r--r--include/linux/time64.h4
-rw-r--r--include/linux/trace_seq.h4
-rw-r--r--include/linux/tracepoint.h2
-rw-r--r--include/linux/tty.h6
-rw-r--r--include/linux/tty_driver.h2
-rw-r--r--include/linux/u64_stats_sync.h7
-rw-r--r--include/linux/uaccess.h28
-rw-r--r--include/linux/usb/composite.h3
-rw-r--r--include/linux/usb/ehci_def.h2
-rw-r--r--include/linux/usb/pd.h1
-rw-r--r--include/linux/usb_usual.h4
-rw-r--r--include/linux/virtio_net.h56
-rw-r--r--include/linux/virtio_vsock.h3
-rw-r--r--include/linux/vmalloc.h2
-rw-r--r--include/linux/xattr.h2
-rw-r--r--include/linux/zsmalloc.h2
-rw-r--r--include/net/act_api.h3
-rw-r--r--include/net/addrconf.h7
-rw-r--r--include/net/bluetooth/hci_core.h31
-rw-r--r--include/net/bluetooth/l2cap.h2
-rw-r--r--include/net/bonding.h8
-rw-r--r--include/net/cfg80211.h4
-rw-r--r--include/net/dsa.h1
-rw-r--r--include/net/dst.h10
-rw-r--r--include/net/genetlink.h8
-rw-r--r--include/net/icmp.h10
-rw-r--r--include/net/inet_connection_sock.h10
-rw-r--r--include/net/inet_ecn.h23
-rw-r--r--include/net/ip.h6
-rw-r--r--include/net/ip6_route.h1
-rw-r--r--include/net/ip_tunnels.h7
-rw-r--r--include/net/ip_vs.h10
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_log.h1
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/net/netns/xfrm.h4
-rw-r--r--include/net/nfc/nci_core.h1
-rw-r--r--include/net/pkt_sched.h11
-rw-r--r--include/net/red.h16
-rw-r--r--include/net/rtnetlink.h2
-rw-r--r--include/net/sctp/constants.h8
-rw-r--r--include/net/sock.h9
-rw-r--r--include/net/tcp.h24
-rw-r--r--include/net/tls.h6
-rw-r--r--include/net/xfrm.h21
-rw-r--r--include/rdma/uverbs_std_types.h2
-rw-r--r--include/scsi/libfcoe.h2
-rw-r--r--include/scsi/libiscsi.h3
-rw-r--r--include/scsi/scsi_common.h7
-rw-r--r--include/soc/nps/common.h6
-rw-r--r--include/sound/compress_driver.h10
-rw-r--r--include/sound/rawmidi.h1
-rw-r--r--include/sound/rt5670.h1
-rw-r--r--include/target/iscsi/iscsi_target_core.h2
-rw-r--r--include/target/target_core_backend.h1
-rw-r--r--include/trace/events/rpcrdma.h50
-rw-r--r--include/trace/events/rxrpc.h37
-rw-r--r--include/trace/events/sctp.h9
-rw-r--r--include/trace/events/target.h12
-rw-r--r--include/trace/events/writeback.h49
-rw-r--r--include/uapi/linux/bpf.h4
-rw-r--r--include/uapi/linux/const.h5
-rw-r--r--include/uapi/linux/ethtool.h2
-rw-r--r--include/uapi/linux/if_alg.h16
-rw-r--r--include/uapi/linux/input-event-codes.h3
-rw-r--r--include/uapi/linux/kernel.h9
-rw-r--r--include/uapi/linux/kvm.h7
-rw-r--r--include/uapi/linux/lightnvm.h2
-rw-r--r--include/uapi/linux/mmc/ioctl.h1
-rw-r--r--include/uapi/linux/mroute6.h2
-rw-r--r--include/uapi/linux/ndctl.h1
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h2
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_cthelper.h2
-rw-r--r--include/uapi/linux/netfilter/x_tables.h2
-rw-r--r--include/uapi/linux/netfilter/xt_SECMARK.h6
-rw-r--r--include/uapi/linux/netlink.h2
-rw-r--r--include/uapi/linux/nfs4.h3
-rw-r--r--include/uapi/linux/perf_event.h2
-rw-r--r--include/uapi/linux/raid/md_p.h2
-rw-r--r--include/uapi/linux/swab.h10
-rw-r--r--include/uapi/linux/sysctl.h2
-rw-r--r--include/uapi/linux/tty_flags.h4
-rw-r--r--include/uapi/linux/usb/ch9.h3
-rw-r--r--include/uapi/linux/vboxguest.h4
-rw-r--r--include/uapi/linux/videodev2.h17
-rw-r--r--include/uapi/linux/wireless.h9
-rw-r--r--include/uapi/linux/xfrm.h2
-rw-r--r--include/xen/events.h29
-rw-r--r--include/xen/grant_table.h1
-rw-r--r--include/xen/xenbus.h17
-rw-r--r--init/Kconfig6
-rw-r--r--init/init_task.c3
-rw-r--r--init/main.c3
-rw-r--r--ipc/mqueue.c34
-rw-r--r--ipc/util.c12
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/audit.c55
-rw-r--r--kernel/audit.h2
-rw-r--r--kernel/audit_watch.c2
-rw-r--r--kernel/auditfilter.c16
-rw-r--r--kernel/bpf/bpf_lru_list.c7
-rw-r--r--kernel/bpf/cpumap.c2
-rw-r--r--kernel/bpf/hashtab.c20
-rw-r--r--kernel/bpf/inode.c4
-rw-r--r--kernel/bpf/stackmap.c2
-rw-r--r--kernel/bpf/syscall.c32
-rw-r--r--kernel/bpf/verifier.c364
-rw-r--r--kernel/cgroup/cgroup.c31
-rw-r--r--kernel/cgroup/rstat.c16
-rw-r--r--kernel/compat.c6
-rw-r--r--kernel/cpu.c29
-rw-r--r--kernel/cpu_pm.c4
-rw-r--r--kernel/debug/debug_core.c31
-rw-r--r--kernel/debug/kdb/kdb_io.c8
-rw-r--r--kernel/debug/kdb/kdb_private.h2
-rw-r--r--kernel/dma/swiotlb.c6
-rw-r--r--kernel/elfcore.c26
-rw-r--r--kernel/events/core.c61
-rw-r--r--kernel/events/internal.h2
-rw-r--r--kernel/events/uprobes.c16
-rw-r--r--kernel/exit.c66
-rw-r--r--kernel/fail_function.c5
-rw-r--r--kernel/fork.c71
-rw-r--r--kernel/futex.c516
-rw-r--r--kernel/gcov/fs.c2
-rw-r--r--kernel/gcov/gcc_4_7.c4
-rw-r--r--kernel/irq/Kconfig1
-rw-r--r--kernel/irq/irqdomain.c21
-rw-r--r--kernel/irq/manage.c45
-rw-r--r--kernel/irq/matrix.c11
-rw-r--r--kernel/irq/msi.c44
-rw-r--r--kernel/jump_label.c26
-rw-r--r--kernel/kallsyms.c17
-rw-r--r--kernel/kexec_core.c2
-rw-r--r--kernel/kexec_file.c9
-rw-r--r--kernel/kmod.c4
-rw-r--r--kernel/kprobes.c101
-rw-r--r--kernel/kthread.c47
-rw-r--r--kernel/locking/lockdep.c7
-rw-r--r--kernel/locking/lockdep_proc.c2
-rw-r--r--kernel/locking/locktorture.c8
-rw-r--r--kernel/locking/mutex-debug.c4
-rw-r--r--kernel/locking/mutex-debug.h2
-rw-r--r--kernel/locking/mutex.c43
-rw-r--r--kernel/locking/mutex.h4
-rw-r--r--kernel/locking/qrwlock.c7
-rw-r--r--kernel/locking/rtmutex.c3
-rw-r--r--kernel/locking/rtmutex_common.h3
-rw-r--r--kernel/module.c158
-rw-r--r--kernel/padata.c114
-rw-r--r--kernel/power/hibernate.c18
-rw-r--r--kernel/power/swap.c2
-rw-r--r--kernel/printk/internal.h5
-rw-r--r--kernel/printk/printk.c37
-rw-r--r--kernel/printk/printk_safe.c27
-rw-r--r--kernel/ptrace.c34
-rw-r--r--kernel/reboot.c28
-rw-r--r--kernel/relay.c6
-rw-r--r--kernel/sched/core.c18
-rw-r--r--kernel/sched/deadline.c6
-rw-r--r--kernel/sched/debug.c42
-rw-r--r--kernel/sched/fair.c47
-rw-r--r--kernel/sched/sched.h63
-rw-r--r--kernel/sched/topology.c2
-rw-r--r--kernel/seccomp.c7
-rw-r--r--kernel/signal.c21
-rw-r--r--kernel/smp.c10
-rw-r--r--kernel/smpboot.c1
-rw-r--r--kernel/sys.c4
-rw-r--r--kernel/sysctl.c40
-rw-r--r--kernel/time/alarmtimer.c2
-rw-r--r--kernel/time/hrtimer.c62
-rw-r--r--kernel/time/itimer.c4
-rw-r--r--kernel/time/posix-cpu-timers.c2
-rw-r--r--kernel/time/posix-timers.c4
-rw-r--r--kernel/time/tick-common.c2
-rw-r--r--kernel/time/timekeeping.c3
-rw-r--r--kernel/time/timer.c22
-rw-r--r--kernel/trace/Kconfig2
-rw-r--r--kernel/trace/blktrace.c190
-rw-r--r--kernel/trace/ftrace.c50
-rw-r--r--kernel/trace/ring_buffer.c72
-rw-r--r--kernel/trace/trace.c73
-rw-r--r--kernel/trace/trace.h32
-rw-r--r--kernel/trace/trace_clock.c44
-rw-r--r--kernel/trace/trace_entries.h2
-rw-r--r--kernel/trace/trace_events.c9
-rw-r--r--kernel/trace/trace_events_filter.c6
-rw-r--r--kernel/trace/trace_events_hist.c1
-rw-r--r--kernel/trace/trace_events_trigger.c31
-rw-r--r--kernel/trace/trace_hwlat.c7
-rw-r--r--kernel/trace/trace_kprobe.c10
-rw-r--r--kernel/trace/trace_preemptirq.c4
-rw-r--r--kernel/trace/trace_selftest.c9
-rw-r--r--kernel/tracepoint.c80
-rw-r--r--kernel/umh.c14
-rw-r--r--kernel/up.c2
-rw-r--r--kernel/workqueue.c24
-rw-r--r--lib/Makefile2
-rw-r--r--lib/bug.c33
-rw-r--r--lib/crc32.c2
-rw-r--r--lib/crc32test.c4
-rw-r--r--lib/devres.c19
-rw-r--r--lib/dynamic_debug.c23
-rw-r--r--lib/find_bit.c16
-rw-r--r--lib/fonts/font_10x18.c9
-rw-r--r--lib/fonts/font_6x10.c9
-rw-r--r--lib/fonts/font_6x11.c9
-rw-r--r--lib/fonts/font_7x14.c9
-rw-r--r--lib/fonts/font_8x16.c9
-rw-r--r--lib/fonts/font_8x8.c9
-rw-r--r--lib/fonts/font_acorn_8x8.c9
-rw-r--r--lib/fonts/font_mini_4x6.c8
-rw-r--r--lib/fonts/font_pearl_8x8.c9
-rw-r--r--lib/fonts/font_sun12x22.c9
-rw-r--r--lib/fonts/font_sun8x16.c7
-rw-r--r--lib/genalloc.c25
-rw-r--r--lib/kobject_uevent.c9
-rw-r--r--lib/logic_pio.c3
-rw-r--r--lib/mpi/longlong.h36
-rw-r--r--lib/nlattr.c2
-rw-r--r--lib/raid6/neon.uc5
-rw-r--r--lib/raid6/recov_neon_inner.c7
-rw-r--r--lib/random32.c462
-rw-r--r--lib/scatterlist.c2
-rw-r--r--lib/stackdepot.c6
-rw-r--r--lib/string.c71
-rw-r--r--lib/strncpy_from_user.c23
-rw-r--r--lib/strnlen_user.c23
-rw-r--r--lib/test_kmod.c2
-rw-r--r--lib/zlib_inflate/inffast.c91
-rw-r--r--mm/backing-dev.c1
-rw-r--r--mm/filemap.c8
-rw-r--r--mm/gup.c44
-rw-r--r--mm/huge_memory.c96
-rw-r--r--mm/hugetlb.c149
-rw-r--r--mm/kasan/kasan_init.c23
-rw-r--r--mm/khugepaged.c72
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/ksm.c13
-rw-r--r--mm/list_lru.c10
-rw-r--r--mm/maccess.c167
-rw-r--r--mm/memblock.c49
-rw-r--r--mm/memcontrol.c9
-rw-r--r--mm/memory-failure.c2
-rw-r--r--mm/memory.c139
-rw-r--r--mm/memory_hotplug.c5
-rw-r--r--mm/mempolicy.c6
-rw-r--r--mm/mmap.c3
-rw-r--r--mm/mremap.c2
-rw-r--r--mm/oom_kill.c2
-rw-r--r--mm/page_alloc.c54
-rw-r--r--mm/page_counter.c6
-rw-r--r--mm/page_io.c11
-rw-r--r--mm/pagewalk.c4
-rw-r--r--mm/percpu.c2
-rw-r--r--mm/shmem.c23
-rw-r--r--mm/slab_common.c52
-rw-r--r--mm/slub.c32
-rw-r--r--mm/sparse.c1
-rw-r--r--mm/swap_state.c8
-rw-r--r--mm/swapfile.c10
-rw-r--r--mm/userfaultfd.c2
-rw-r--r--mm/util.c18
-rw-r--r--mm/vmalloc.c26
-rw-r--r--mm/vmscan.c53
-rw-r--r--mm/vmstat.c3
-rw-r--r--mm/zsmalloc.c17
-rw-r--r--net/8021q/vlan.c3
-rw-r--r--net/9p/trans_fd.c41
-rw-r--r--net/appletalk/ddp.c33
-rw-r--r--net/atm/lec.c6
-rw-r--r--net/ax25/af_ax25.c16
-rw-r--r--net/batman-adv/bat_v_elp.c15
-rw-r--r--net/batman-adv/bat_v_ogm.c13
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c150
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h4
-rw-r--r--net/batman-adv/gateway_client.c6
-rw-r--r--net/batman-adv/log.c1
-rw-r--r--net/batman-adv/network-coding.c9
-rw-r--r--net/batman-adv/routing.c4
-rw-r--r--net/batman-adv/soft-interface.c6
-rw-r--r--net/batman-adv/sysfs.c3
-rw-r--r--net/batman-adv/translation-table.c2
-rw-r--r--net/bluetooth/6lowpan.c5
-rw-r--r--net/bluetooth/a2mp.c23
-rw-r--r--net/bluetooth/amp.c3
-rw-r--r--net/bluetooth/cmtp/core.c5
-rw-r--r--net/bluetooth/ecdh_helper.h2
-rw-r--r--net/bluetooth/hci_conn.c17
-rw-r--r--net/bluetooth/hci_core.c6
-rw-r--r--net/bluetooth/hci_event.c131
-rw-r--r--net/bluetooth/hci_request.c12
-rw-r--r--net/bluetooth/l2cap_core.c40
-rw-r--r--net/bluetooth/l2cap_sock.c47
-rw-r--r--net/bluetooth/mgmt.c7
-rw-r--r--net/bluetooth/smp.c9
-rw-r--r--net/bridge/br_arp_nd_proxy.c8
-rw-r--r--net/bridge/br_device.c1
-rw-r--r--net/bridge/br_netfilter_hooks.c7
-rw-r--r--net/bridge/br_private.h2
-rw-r--r--net/bridge/br_sysfs_if.c9
-rw-r--r--net/bridge/br_vlan.c4
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c6
-rw-r--r--net/can/af_can.c38
-rw-r--r--net/can/proc.c6
-rw-r--r--net/ceph/messenger.c5
-rw-r--r--net/ceph/osd_client.c5
-rw-r--r--net/compat.c1
-rw-r--r--net/core/dev.c98
-rw-r--r--net/core/devlink.c6
-rw-r--r--net/core/drop_monitor.c11
-rw-r--r--net/core/ethtool.c2
-rw-r--r--net/core/filter.c60
-rw-r--r--net/core/gen_estimator.c11
-rw-r--r--net/core/lwt_bpf.c8
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/net-sysfs.c67
-rw-r--r--net/core/netclassid_cgroup.c4
-rw-r--r--net/core/netpoll.c24
-rw-r--r--net/core/netprio_cgroup.c2
-rw-r--r--net/core/pktgen.c2
-rw-r--r--net/core/rtnetlink.c3
-rw-r--r--net/core/skbuff.c51
-rw-r--r--net/core/sock.c27
-rw-r--r--net/core/sock_reuseport.c3
-rw-r--r--net/core/sysctl_net_core.c2
-rw-r--r--net/dcb/dcbnl.c10
-rw-r--r--net/dccp/ipv6.c11
-rw-r--r--net/dns_resolver/dns_key.c2
-rw-r--r--net/dsa/Kconfig1
-rw-r--r--net/dsa/dsa.c2
-rw-r--r--net/dsa/dsa2.c3
-rw-r--r--net/dsa/dsa_priv.h3
-rw-r--r--net/dsa/master.c5
-rw-r--r--net/dsa/slave.c27
-rw-r--r--net/dsa/tag_edsa.c37
-rw-r--r--net/dsa/tag_mtk.c34
-rw-r--r--net/hsr/hsr_framereg.c3
-rw-r--r--net/hsr/hsr_netlink.c10
-rw-r--r--net/ieee802154/nl-mac.c7
-rw-r--r--net/ieee802154/nl802154.c52
-rw-r--r--net/ipv4/cipso_ipv4.c17
-rw-r--r--net/ipv4/devinet.c14
-rw-r--r--net/ipv4/esp4.c7
-rw-r--r--net/ipv4/fib_frontend.c2
-rw-r--r--net/ipv4/fib_semantics.c2
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/gre_demux.c2
-rw-r--r--net/ipv4/gre_offload.c13
-rw-r--r--net/ipv4/icmp.c41
-rw-r--r--net/ipv4/inet_connection_sock.c143
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/inet_hashtables.c1
-rw-r--r--net/ipv4/ip_gre.c15
-rw-r--r--net/ipv4/ip_output.c5
-rw-r--r--net/ipv4/ip_tunnel.c40
-rw-r--r--net/ipv4/ip_vti.c79
-rw-r--r--net/ipv4/ipip.c2
-rw-r--r--net/ipv4/netfilter.c12
-rw-r--r--net/ipv4/netfilter/arp_tables.c2
-rw-r--r--net/ipv4/netfilter/ip_tables.c2
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c2
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c2
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c2
-rw-r--r--net/ipv4/netfilter/nf_log_arp.c19
-rw-r--r--net/ipv4/netfilter/nf_log_ipv4.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c7
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nft_chain_route_ipv4.c2
-rw-r--r--net/ipv4/ping.c3
-rw-r--r--net/ipv4/route.c41
-rw-r--r--net/ipv4/syncookies.c9
-rw-r--r--net/ipv4/tcp.c105
-rw-r--r--net/ipv4/tcp_bbr.c182
-rw-r--r--net/ipv4/tcp_cong.c11
-rw-r--r--net/ipv4/tcp_cubic.c2
-rw-r--r--net/ipv4/tcp_diag.c5
-rw-r--r--net/ipv4/tcp_input.c64
-rw-r--r--net/ipv4/tcp_ipv4.c38
-rw-r--r--net/ipv4/tcp_minisocks.c11
-rw-r--r--net/ipv4/tcp_output.c36
-rw-r--r--net/ipv4/tcp_recovery.c5
-rw-r--r--net/ipv4/udp.c20
-rw-r--r--net/ipv4/udp_offload.c2
-rw-r--r--net/ipv4/xfrm4_output.c2
-rw-r--r--net/ipv6/Kconfig1
-rw-r--r--net/ipv6/addrconf.c3
-rw-r--r--net/ipv6/addrconf_core.c11
-rw-r--r--net/ipv6/addrlabel.c26
-rw-r--r--net/ipv6/af_inet6.c4
-rw-r--r--net/ipv6/ah6.c3
-rw-r--r--net/ipv6/anycast.c17
-rw-r--r--net/ipv6/calipso.c17
-rw-r--r--net/ipv6/datagram.c2
-rw-r--r--net/ipv6/esp6.c7
-rw-r--r--net/ipv6/esp6_offload.c9
-rw-r--r--net/ipv6/icmp.c19
-rw-r--r--net/ipv6/inet6_connection_sock.c4
-rw-r--r--net/ipv6/ip6_fib.c29
-rw-r--r--net/ipv6/ip6_gre.c43
-rw-r--r--net/ipv6/ip6_icmp.c46
-rw-r--r--net/ipv6/ip6_input.c10
-rw-r--r--net/ipv6/ip6_output.c48
-rw-r--r--net/ipv6/ip6_tunnel.c23
-rw-r--r--net/ipv6/ip6_vti.c3
-rw-r--r--net/ipv6/ipv6_sockglue.c3
-rw-r--r--net/ipv6/mcast.c4
-rw-r--r--net/ipv6/netfilter.c6
-rw-r--r--net/ipv6/netfilter/ip6_tables.c2
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c2
-rw-r--r--net/ipv6/netfilter/nf_log_ipv6.c8
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c2
-rw-r--r--net/ipv6/netfilter/nft_chain_route_ipv6.c2
-rw-r--r--net/ipv6/raw.c4
-rw-r--r--net/ipv6/reassembly.c4
-rw-r--r--net/ipv6/route.c14
-rw-r--r--net/ipv6/sit.c16
-rw-r--r--net/ipv6/syncookies.c12
-rw-r--r--net/ipv6/tcp_ipv6.c24
-rw-r--r--net/ipv6/udp.c17
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/iucv/af_iucv.c7
-rw-r--r--net/key/af_key.c13
-rw-r--r--net/l2tp/l2tp_core.c8
-rw-r--r--net/l2tp/l2tp_ip.c29
-rw-r--r--net/l2tp/l2tp_ip6.c32
-rw-r--r--net/lapb/lapb_out.c3
-rw-r--r--net/llc/af_llc.c12
-rw-r--r--net/mac80211/cfg.c12
-rw-r--r--net/mac80211/driver-ops.c5
-rw-r--r--net/mac80211/ibss.c2
-rw-r--r--net/mac80211/ieee80211_i.h37
-rw-r--r--net/mac80211/iface.c17
-rw-r--r--net/mac80211/key.c7
-rw-r--r--net/mac80211/key.h2
-rw-r--r--net/mac80211/main.c20
-rw-r--r--net/mac80211/mesh_hwmp.c9
-rw-r--r--net/mac80211/mesh_pathtbl.c5
-rw-r--r--net/mac80211/mlme.c7
-rw-r--r--net/mac80211/rate.c3
-rw-r--r--net/mac80211/rc80211_minstrel.c27
-rw-r--r--net/mac80211/rc80211_minstrel.h1
-rw-r--r--net/mac80211/rx.c186
-rw-r--r--net/mac80211/sta_info.c44
-rw-r--r--net/mac80211/sta_info.h32
-rw-r--r--net/mac80211/status.c5
-rw-r--r--net/mac80211/tx.c39
-rw-r--r--net/mac80211/vht.c22
-rw-r--r--net/mac80211/wpa.c13
-rw-r--r--net/mac802154/llsec.c2
-rw-r--r--net/mac802154/tx.c8
-rw-r--r--net/mpls/af_mpls.c7
-rw-r--r--net/mpls/mpls_gso.c3
-rw-r--r--net/ncsi/ncsi-manage.c25
-rw-r--r--net/ncsi/ncsi-netlink.c22
-rw-r--r--net/ncsi/ncsi-netlink.h3
-rw-r--r--net/ncsi/ncsi-rsp.c2
-rw-r--r--net/netfilter/ipset/ip_set_core.c5
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h20
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c16
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c6
-rw-r--r--net/netfilter/nf_conntrack_core.c7
-rw-r--r--net/netfilter/nf_conntrack_netlink.c3
-rw-r--r--net/netfilter/nf_conntrack_pptp.c62
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c39
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c19
-rw-r--r--net/netfilter/nf_conntrack_standalone.c9
-rw-r--r--net/netfilter/nf_dup_netdev.c1
-rw-r--r--net/netfilter/nf_flow_table_core.c4
-rw-r--r--net/netfilter/nf_log_common.c12
-rw-r--r--net/netfilter/nf_nat_core.c1
-rw-r--r--net/netfilter/nf_nat_proto_udp.c5
-rw-r--r--net/netfilter/nf_tables_api.c71
-rw-r--r--net/netfilter/nfnetlink.c11
-rw-r--r--net/netfilter/nfnetlink_cthelper.c3
-rw-r--r--net/netfilter/nfnetlink_log.c3
-rw-r--r--net/netfilter/nfnetlink_osf.c14
-rw-r--r--net/netfilter/nfnetlink_queue.c2
-rw-r--r--net/netfilter/nft_dynset.c4
-rw-r--r--net/netfilter/nft_fwd_netdev.c1
-rw-r--r--net/netfilter/nft_limit.c4
-rw-r--r--net/netfilter/nft_nat.c4
-rw-r--r--net/netfilter/nft_payload.c4
-rw-r--r--net/netfilter/nft_set_hash.c10
-rw-r--r--net/netfilter/nft_set_rbtree.c17
-rw-r--r--net/netfilter/x_tables.c18
-rw-r--r--net/netfilter/xt_RATEEST.c3
-rw-r--r--net/netfilter/xt_SECMARK.c88
-rw-r--r--net/netfilter/xt_recent.c12
-rw-r--r--net/netlabel/netlabel_cipso_v4.c3
-rw-r--r--net/netlabel/netlabel_domainhash.c59
-rw-r--r--net/netlabel/netlabel_kapi.c6
-rw-r--r--net/netlabel/netlabel_unlabeled.c17
-rw-r--r--net/netlink/genetlink.c49
-rw-r--r--net/netrom/nr_route.c1
-rw-r--r--net/nfc/digital_dep.c2
-rw-r--r--net/nfc/llcp_sock.c14
-rw-r--r--net/nfc/nci/core.c1
-rw-r--r--net/nfc/nci/hci.c5
-rw-r--r--net/nfc/netlink.c3
-rw-r--r--net/nfc/rawsock.c9
-rw-r--r--net/openvswitch/actions.c8
-rw-r--r--net/openvswitch/conntrack.c60
-rw-r--r--net/openvswitch/meter.c12
-rw-r--r--net/openvswitch/meter.h2
-rw-r--r--net/packet/af_packet.c18
-rw-r--r--net/qrtr/qrtr.c69
-rw-r--r--net/qrtr/tun.c18
-rw-r--r--net/rds/rdma.c3
-rw-r--r--net/rds/recv.c3
-rw-r--r--net/rose/rose_loopback.c17
-rw-r--r--net/rxrpc/af_rxrpc.c6
-rw-r--r--net/rxrpc/call_accept.c8
-rw-r--r--net/rxrpc/call_object.c27
-rw-r--r--net/rxrpc/conn_event.c6
-rw-r--r--net/rxrpc/conn_object.c8
-rw-r--r--net/rxrpc/input.c47
-rw-r--r--net/rxrpc/key.c51
-rw-r--r--net/rxrpc/local_object.c9
-rw-r--r--net/rxrpc/output.c44
-rw-r--r--net/rxrpc/proc.c6
-rw-r--r--net/rxrpc/recvmsg.c4
-rw-r--r--net/rxrpc/rxkad.c3
-rw-r--r--net/rxrpc/sendmsg.c5
-rw-r--r--net/sched/act_api.c3
-rw-r--r--net/sched/act_connmark.c9
-rw-r--r--net/sched/act_csum.c2
-rw-r--r--net/sched/act_skbedit.c2
-rw-r--r--net/sched/act_tunnel_key.c2
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/sched/cls_flow.c8
-rw-r--r--net/sched/cls_flower.c2
-rw-r--r--net/sched/cls_tcindex.c8
-rw-r--r--net/sched/em_ipset.c2
-rw-r--r--net/sched/em_meta.c2
-rw-r--r--net/sched/sch_api.c11
-rw-r--r--net/sched/sch_atm.c8
-rw-r--r--net/sched/sch_cake.c62
-rw-r--r--net/sched/sch_choke.c10
-rw-r--r--net/sched/sch_dsmark.c9
-rw-r--r--net/sched/sch_etf.c7
-rw-r--r--net/sched/sch_fq_codel.c2
-rw-r--r--net/sched/sch_generic.c49
-rw-r--r--net/sched/sch_gred.c2
-rw-r--r--net/sched/sch_netem.c9
-rw-r--r--net/sched/sch_red.c7
-rw-r--r--net/sched/sch_sfq.c11
-rw-r--r--net/sched/sch_skbprio.c3
-rw-r--r--net/sched/sch_teql.c5
-rw-r--r--net/sctp/associola.c5
-rw-r--r--net/sctp/auth.c1
-rw-r--r--net/sctp/bind_addr.c1
-rw-r--r--net/sctp/input.c4
-rw-r--r--net/sctp/ipv6.c11
-rw-r--r--net/sctp/outqueue.c6
-rw-r--r--net/sctp/proc.c16
-rw-r--r--net/sctp/protocol.c3
-rw-r--r--net/sctp/sm_make_chunk.c8
-rw-r--r--net/sctp/sm_sideeffect.c22
-rw-r--r--net/sctp/sm_statefuns.c43
-rw-r--r--net/sctp/socket.c57
-rw-r--r--net/sctp/stream.c27
-rw-r--r--net/sctp/transport.c2
-rw-r--r--net/smc/af_smc.c4
-rw-r--r--net/smc/smc_core.c2
-rw-r--r--net/smc/smc_diag.c16
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/addr.c6
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c30
-rw-r--r--net/sunrpc/auth_gss/auth_gss_internal.h45
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c31
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c12
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c56
-rw-r--r--net/sunrpc/rpc_pipe.c1
-rw-r--r--net/sunrpc/rpcb_clnt.c4
-rw-r--r--net/sunrpc/svc.c6
-rw-r--r--net/sunrpc/svc_xprt.c26
-rw-r--r--net/sunrpc/svcsock.c4
-rw-r--r--net/sunrpc/xdr.c4
-rw-r--r--net/sunrpc/xprt.c65
-rw-r--r--net/sunrpc/xprtrdma/module.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c7
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c24
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c31
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c32
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c5
-rw-r--r--net/sunrpc/xprtrdma/transport.c1
-rw-r--r--net/sunrpc/xprtsock.c4
-rw-r--r--net/tipc/bcast.c8
-rw-r--r--net/tipc/core.c5
-rw-r--r--net/tipc/group.c18
-rw-r--r--net/tipc/link.c21
-rw-r--r--net/tipc/msg.c14
-rw-r--r--net/tipc/netlink_compat.c14
-rw-r--r--net/tipc/node.c7
-rw-r--r--net/tipc/socket.c21
-rw-r--r--net/tipc/topsrv.c19
-rw-r--r--net/tipc/udp_media.c9
-rw-r--r--net/tls/tls_device.c19
-rw-r--r--net/tls/tls_sw.c6
-rw-r--r--net/unix/af_unix.c11
-rw-r--r--net/vmw_vsock/af_vsock.c18
-rw-r--r--net/vmw_vsock/hyperv_transport.c4
-rw-r--r--net/vmw_vsock/virtio_transport.c265
-rw-r--r--net/vmw_vsock/virtio_transport_common.c17
-rw-r--r--net/vmw_vsock/vmci_transport.c3
-rw-r--r--net/wireless/nl80211.c16
-rw-r--r--net/wireless/reg.c5
-rw-r--r--net/wireless/scan.c2
-rw-r--r--net/wireless/sme.c2
-rw-r--r--net/wireless/util.c7
-rw-r--r--net/wireless/wext-core.c5
-rw-r--r--net/x25/af_x25.c9
-rw-r--r--net/x25/x25_dev.c4
-rw-r--r--net/x25/x25_subr.c6
-rw-r--r--net/xdp/xdp_umem.c13
-rw-r--r--net/xdp/xsk.c12
-rw-r--r--net/xdp/xsk_queue.h7
-rw-r--r--net/xfrm/xfrm_device.c4
-rw-r--r--net/xfrm/xfrm_input.c4
-rw-r--r--net/xfrm/xfrm_interface.c32
-rw-r--r--net/xfrm/xfrm_output.c15
-rw-r--r--net/xfrm/xfrm_policy.c11
-rw-r--r--net/xfrm/xfrm_state.c60
-rwxr-xr-x[-rw-r--r--]samples/bpf/lwt_len_hist.sh2
-rw-r--r--samples/bpf/lwt_len_hist_user.c2
-rwxr-xr-x[-rw-r--r--]samples/bpf/test_lwt_bpf.sh0
-rw-r--r--samples/bpf/tracex1_kern.c4
-rw-r--r--samples/kfifo/bytestream-example.c8
-rw-r--r--samples/kfifo/inttype-example.c8
-rw-r--r--samples/kfifo/record-example.c8
-rw-r--r--samples/mic/mpssd/mpssd.c4
-rw-r--r--samples/vfio-mdev/mdpy.c2
-rw-r--r--scripts/Kbuild.include11
-rw-r--r--scripts/Makefile9
-rwxr-xr-xscripts/bloat-o-meter2
-rwxr-xr-xscripts/checkpatch.pl6
-rwxr-xr-xscripts/config7
-rwxr-xr-xscripts/decode_stacktrace.sh4
-rwxr-xr-xscripts/decodecode2
-rwxr-xr-xscripts/depmod.sh2
-rwxr-xr-xscripts/diffconfig2
-rw-r--r--scripts/gcc-plugins/Makefile1
-rw-r--r--scripts/gcc-plugins/gcc-common.h4
-rw-r--r--scripts/gdb/linux/symbols.py2
-rw-r--r--scripts/kconfig/nconf.c2
-rw-r--r--scripts/kconfig/preprocess.c2
-rw-r--r--scripts/kconfig/qconf.cc81
-rwxr-xr-xscripts/mksysmap2
-rw-r--r--scripts/mod/modpost.c2
-rw-r--r--scripts/recordmcount.c2
-rwxr-xr-xscripts/recordmcount.pl21
-rwxr-xr-xscripts/setlocalversion21
-rwxr-xr-xscripts/split-man.pl2
-rw-r--r--security/apparmor/apparmorfs.c3
-rw-r--r--security/apparmor/audit.c3
-rw-r--r--security/apparmor/domain.c12
-rw-r--r--security/apparmor/include/label.h1
-rw-r--r--security/apparmor/label.c37
-rw-r--r--security/apparmor/lsm.c9
-rw-r--r--security/apparmor/match.c5
-rw-r--r--security/commoncap.c70
-rw-r--r--security/integrity/evm/evm_crypto.c6
-rw-r--r--security/integrity/evm/evm_main.c10
-rw-r--r--security/integrity/evm/evm_secfs.c9
-rw-r--r--security/integrity/ima/ima.h10
-rw-r--r--security/integrity/ima/ima_crypto.c30
-rw-r--r--security/integrity/ima/ima_fs.c3
-rw-r--r--security/integrity/ima/ima_init.c2
-rw-r--r--security/integrity/ima/ima_kexec.c3
-rw-r--r--security/integrity/ima/ima_mok.c5
-rw-r--r--security/integrity/ima/ima_policy.c3
-rw-r--r--security/integrity/ima/ima_template_lib.c18
-rw-r--r--security/keys/big_key.c11
-rw-r--r--security/keys/encrypted-keys/encrypted.c7
-rw-r--r--security/keys/internal.h3
-rw-r--r--security/keys/key.c4
-rw-r--r--security/keys/keyctl.c117
-rw-r--r--security/keys/keyring.c6
-rw-r--r--security/keys/proc.c2
-rw-r--r--security/keys/request_key_auth.c7
-rw-r--r--security/keys/trusted.c16
-rw-r--r--security/keys/user_defined.c5
-rw-r--r--security/lsm_audit.c7
-rw-r--r--security/selinux/hooks.c98
-rw-r--r--security/selinux/ibpkey.c4
-rw-r--r--security/selinux/selinuxfs.c1
-rw-r--r--security/selinux/ss/services.c4
-rw-r--r--security/smack/smackfs.c50
-rw-r--r--sound/core/compress_offload.c4
-rw-r--r--sound/core/control.c2
-rw-r--r--sound/core/hwdep.c4
-rw-r--r--sound/core/info.c4
-rw-r--r--sound/core/init.c2
-rw-r--r--sound/core/oss/mulaw.c4
-rw-r--r--sound/core/oss/pcm_oss.c28
-rw-r--r--sound/core/oss/pcm_plugin.c44
-rw-r--r--sound/core/pcm_lib.c1
-rw-r--r--sound/core/pcm_native.c14
-rw-r--r--sound/core/rawmidi.c80
-rw-r--r--sound/core/seq/oss/seq_oss.c11
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c3
-rw-r--r--sound/core/seq/seq_queue.h8
-rw-r--r--sound/drivers/aloop.c11
-rw-r--r--sound/drivers/opl3/opl3_synth.c2
-rw-r--r--sound/firewire/Kconfig4
-rw-r--r--sound/firewire/bebob/bebob.c2
-rw-r--r--sound/firewire/bebob/bebob_hwdep.c3
-rw-r--r--sound/firewire/dice/dice-alesis.c2
-rw-r--r--sound/firewire/dice/dice-tcelectronic.c4
-rw-r--r--sound/firewire/digi00x/digi00x.c5
-rw-r--r--sound/firewire/fireface/ff-transaction.c2
-rw-r--r--sound/firewire/fireworks/fireworks_transaction.c4
-rw-r--r--sound/firewire/oxfw/oxfw.c1
-rw-r--r--sound/firewire/tascam/tascam-transaction.c2
-rw-r--r--sound/firewire/tascam/tascam.c30
-rw-r--r--sound/hda/ext/hdac_ext_controller.c2
-rw-r--r--sound/hda/hdac_bus.c4
-rw-r--r--sound/hda/hdac_device.c2
-rw-r--r--sound/isa/es1688/es1688.c4
-rw-r--r--sound/isa/opti9xx/miro.c9
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c9
-rw-r--r--sound/isa/sb/emu8000.c4
-rw-r--r--sound/isa/sb/sb16_csp.c8
-rw-r--r--sound/isa/sb/sb8.c4
-rw-r--r--sound/isa/wavefront/wavefront_synth.c8
-rw-r--r--sound/pci/asihpi/hpioctl.c4
-rw-r--r--sound/pci/ca0106/ca0106_main.c3
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c2
-rw-r--r--sound/pci/cs46xx/dsp_spos_scb_lib.c2
-rw-r--r--sound/pci/ctxfi/cthw20k2.c2
-rw-r--r--sound/pci/echoaudio/echoaudio.c2
-rw-r--r--sound/pci/hda/hda_auto_parser.c6
-rw-r--r--sound/pci/hda/hda_beep.c6
-rw-r--r--sound/pci/hda/hda_bind.c4
-rw-r--r--sound/pci/hda/hda_codec.c4
-rw-r--r--sound/pci/hda/hda_controller.c18
-rw-r--r--sound/pci/hda/hda_generic.c32
-rw-r--r--sound/pci/hda/hda_generic.h1
-rw-r--r--sound/pci/hda/hda_intel.c53
-rw-r--r--sound/pci/hda/hda_sysfs.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c2
-rw-r--r--sound/pci/hda/patch_conexant.c15
-rw-r--r--sound/pci/hda/patch_hdmi.c50
-rw-r--r--sound/pci/hda/patch_realtek.c420
-rw-r--r--sound/pci/hda/patch_sigmatel.c2
-rw-r--r--sound/pci/hda/patch_via.c14
-rw-r--r--sound/pci/ice1712/ice1712.c3
-rw-r--r--sound/pci/ice1712/prodigy192.c2
-rw-r--r--sound/pci/ice1712/prodigy_hifi.c4
-rw-r--r--sound/pci/lx6464es/lx6464es.c8
-rw-r--r--sound/pci/mixart/mixart_core.c5
-rw-r--r--sound/pci/oxygen/xonar_dg.c2
-rw-r--r--sound/pci/rme9652/hdsp.c3
-rw-r--r--sound/pci/rme9652/hdspm.c3
-rw-r--r--sound/pci/rme9652/rme9652.c3
-rw-r--r--sound/soc/codecs/ak4458.c1
-rw-r--r--sound/soc/codecs/ak5558.c5
-rw-r--r--sound/soc/codecs/cpcap.c12
-rw-r--r--sound/soc/codecs/cs35l33.c1
-rw-r--r--sound/soc/codecs/cs42l42.c74
-rw-r--r--sound/soc/codecs/cs42l42.h13
-rw-r--r--sound/soc/codecs/cs42l56.c3
-rw-r--r--sound/soc/codecs/cs43130.c28
-rw-r--r--sound/soc/codecs/es8316.c9
-rw-r--r--sound/soc/codecs/hdac_hdmi.c6
-rw-r--r--sound/soc/codecs/max98090.c8
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c4
-rw-r--r--sound/soc/codecs/rt286.c23
-rw-r--r--sound/soc/codecs/rt5640.c4
-rw-r--r--sound/soc/codecs/rt5645.c14
-rw-r--r--sound/soc/codecs/rt5651.c4
-rw-r--r--sound/soc/codecs/rt5659.c5
-rw-r--r--sound/soc/codecs/rt5670.c71
-rw-r--r--sound/soc/codecs/rt5670.h2
-rw-r--r--sound/soc/codecs/sgtl5000.c36
-rw-r--r--sound/soc/codecs/sgtl5000.h1
-rw-r--r--sound/soc/codecs/tas571x.c20
-rw-r--r--sound/soc/codecs/wm8958-dsp2.c4
-rw-r--r--sound/soc/codecs/wm8960.c11
-rw-r--r--sound/soc/codecs/wm8994.c10
-rw-r--r--sound/soc/codecs/wm8997.c2
-rw-r--r--sound/soc/codecs/wm8998.c4
-rw-r--r--sound/soc/codecs/wm_adsp.c5
-rw-r--r--sound/soc/codecs/wm_hubs.c3
-rw-r--r--sound/soc/codecs/wm_hubs.h1
-rw-r--r--sound/soc/davinci/davinci-mcasp.c4
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c1
-rw-r--r--sound/soc/fsl/fsl_esai.c8
-rw-r--r--sound/soc/fsl/fsl_ssi.c19
-rw-r--r--sound/soc/img/img-i2s-in.c5
-rw-r--r--sound/soc/img/img-i2s-out.c8
-rw-r--r--sound/soc/img/img-parallel-out.c4
-rw-r--r--sound/soc/intel/atom/sst-atom-controls.c4
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c11
-rw-r--r--sound/soc/intel/atom/sst/sst_pci.c2
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c2
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c114
-rw-r--r--sound/soc/intel/boards/haswell.c1
-rw-r--r--sound/soc/intel/skylake/cnl-sst.c1
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c4
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c2
-rw-r--r--sound/soc/meson/Kconfig2
-rw-r--r--sound/soc/meson/axg-fifo.c10
-rw-r--r--sound/soc/meson/axg-tdm-interface.c40
-rw-r--r--sound/soc/qcom/Kconfig2
-rw-r--r--sound/soc/qcom/apq8016_sbc.c1
-rw-r--r--sound/soc/qcom/apq8096.c1
-rw-r--r--sound/soc/qcom/lpass-cpu.c16
-rw-r--r--sound/soc/qcom/lpass-platform.c8
-rw-r--r--sound/soc/qcom/qdsp6/q6afe-dai.c16
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.c7
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c16
-rw-r--r--sound/soc/qcom/sdm845.c1
-rw-r--r--sound/soc/qcom/storm.c1
-rw-r--r--sound/soc/rockchip/rockchip_pdm.c4
-rw-r--r--sound/soc/samsung/tm2_wm5110.c2
-rw-r--r--sound/soc/sh/rcar/ssi.c11
-rw-r--r--sound/soc/sh/rcar/ssiu.c2
-rw-r--r--sound/soc/soc-core.c22
-rw-r--r--sound/soc/soc-dapm.c29
-rw-r--r--sound/soc/soc-ops.c4
-rw-r--r--sound/soc/soc-pcm.c8
-rw-r--r--sound/soc/soc-topology.c6
-rw-r--r--sound/soc/sunxi/sun4i-codec.c5
-rw-r--r--sound/soc/tegra/tegra30_ahub.c4
-rw-r--r--sound/soc/tegra/tegra30_i2s.c4
-rw-r--r--sound/soc/tegra/tegra_wm8903.c6
-rw-r--r--sound/usb/card.c45
-rw-r--r--sound/usb/card.h1
-rw-r--r--sound/usb/clock.c24
-rw-r--r--sound/usb/endpoint.c6
-rw-r--r--sound/usb/format.c54
-rw-r--r--sound/usb/line6/capture.c2
-rw-r--r--sound/usb/line6/driver.c6
-rw-r--r--sound/usb/line6/playback.c2
-rw-r--r--sound/usb/line6/pod.c5
-rw-r--r--sound/usb/line6/variax.c6
-rw-r--r--sound/usb/midi.c56
-rw-r--r--sound/usb/mixer.c159
-rw-r--r--sound/usb/mixer.h19
-rw-r--r--sound/usb/mixer_maps.c153
-rw-r--r--sound/usb/mixer_quirks.c22
-rw-r--r--sound/usb/mixer_scarlett.c14
-rw-r--r--sound/usb/mixer_us16x08.c2
-rw-r--r--sound/usb/pcm.c14
-rw-r--r--sound/usb/proc.c2
-rw-r--r--sound/usb/quirks-table.h184
-rw-r--r--sound/usb/quirks.c90
-rw-r--r--sound/usb/quirks.h2
-rw-r--r--sound/usb/stream.c11
-rw-r--r--sound/usb/usbaudio.h4
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c2
-rw-r--r--sound/usb/validate.c4
-rw-r--r--tools/arch/ia64/include/asm/barrier.h3
-rw-r--r--tools/bpf/bpftool/btf_dumper.c2
-rw-r--r--tools/build/Build.include3
-rw-r--r--tools/build/Makefile4
-rw-r--r--tools/build/Makefile.feature5
-rw-r--r--tools/build/feature/Makefile14
-rw-r--r--tools/build/feature/test-all.c15
-rw-r--r--tools/build/feature/test-eventfd.c9
-rw-r--r--tools/build/feature/test-get_current_dir_name.c10
-rw-r--r--tools/build/feature/test-gettid.c11
-rw-r--r--tools/gpio/Makefile2
-rw-r--r--tools/gpio/gpio-hammer.c17
-rw-r--r--tools/include/uapi/linux/bpf.h4
-rw-r--r--tools/include/uapi/linux/perf_event.h2
-rw-r--r--tools/lib/api/fs/fs.c17
-rw-r--r--tools/lib/api/fs/fs.h12
-rw-r--r--tools/lib/bpf/Makefile2
-rw-r--r--tools/lib/traceevent/event-parse.c1
-rw-r--r--tools/objtool/Makefile3
-rw-r--r--tools/objtool/arch/x86/include/asm/insn.h15
-rw-r--r--tools/objtool/check.c32
-rw-r--r--tools/objtool/elf.c7
-rw-r--r--tools/objtool/orc_dump.c44
-rw-r--r--tools/objtool/orc_gen.c33
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Documentation/perf-stat.txt4
-rw-r--r--tools/perf/Makefile.config23
-rw-r--r--tools/perf/Makefile.perf4
-rw-r--r--tools/perf/bench/mem-functions.c21
-rw-r--r--tools/perf/builtin-lock.c2
-rw-r--r--tools/perf/builtin-probe.c3
-rw-r--r--tools/perf/builtin-report.c3
-rw-r--r--tools/perf/builtin-stat.c2
-rw-r--r--tools/perf/builtin-top.c4
-rw-r--r--tools/perf/jvmti/jvmti_agent.c2
-rw-r--r--tools/perf/pmu-events/jevents.c19
-rwxr-xr-xtools/perf/python/tracepoint.py2
-rw-r--r--tools/perf/tests/bp_signal.c5
-rw-r--r--tools/perf/tests/pmu.c1
-rw-r--r--tools/perf/tests/sample-parsing.c2
-rw-r--r--tools/perf/tests/shell/lib/probe_vfs_getname.sh2
-rwxr-xr-xtools/perf/trace/beauty/arch_errno_names.sh2
-rw-r--r--tools/perf/util/Build1
-rw-r--r--tools/perf/util/auxtrace.c4
-rw-r--r--tools/perf/util/cpumap.c10
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c8
-rw-r--r--tools/perf/util/cs-etm.c29
-rw-r--r--tools/perf/util/cs-etm.h10
-rw-r--r--tools/perf/util/dso.c16
-rw-r--r--tools/perf/util/dso.h1
-rw-r--r--tools/perf/util/dwarf-aux.c8
-rw-r--r--tools/perf/util/event.c9
-rw-r--r--tools/perf/util/evsel.c3
-rw-r--r--tools/perf/util/expr.y3
-rw-r--r--tools/perf/util/get_current_dir_name.c18
-rw-r--r--tools/perf/util/intel-pt-decoder/insn.h15
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c30
-rw-r--r--tools/perf/util/intel-pt.c13
-rw-r--r--tools/perf/util/map.c7
-rw-r--r--tools/perf/util/mem2node.c3
-rw-r--r--tools/perf/util/metricgroup.c3
-rw-r--r--tools/perf/util/parse-events.c9
-rw-r--r--tools/perf/util/parse-events.y2
-rw-r--r--tools/perf/util/parse-regs-options.c2
-rw-r--r--tools/perf/util/pmu.c11
-rw-r--r--tools/perf/util/pmu.h1
-rw-r--r--tools/perf/util/print_binary.c2
-rw-r--r--tools/perf/util/probe-event.c49
-rw-r--r--tools/perf/util/probe-finder.c3
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c7
-rw-r--r--tools/perf/util/session.c1
-rw-r--r--tools/perf/util/sort.c2
-rw-r--r--tools/perf/util/srcline.c16
-rw-r--r--tools/perf/util/stat.c6
-rw-r--r--tools/perf/util/symbol-elf.c7
-rw-r--r--tools/perf/util/symbol.c2
-rw-r--r--tools/perf/util/symbol_fprintf.c2
-rw-r--r--tools/perf/util/trace-event-read.c1
-rw-r--r--tools/perf/util/util.h6
-rw-r--r--tools/power/acpi/Makefile.config1
-rwxr-xr-xtools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py22
-rw-r--r--tools/scripts/Makefile.include10
-rwxr-xr-xtools/testing/ktest/compare-ktest-sample.pl2
-rwxr-xr-xtools/testing/ktest/ktest.pl7
-rw-r--r--tools/testing/selftests/bpf/test_maps.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py3
-rw-r--r--tools/testing/selftests/bpf/test_progs.c1
-rw-r--r--tools/testing/selftests/bpf/test_select_reuseport.c8
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c112
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c2
-rw-r--r--tools/testing/selftests/ftrace/settings1
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc2
-rw-r--r--tools/testing/selftests/ipc/msgque.c2
-rwxr-xr-xtools/testing/selftests/kmod/kmod.sh13
-rw-r--r--tools/testing/selftests/kvm/include/x86.h2
-rw-r--r--tools/testing/selftests/kvm/lib/x86.c3
-rw-r--r--tools/testing/selftests/lib.mk4
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh1
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh9
-rw-r--r--tools/testing/selftests/net/msg_zerocopy.c5
-rw-r--r--tools/testing/selftests/net/psock_fanout.c3
-rw-r--r--tools/testing/selftests/networking/timestamping/rxtimestamp.c4
-rw-r--r--tools/testing/selftests/networking/timestamping/timestamping.c10
-rwxr-xr-xtools/testing/selftests/ntb/ntb_test.sh2
-rw-r--r--tools/testing/selftests/powerpc/alignment/alignment_handler.c5
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/context_switch.c21
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c1
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c7
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c55
-rw-r--r--tools/testing/selftests/powerpc/utils.c37
-rw-r--r--tools/testing/selftests/proc/proc-loadavg-001.c1
-rw-r--r--tools/testing/selftests/proc/proc-self-syscall.c1
-rw-r--r--tools/testing/selftests/proc/proc-uptime-002.c1
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc_batch.py2
-rw-r--r--tools/testing/selftests/vm/mlock2-tests.c233
-rw-r--r--tools/testing/selftests/x86/protection_keys.c3
-rw-r--r--tools/testing/selftests/x86/ptrace_syscall.c8
-rw-r--r--tools/testing/selftests/x86/syscall_nt.c1
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_common.c2
-rw-r--r--tools/vm/Makefile2
-rw-r--r--virt/kvm/arm/aarch32.c28
-rw-r--r--virt/kvm/arm/arm.c2
-rw-r--r--virt/kvm/arm/hyp/aarch32.c8
-rw-r--r--virt/kvm/arm/mmio.c2
-rw-r--r--virt/kvm/arm/mmu.c25
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c1
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c11
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c22
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c4
-rw-r--r--virt/kvm/kvm_main.c200
4422 files changed, 48402 insertions, 27739 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 8127a08e366d..d10bcca6c3fb 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -1559,7 +1559,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw
KernelVersion: 4.3
Contact: linux-iio@vger.kernel.org
Description:
- Raw (unscaled no offset etc.) percentage reading of a substance.
+ Raw (unscaled no offset etc.) reading of a substance. Units
+ after application of scale and offset are percents.
What: /sys/bus/iio/devices/iio:deviceX/in_resistance_raw
What: /sys/bus/iio/devices/iio:deviceX/in_resistanceX_raw
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index b492fb6057c9..b9c14c11efc5 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -478,6 +478,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
/sys/devices/system/cpu/vulnerabilities/l1tf
/sys/devices/system/cpu/vulnerabilities/mds
+ /sys/devices/system/cpu/vulnerabilities/srbds
/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
/sys/devices/system/cpu/vulnerabilities/itlb_multihit
Date: January 2018
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
index 0795e3c2643f..ca4dbdd9016d 100644
--- a/Documentation/admin-guide/hw-vuln/index.rst
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -14,3 +14,4 @@ are configurable at compile, boot or run time.
mds
tsx_async_abort
multihit.rst
+ special-register-buffer-data-sampling.rst
diff --git a/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst
new file mode 100644
index 000000000000..47b1b3afac99
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst
@@ -0,0 +1,149 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+SRBDS - Special Register Buffer Data Sampling
+=============================================
+
+SRBDS is a hardware vulnerability that allows MDS :doc:`mds` techniques to
+infer values returned from special register accesses. Special register
+accesses are accesses to off-core registers. According to Intel's evaluation,
+the special register reads that have a security expectation of privacy are
+RDRAND, RDSEED and SGX EGETKEY.
+
+When RDRAND, RDSEED and EGETKEY instructions are used, the data is moved
+to the core through the special register mechanism that is susceptible
+to MDS attacks.
+
+Affected processors
+--------------------
+Core models (desktop, mobile, Xeon-E3) that implement RDRAND and/or RDSEED may
+be affected.
+
+A processor is affected by SRBDS if its Family_Model and stepping are
+in the following list, with the exception of the listed processors
+exporting MDS_NO while Intel TSX is available yet not enabled. The
+latter class of processors is affected only when Intel TSX is enabled
+by software using TSX_CTRL_MSR; otherwise it is not affected.
+
+ ============= ============ ========
+ common name Family_Model Stepping
+ ============= ============ ========
+ IvyBridge 06_3AH All
+
+ Haswell 06_3CH All
+ Haswell_L 06_45H All
+ Haswell_G 06_46H All
+
+ Broadwell_G 06_47H All
+ Broadwell 06_3DH All
+
+ Skylake_L 06_4EH All
+ Skylake 06_5EH All
+
+ Kabylake_L 06_8EH <= 0xC
+ Kabylake 06_9EH <= 0xD
+ ============= ============ ========
+
+Related CVEs
+------------
+
+The following CVE entry is related to this SRBDS issue:
+
+ ============== ===== =====================================
+ CVE-2020-0543 SRBDS Special Register Buffer Data Sampling
+ ============== ===== =====================================
+
+Attack scenarios
+----------------
+An unprivileged user can extract values returned from RDRAND and RDSEED
+executed on another core or sibling thread using MDS techniques.
+
+
+Mitigation mechanism
+--------------------
+Intel will release microcode updates that modify the RDRAND, RDSEED, and
+EGETKEY instructions to overwrite secret special register data in the shared
+staging buffer before the secret data can be accessed by another logical
+processor.
+
+During execution of the RDRAND, RDSEED, or EGETKEY instructions, off-core
+accesses from other logical processors will be delayed until the special
+register read is complete and the secret data in the shared staging buffer is
+overwritten.
+
+This has three effects on performance:
+
+#. RDRAND, RDSEED, or EGETKEY instructions have higher latency.
+
+#. Executing RDRAND at the same time on multiple logical processors will be
+ serialized, resulting in an overall reduction in the maximum RDRAND
+ bandwidth.
+
+#. Executing RDRAND, RDSEED or EGETKEY will delay memory accesses from other
+ logical processors that miss their core caches, with an impact similar to
+ legacy locked cache-line-split accesses.
+
+The microcode updates provide an opt-out mechanism (RNGDS_MITG_DIS) to disable
+the mitigation for RDRAND and RDSEED instructions executed outside of Intel
+Software Guard Extensions (Intel SGX) enclaves. On logical processors that
+disable the mitigation using this opt-out mechanism, RDRAND and RDSEED do not
+take longer to execute and do not impact performance of sibling logical
+processors' memory accesses. The opt-out mechanism does not affect Intel SGX
+enclaves (including execution of RDRAND or RDSEED inside an enclave, as well
+as EGETKEY execution).
+
+IA32_MCU_OPT_CTRL MSR Definition
+--------------------------------
+Along with the mitigation for this issue, Intel added a new thread-scope
+IA32_MCU_OPT_CTRL MSR (address 0x123). The presence of this MSR and
+RNGDS_MITG_DIS (bit 0) is enumerated by CPUID.(EAX=07H,ECX=0).EDX[SRBDS_CTRL =
+9]==1. This MSR is introduced through the microcode update.
+
+Setting IA32_MCU_OPT_CTRL[0] (RNGDS_MITG_DIS) to 1 for a logical processor
+disables the mitigation for RDRAND and RDSEED executed outside of an Intel SGX
+enclave on that logical processor. Opting out of the mitigation for a
+particular logical processor does not affect the RDRAND and RDSEED mitigations
+for other logical processors.
+
+Note that inside of an Intel SGX enclave, the mitigation is applied regardless
+of the value of RNGDS_MITG_DIS.
+
+Mitigation control on the kernel command line
+---------------------------------------------
+The kernel command line allows control over the SRBDS mitigation at boot time
+with the option "srbds=". The option for this is:
+
+ ============= =============================================================
+ off This option disables SRBDS mitigation for RDRAND and RDSEED on
+ affected platforms.
+ ============= =============================================================
+
+SRBDS System Information
+------------------------
+The Linux kernel provides vulnerability status information through sysfs. For
+SRBDS this can be accessed by the following sysfs file:
+/sys/devices/system/cpu/vulnerabilities/srbds
+
+The possible values contained in this file are:
+
+ ============================== =============================================
+ Not affected Processor not vulnerable
+ Vulnerable Processor vulnerable and mitigation disabled
+ Vulnerable: No microcode Processor vulnerable and microcode is missing
+ mitigation
+ Mitigation: Microcode Processor is vulnerable and mitigation is in
+ effect.
+ Mitigation: TSX disabled Processor is only vulnerable when TSX is
+ enabled while this system was booted with TSX
+ disabled.
+ Unknown: Dependent on
+ hypervisor status Running on virtual guest processor that is
+ affected but with no way to know if host
+ processor is mitigated or vulnerable.
+ ============================== =============================================
+
+SRBDS Default mitigation
+------------------------
+The updated microcode serializes processor access during execution of RDRAND
+or RDSEED and ensures that the shared staging buffer is overwritten before it
+is released for reuse. Use the "srbds=off" kernel command line option to
+disable the mitigation for RDRAND and RDSEED.
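
The IA32_MCU_OPT_CTRL description above can also be exercised from userspace. A
minimal sketch, assuming GCC's <cpuid.h> helper and the standard msr character
device (/dev/cpu/0/msr, root only); it is illustrative and not part of the patch::

    /* Sketch: check CPUID.(EAX=07H,ECX=0).EDX[9] (SRBDS_CTRL) and read bit 0
     * (RNGDS_MITG_DIS) of IA32_MCU_OPT_CTRL (MSR 0x123) via /dev/cpu/0/msr.
     */
    #include <cpuid.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        uint64_t val;
        int fd;

        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) ||
            !(edx & (1u << 9))) {
            puts("SRBDS_CTRL not enumerated (microcode too old?)");
            return 0;
        }

        fd = open("/dev/cpu/0/msr", O_RDONLY);
        if (fd < 0 || pread(fd, &val, sizeof(val), 0x123) != sizeof(val)) {
            perror("IA32_MCU_OPT_CTRL");
            return 1;
        }
        printf("RNGDS_MITG_DIS = %llu (1 = mitigation opted out)\n",
               (unsigned long long)(val & 1));
        close(fd);
        return 0;
    }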
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1a5101b7e853..558332df02a8 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -558,7 +558,7 @@
loops can be debugged more effectively on production
systems.
- clearcpuid=BITNUM [X86]
+ clearcpuid=BITNUM[,BITNUM...] [X86]
Disable CPUID feature X for the kernel. See
arch/x86/include/asm/cpufeatures.h for the valid bit
numbers. Note the Linux specific bits are not necessarily
@@ -2560,6 +2560,8 @@
mds=off [X86]
tsx_async_abort=off [X86]
kvm.nx_huge_pages=off [X86]
+ no_entry_flush [PPC]
+ no_uaccess_flush [PPC]
Exceptions:
This does not have any effect on
@@ -2870,6 +2872,8 @@
noefi Disable EFI runtime services support.
+ no_entry_flush [PPC] Don't flush the L1-D cache when entering the kernel.
+
noexec [IA-64]
noexec [X86]
@@ -2919,6 +2923,9 @@
nospec_store_bypass_disable
[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+ no_uaccess_flush
+ [PPC] Don't flush the L1-D cache after accessing user data.
+
noxsave [BUGS=X86] Disables x86 extended register state save
and restore using xsave. The kernel will fallback to
enabling legacy floating-point and sse state.
@@ -4415,6 +4422,26 @@
spia_pedr=
spia_peddr=
+ srbds= [X86,INTEL]
+ Control the Special Register Buffer Data Sampling
+ (SRBDS) mitigation.
+
+ Certain CPUs are vulnerable to an MDS-like
+ exploit which can leak bits from the random
+ number generator.
+
+ By default, this issue is mitigated by
+ microcode. However, the microcode fix can cause
+ the RDRAND and RDSEED instructions to become
+ much slower. Among other effects, this will
+ result in reduced throughput from /dev/urandom.
+
+ The microcode mitigation can be disabled with
+ the following option:
+
+ off: Disable mitigation and remove
+ performance impact to RDRAND and RDSEED
+
srcutree.counter_wrap_check [KNL]
Specifies how frequently to check for
grace-period sequence counter wrap for the
@@ -4971,6 +4998,7 @@
device);
j = NO_REPORT_LUNS (don't use report luns
command, uas only);
+ k = NO_SAME (do not use WRITE_SAME, uas only)
l = NOT_LOCKABLE (don't try to lock and
unlock ejectable media, not on uas);
m = MAX_SECTORS_64 (don't transfer more
@@ -5250,6 +5278,14 @@
with /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
+ xen.event_eoi_delay= [XEN]
+ How long to delay EOI handling in case of event
+ storms (jiffies). Default is 10.
+
+ xen.event_loop_timeout= [XEN]
+ After which time (jiffies) the event handling loop
+ should start to delay EOI handling. Default is 2.
+
xirc2ps_cs= [NET,PCMCIA]
Format:
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index eeb3fc9d777b..667ea906266e 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -59,6 +59,7 @@ stable kernels.
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
+| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
diff --git a/Documentation/device-mapper/dm-integrity.txt b/Documentation/device-mapper/dm-integrity.txt
index 297251b0d2d5..bf6af2ade0a6 100644
--- a/Documentation/device-mapper/dm-integrity.txt
+++ b/Documentation/device-mapper/dm-integrity.txt
@@ -146,6 +146,13 @@ block_size:number
Supported values are 512, 1024, 2048 and 4096 bytes. If not
specified the default block size is 512 bytes.
+legacy_recalculate
+ Allow recalculating of volumes with HMAC keys. This is disabled by
+ default for security reasons - an attacker could modify the volume,
+ set recalc_sector to zero, and the kernel would not detect the
+ modification.
+
+
The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can
be changed when reloading the target (load an inactive table and swap the
tables with suspend and resume). The other arguments should not be changed
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
index b6a7e7397b8b..b944fe067188 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
@@ -16,6 +16,9 @@ Required properties:
Documentation/devicetree/bindings/graph.txt. This port should be connected
to the input port of an attached HDMI or LVDS encoder chip.
+Optional properties:
+- pinctrl-names: Contain "default" and "sleep".
+
Example:
dpi0: dpi@1401d000 {
@@ -26,6 +29,9 @@ dpi0: dpi@1401d000 {
<&mmsys CLK_MM_DPI_ENGINE>,
<&apmixedsys CLK_APMIXED_TVDPLL>;
clock-names = "pixel", "engine", "pll";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&dpi_pin_func>;
+ pinctrl-1 = <&dpi_pin_idle>;
port {
dpi0_out: endpoint {
diff --git a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
index c82794002595..89647d714387 100644
--- a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
+++ b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
@@ -21,7 +21,7 @@ controller state. The mux controller state is described in
Example:
mux: mux-controller {
- compatible = "mux-gpio";
+ compatible = "gpio-mux";
#mux-control-cells = <0>;
mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
diff --git a/Documentation/devicetree/bindings/net/btusb.txt b/Documentation/devicetree/bindings/net/btusb.txt
index 37d67926dd6d..d883a35641d1 100644
--- a/Documentation/devicetree/bindings/net/btusb.txt
+++ b/Documentation/devicetree/bindings/net/btusb.txt
@@ -35,7 +35,7 @@ Following example uses irq pin number 3 of gpio0 for out of band wake-on-bt:
compatible = "usb1286,204e";
reg = <1>;
interrupt-parent = <&gpio0>;
- interrupt-name = "wakeup";
+ interrupt-names = "wakeup";
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
index cfaf88998918..9e4dc510a40a 100644
--- a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
+++ b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
@@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with NPC100 NFC controller on I2C2):
clock-frequency = <100000>;
interrupt-parent = <&gpio1>;
- interrupts = <29 GPIO_ACTIVE_HIGH>;
+ interrupts = <29 IRQ_TYPE_LEVEL_HIGH>;
enable-gpios = <&gpio0 30 GPIO_ACTIVE_HIGH>;
firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>;
diff --git a/Documentation/devicetree/bindings/net/nfc/pn544.txt b/Documentation/devicetree/bindings/net/nfc/pn544.txt
index 92f399ec22b8..2bd82562ce8e 100644
--- a/Documentation/devicetree/bindings/net/nfc/pn544.txt
+++ b/Documentation/devicetree/bindings/net/nfc/pn544.txt
@@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with PN544 on I2C2):
clock-frequency = <400000>;
interrupt-parent = <&gpio1>;
- interrupts = <17 GPIO_ACTIVE_HIGH>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
enable-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
firmware-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt
index 68cccc4653ba..367b58ce1bb9 100644
--- a/Documentation/devicetree/bindings/sound/wm8994.txt
+++ b/Documentation/devicetree/bindings/sound/wm8994.txt
@@ -14,9 +14,15 @@ Required properties:
- #gpio-cells : Must be 2. The first cell is the pin number and the
second cell is used to specify optional parameters (currently unused).
- - AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply,
- SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered
- in Documentation/devicetree/bindings/regulator/regulator.txt
+ - power supplies for the device, as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt, depending
+ on compatible:
+ - for wlf,wm1811 and wlf,wm8958:
+ AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply,
+ DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply
+ - for wlf,wm8994:
+ AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply,
+ SPKVDD1-supply, SPKVDD2-supply
Optional properties:
@@ -73,11 +79,11 @@ wm8994: codec@1a {
lineout1-se;
+ AVDD1-supply = <&regulator>;
AVDD2-supply = <&regulator>;
CPVDD-supply = <&regulator>;
- DBVDD1-supply = <&regulator>;
- DBVDD2-supply = <&regulator>;
- DBVDD3-supply = <&regulator>;
+ DBVDD-supply = <&regulator>;
+ DCVDD-supply = <&regulator>;
SPKVDD1-supply = <&regulator>;
SPKVDD2-supply = <&regulator>;
};
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 3e4c38b806ac..ae46c0ac9f11 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -68,6 +68,8 @@ Optional properties:
from P0 to P1/P2/P3 without delay.
- snps,dis-tx-ipgap-linecheck-quirk: when set, disable u2mac linestate check
during HS transmit.
+ - snps,parkmode-disable-ss-quirk: when set, all SuperSpeed bus instances in
+ park mode are disabled.
- snps,dis_metastability_quirk: when set, disable metastability workaround.
CAUTION: use only if you are absolutely sure of it.
- snps,is-utmi-l1-suspend: true when DWC3 asserts output signal
diff --git a/Documentation/driver-api/libata.rst b/Documentation/driver-api/libata.rst
index 70e180e6b93d..9f3e5dc31184 100644
--- a/Documentation/driver-api/libata.rst
+++ b/Documentation/driver-api/libata.rst
@@ -250,7 +250,7 @@ High-level taskfile hooks
::
- void (*qc_prep) (struct ata_queued_cmd *qc);
+ enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc);
int (*qc_issue) (struct ata_queued_cmd *qc);
diff --git a/Documentation/driver-api/mtdnand.rst b/Documentation/driver-api/mtdnand.rst
index c55a6034c397..5470a3d6bd9e 100644
--- a/Documentation/driver-api/mtdnand.rst
+++ b/Documentation/driver-api/mtdnand.rst
@@ -246,7 +246,7 @@ necessary information about the device.
this->eccmode = NAND_ECC_SOFT;
/* Scan to find existence of the device */
- if (nand_scan (board_mtd, 1)) {
+ if (nand_scan (this, 1)) {
err = -ENXIO;
goto out_ior;
}
@@ -277,7 +277,7 @@ unregisters the partitions in the MTD layer.
static void __exit board_cleanup (void)
{
/* Release resources, unregister device */
- nand_release (board_mtd);
+ nand_release (mtd_to_nand(board_mtd));
/* unmap physical address */
iounmap(baseaddr);
diff --git a/Documentation/filesystems/affs.txt b/Documentation/filesystems/affs.txt
index 71b63c2b9841..a8f1a58e3692 100644
--- a/Documentation/filesystems/affs.txt
+++ b/Documentation/filesystems/affs.txt
@@ -93,13 +93,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows:
- R maps to r for user, group and others. On directories, R implies x.
- - If both W and D are allowed, w will be set.
+ - W maps to w.
- E maps to x.
- - H and P are always retained and ignored under Linux.
+ - D is ignored.
- - A is always reset when a file is written to.
+ - H, S and P are always retained and ignored under Linux.
+
+ - A is cleared when a file is written to.
User id and group id will be used unless set[gu]id are given as mount
options. Since most of the Amiga file systems are single user systems
@@ -111,11 +113,13 @@ Linux -> Amiga:
The Linux rwxrwxrwx file mode is handled as follows:
- - r permission will set R for user, group and others.
+ - r permission will allow R for user, group and others.
+
+ - w permission will allow W for user, group and others.
- - w permission will set W and D for user, group and others.
+ - x permission of the user will allow E for plain files.
- - x permission of the user will set E for plain files.
+ - D will be allowed for user, group and others.
- All other flags (suid, sgid, ...) are ignored and will
not be retained.
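
The Amiga-to-Linux direction of the mapping above can be summarised in code. A
rough sketch under the stated rules; the protection-bit constants are
placeholders and are not taken from fs/affs::

    /* Sketch: map Amiga R/W/E/D protection bits to a Linux mode following the
     * rules above (R -> r, W -> w, E -> x, D ignored; R implies x on dirs).
     * The bit layout here is a placeholder, not the affs driver's definition.
     */
    #include <stdio.h>
    #include <sys/stat.h>

    #define AMIGA_PROT_R (1u << 3)   /* placeholder bit positions */
    #define AMIGA_PROT_W (1u << 2)
    #define AMIGA_PROT_E (1u << 1)
    #define AMIGA_PROT_D (1u << 0)   /* ignored on the Linux side */

    static unsigned int amiga_prot_to_mode(unsigned int prot, int is_dir)
    {
        unsigned int mode = 0;

        if (prot & AMIGA_PROT_R) {
            mode |= S_IRUSR | S_IRGRP | S_IROTH;
            if (is_dir)                      /* R implies x on directories */
                mode |= S_IXUSR | S_IXGRP | S_IXOTH;
        }
        if (prot & AMIGA_PROT_W)
            mode |= S_IWUSR | S_IWGRP | S_IWOTH;
        if (prot & AMIGA_PROT_E)
            mode |= S_IXUSR | S_IXGRP | S_IXOTH;
        return mode;                         /* H, S, P, A not represented */
    }

    int main(void)
    {
        printf("%o\n", amiga_prot_to_mode(AMIGA_PROT_R | AMIGA_PROT_W, 0));
        return 0;
    }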
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index cfbc18f0d9c9..5b667ee1242a 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -426,10 +426,18 @@ astute users may notice some differences in behavior:
- Unencrypted files, or files encrypted with a different encryption
policy (i.e. different key, modes, or flags), cannot be renamed or
linked into an encrypted directory; see `Encryption policy
- enforcement`_. Attempts to do so will fail with EPERM. However,
+ enforcement`_. Attempts to do so will fail with EXDEV. However,
encrypted files can be renamed within an encrypted directory, or
into an unencrypted directory.
+ Note: "moving" an unencrypted file into an encrypted directory, e.g.
+ with the `mv` program, is implemented in userspace by a copy
+ followed by a delete. Be aware that the original unencrypted data
+ may remain recoverable from free space on the disk; prefer to keep
+ all files encrypted from the very beginning. The `shred` program
+ may be used to overwrite the source files but isn't guaranteed to be
+ effective on all filesystems and storage devices.
+
- Direct I/O is not supported on encrypted files. Attempts to use
direct I/O on such files will fall back to buffered I/O.
@@ -516,7 +524,7 @@ not be encrypted.
Except for those special files, it is forbidden to have unencrypted
files, or files encrypted with a different encryption policy, in an
encrypted directory tree. Attempts to link or rename such a file into
-an encrypted directory will fail with EPERM. This is also enforced
+an encrypted directory will fail with EXDEV. This is also enforced
during ->lookup() to provide limited protection against offline
attacks that try to disable or downgrade encryption in known locations
where applications may later write sensitive data. It is recommended
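
The EXDEV behaviour described above is what forces tools like mv into a
copy-then-delete fallback. A minimal sketch of that pattern, with placeholder
paths; it is not the actual mv implementation::

    /* Sketch: rename() into an encrypted directory fails with EXDEV when the
     * encryption policies differ; userspace falls back to copy + unlink, which
     * is why the original plaintext may linger in free space.
     */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *src = "/tmp/plain/file";        /* placeholder paths */
        const char *dst = "/srv/encrypted/file";

        if (rename(src, dst) == 0)
            return 0;                   /* same-policy rename succeeded */

        if (errno != EXDEV) {
            perror("rename");
            return 1;
        }

        /* Cross-policy move refused: copy the data, then delete the source.
         * A real tool would copy with read()/write() or copy_file_range();
         * shelling out keeps this sketch short.
         */
        if (system("cp /tmp/plain/file /srv/encrypted/file") != 0)
            return 1;
        return remove(src);
    }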
diff --git a/Documentation/filesystems/seq_file.txt b/Documentation/filesystems/seq_file.txt
index d412b236a9d6..7cf7143921a1 100644
--- a/Documentation/filesystems/seq_file.txt
+++ b/Documentation/filesystems/seq_file.txt
@@ -192,6 +192,12 @@ between the calls to start() and stop(), so holding a lock during that time
is a reasonable thing to do. The seq_file code will also avoid taking any
other locks while the iterator is active.
+The iterator value returned by start() or next() is guaranteed to be
+passed to a subsequent next() or stop() call. This allows resources
+such as locks that were taken to be reliably released. There is *no*
+guarantee that the iterator will be passed to show(), though in practice
+it often will be.
+
Formatted output
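
A minimal sketch of an iterator relying on the start()/stop() pairing
guaranteed above: the lock taken in start() is always released in stop(),
whether or not show() runs. All names here are placeholders, not taken from
the patch::

    /* Sketch: seq_file iterator holding a mutex across the whole traversal,
     * relying on the guarantee that stop() follows every start(). */
    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/seq_file.h>

    static DEFINE_MUTEX(example_lock);
    static int example_items[] = { 1, 2, 3 };

    static void *example_start(struct seq_file *m, loff_t *pos)
    {
        mutex_lock(&example_lock);              /* released in ->stop() */
        if (*pos >= ARRAY_SIZE(example_items))
            return NULL;                        /* stop() still runs */
        return &example_items[*pos];
    }

    static void *example_next(struct seq_file *m, void *v, loff_t *pos)
    {
        ++*pos;
        if (*pos >= ARRAY_SIZE(example_items))
            return NULL;
        return &example_items[*pos];
    }

    static void example_stop(struct seq_file *m, void *v)
    {
        mutex_unlock(&example_lock);            /* always paired with start() */
    }

    static int example_show(struct seq_file *m, void *v)
    {
        seq_printf(m, "%d\n", *(int *)v);
        return 0;
    }

    static const struct seq_operations example_seq_ops = {
        .start = example_start,
        .next  = example_next,
        .stop  = example_stop,
        .show  = example_show,
    };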
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index a1426cabcef1..2e38fafc1b63 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -211,12 +211,10 @@ Other notes:
is 4096.
- show() methods should return the number of bytes printed into the
- buffer. This is the return value of scnprintf().
+ buffer.
-- show() must not use snprintf() when formatting the value to be
- returned to user space. If you can guarantee that an overflow
- will never happen you can use sprintf() otherwise you must use
- scnprintf().
+- show() should only use sysfs_emit() or sysfs_emit_at() when formatting
+ the value to be returned to user space.
- store() should return the number of bytes used from the buffer. If the
entire buffer has been used, just return the count argument.
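
For the show() guidance above, a minimal sketch of a device attribute formatted
with sysfs_emit(), assuming a kernel recent enough to provide it; the attribute
name and value are placeholders::

    /* Sketch: a sysfs show() method that formats its value with sysfs_emit(),
     * which bounds the output to the PAGE_SIZE buffer and returns the number
     * of bytes written. Placeholder attribute. */
    #include <linux/device.h>
    #include <linux/sysfs.h>

    static int example_value = 42;          /* placeholder state */

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
        return sysfs_emit(buf, "%d\n", example_value);
    }
    static DEVICE_ATTR_RO(example);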
diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst
new file mode 100644
index 000000000000..c776b6eee969
--- /dev/null
+++ b/Documentation/kbuild/llvm.rst
@@ -0,0 +1,87 @@
+==============================
+Building Linux with Clang/LLVM
+==============================
+
+This document covers how to build the Linux kernel with Clang and LLVM
+utilities.
+
+About
+-----
+
+The Linux kernel has traditionally been compiled with GNU toolchains
+such as GCC and binutils. Ongoing work has allowed for `Clang
+<https://clang.llvm.org/>`_ and `LLVM <https://llvm.org/>`_ utilities to be
+used as viable substitutes. Distributions such as `Android
+<https://www.android.com/>`_, `ChromeOS
+<https://www.chromium.org/chromium-os>`_, and `OpenMandriva
+<https://www.openmandriva.org/>`_ use Clang built kernels. `LLVM is a
+collection of toolchain components implemented in terms of C++ objects
+<https://www.aosabook.org/en/llvm.html>`_. Clang is a front-end to LLVM that
+supports C and the GNU C extensions required by the kernel, and is pronounced
+"klang," not "see-lang."
+
+Clang
+-----
+
+The compiler used can be swapped out via the `CC=` command line argument to `make`.
+`CC=` should be set when selecting a config and during a build.
+
+ make CC=clang defconfig
+
+ make CC=clang
+
+Cross Compiling
+---------------
+
+A single Clang compiler binary will typically contain all supported backends,
+which can help simplify cross compiling.
+
+ ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make CC=clang
+
+`CROSS_COMPILE` is not used to prefix the Clang compiler binary; instead,
+`CROSS_COMPILE` is used to set a command line flag: `--target <triple>`. For
+example:
+
+ clang --target aarch64-linux-gnu foo.c
+
+LLVM Utilities
+--------------
+
+LLVM has substitutes for GNU binutils utilities. Kbuild supports `LLVM=1`
+to enable them.
+
+ make LLVM=1
+
+They can be enabled individually. The full list of the parameters:
+
+	make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \
+	  OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \
+	  READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \
+ HOSTLD=ld.lld
+
+Currently, the integrated assembler is disabled by default. You can pass
+`LLVM_IAS=1` to enable it.
+
+Getting Help
+------------
+
+- `Website <https://clangbuiltlinux.github.io/>`_
+- `Mailing List <https://groups.google.com/forum/#!forum/clang-built-linux>`_: <clang-built-linux@googlegroups.com>
+- `Issue Tracker <https://github.com/ClangBuiltLinux/linux/issues>`_
+- IRC: #clangbuiltlinux on chat.freenode.net
+- `Telegram <https://t.me/ClangBuiltLinux>`_: @ClangBuiltLinux
+- `Wiki <https://github.com/ClangBuiltLinux/linux/wiki>`_
+- `Beginner Bugs <https://github.com/ClangBuiltLinux/linux/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22>`_
+
+Getting LLVM
+-------------
+
+- http://releases.llvm.org/download.html
+- https://github.com/llvm/llvm-project
+- https://llvm.org/docs/GettingStarted.html
+- https://llvm.org/docs/CMake.html
+- https://apt.llvm.org/
+- https://www.archlinux.org/packages/extra/x86_64/llvm/
+- https://github.com/ClangBuiltLinux/tc-build
+- https://github.com/ClangBuiltLinux/linux/wiki/Building-Clang-from-source
+- https://android.googlesource.com/platform/prebuilts/clang/host/linux-x86/
diff --git a/Documentation/media/uapi/v4l/colorspaces-defs.rst b/Documentation/media/uapi/v4l/colorspaces-defs.rst
index f24615544792..16e46bec8093 100644
--- a/Documentation/media/uapi/v4l/colorspaces-defs.rst
+++ b/Documentation/media/uapi/v4l/colorspaces-defs.rst
@@ -29,8 +29,7 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
:c:type:`v4l2_hsv_encoding` specifies which encoding is used.
.. note:: The default R'G'B' quantization is full range for all
- colorspaces except for BT.2020 which uses limited range R'G'B'
- quantization.
+ colorspaces. HSV formats are always full range.
.. tabularcolumns:: |p{6.0cm}|p{11.5cm}|
@@ -162,8 +161,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
- Details
* - ``V4L2_QUANTIZATION_DEFAULT``
- Use the default quantization encoding as defined by the
- colorspace. This is always full range for R'G'B' (except for the
- BT.2020 colorspace) and HSV. It is usually limited range for Y'CbCr.
+ colorspace. This is always full range for R'G'B' and HSV.
+ It is usually limited range for Y'CbCr.
* - ``V4L2_QUANTIZATION_FULL_RANGE``
- Use the full range quantization encoding. I.e. the range [0…1] is
mapped to [0…255] (with possible clipping to [1…254] to avoid the
@@ -173,4 +172,4 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
* - ``V4L2_QUANTIZATION_LIM_RANGE``
- Use the limited range quantization encoding. I.e. the range [0…1]
is mapped to [16…235]. Cb and Cr are mapped from [-0.5…0.5] to
- [16…240].
+ [16…240]. Limited Range cannot be used with HSV.
diff --git a/Documentation/media/uapi/v4l/colorspaces-details.rst b/Documentation/media/uapi/v4l/colorspaces-details.rst
index 09fabf4cd412..ca7176cae8dd 100644
--- a/Documentation/media/uapi/v4l/colorspaces-details.rst
+++ b/Documentation/media/uapi/v4l/colorspaces-details.rst
@@ -370,9 +370,8 @@ Colorspace BT.2020 (V4L2_COLORSPACE_BT2020)
The :ref:`itu2020` standard defines the colorspace used by Ultra-high
definition television (UHDTV). The default transfer function is
``V4L2_XFER_FUNC_709``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_BT2020``. The default R'G'B' quantization is limited
-range (!), and so is the default Y'CbCr quantization. The chromaticities
-of the primary colors and the white reference are:
+``V4L2_YCBCR_ENC_BT2020``. The default Y'CbCr quantization is limited range.
+The chromaticities of the primary colors and the white reference are:
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 7eb9366422f5..3c617d620b6f 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -934,12 +934,14 @@ icmp_ratelimit - INTEGER
icmp_msgs_per_sec - INTEGER
Limit maximal number of ICMP packets sent per second from this host.
Only messages whose type matches icmp_ratemask (see below) are
- controlled by this limit.
+ controlled by this limit. For security reasons, the precise count
+ of messages per second is randomized.
Default: 1000
icmp_msgs_burst - INTEGER
icmp_msgs_per_sec controls number of ICMP packets sent per second,
while icmp_msgs_burst controls the burst size of these packets.
+ For security reasons, the precise burst size is randomized.
Default: 50
icmp_ratemask - INTEGER
diff --git a/Documentation/sound/hd-audio/index.rst b/Documentation/sound/hd-audio/index.rst
index f8a72ffffe66..6e12de9fc34e 100644
--- a/Documentation/sound/hd-audio/index.rst
+++ b/Documentation/sound/hd-audio/index.rst
@@ -8,3 +8,4 @@ HD-Audio
models
controls
dp-mst
+ realtek-pc-beep
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
index e06238131f77..8c0de54b5649 100644
--- a/Documentation/sound/hd-audio/models.rst
+++ b/Documentation/sound/hd-audio/models.rst
@@ -216,8 +216,6 @@ alc298-dell-aio
ALC298 fixups on Dell AIO machines
alc275-dell-xps
ALC275 fixups on Dell XPS models
-alc256-dell-xps13
- ALC256 fixups on Dell XPS13
lenovo-spk-noise
Workaround for speaker noise on Lenovo machines
lenovo-hotkey
diff --git a/Documentation/sound/hd-audio/realtek-pc-beep.rst b/Documentation/sound/hd-audio/realtek-pc-beep.rst
new file mode 100644
index 000000000000..be47c6f76a6e
--- /dev/null
+++ b/Documentation/sound/hd-audio/realtek-pc-beep.rst
@@ -0,0 +1,129 @@
+===============================
+Realtek PC Beep Hidden Register
+===============================
+
+This file documents the "PC Beep Hidden Register", which is present in certain
+Realtek HDA codecs and controls a muxer and pair of passthrough mixers that can
+route audio between pins but aren't themselves exposed as HDA widgets. As far
+as I can tell, these hidden routes are designed to allow flexible PC Beep output
+for codecs that don't have mixer widgets in their output paths. Why it's easier
+to hide a mixer behind an undocumented vendor register than to just expose it
+as a widget, I have no idea.
+
+Register Description
+====================
+
+The register is accessed via processing coefficient 0x36 on NID 20h. Bits not
+identified below have no discernible effect on my machine, a Dell XPS 13 9350::
+
+ MSB LSB
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |h|S|L| | B |R| | Known bits
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+ |0|0|1|1| 0x7 |0|0x0|1| 0x7 | Reset value
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+1Ah input select (B): 2 bits
+ When zero, expose the PC Beep line (from the internal beep generator, when
+ enabled with the Set Beep Generation verb on NID 01h, or else from the
+ external PCBEEP pin) on the 1Ah pin node. When nonzero, expose the headphone
+ jack (or possibly Line In on some machines) input instead. If PC Beep is
+ selected, the 1Ah boost control has no effect.
+
+Amplify 1Ah loopback, left (L): 1 bit
+ Amplify the left channel of 1Ah before mixing it into outputs as specified
+ by h and S bits. Does not affect the level of 1Ah exposed to other widgets.
+
+Amplify 1Ah loopback, right (R): 1 bit
+ Amplify the right channel of 1Ah before mixing it into outputs as specified
+ by h and S bits. Does not affect the level of 1Ah exposed to other widgets.
+
+Loopback 1Ah to 21h [active low] (h): 1 bit
+ When zero, mix 1Ah (possibly with amplification, depending on L and R bits)
+ into 21h (headphone jack on my machine). Mixed signal respects the mute
+ setting on 21h.
+
+Loopback 1Ah to 14h (S): 1 bit
+ When one, mix 1Ah (possibly with amplification, depending on L and R bits)
+ into 14h (internal speaker on my machine). Mixed signal **ignores** the mute
+ setting on 14h and is present whenever 14h is configured as an output.
+
+Path diagrams
+=============
+
+1Ah input selection (DIV is the PC Beep divider set on NID 01h)::
+
+ <Beep generator> <PCBEEP pin> <Headphone jack>
+ | | |
+ +--DIV--+--!DIV--+ {1Ah boost control}
+ | |
+ +--(b == 0)--+--(b != 0)--+
+ |
+ >1Ah (Beep/Headphone Mic/Line In)<
+
+Loopback of 1Ah to 21h/14h::
+
+ <1Ah (Beep/Headphone Mic/Line In)>
+ |
+ {amplify if L/R}
+ |
+ +-----!h-----+-----S-----+
+ | |
+ {21h mute control} |
+ | |
+ >21h (Headphone)< >14h (Internal Speaker)<
+
+Background
+==========
+
+All Realtek HDA codecs have a vendor-defined widget with node ID 20h which
+provides access to a bank of registers that control various codec functions.
+Registers are read and written via the standard HDA processing coefficient
+verbs (Set/Get Coefficient Index, Set/Get Processing Coefficient). The node is
+named "Realtek Vendor Registers" in public datasheets' verb listings and,
+apart from that, is entirely undocumented.
+
+This particular register, exposed at coefficient 0x36 and named in commits from
+Realtek, is of note: unlike most registers, which seem to control detailed
+amplifier parameters not in scope of the HDA specification, it controls audio
+routing which could just as easily have been defined using standard HDA mixer
+and selector widgets.
+
+Specifically, it selects between two sources for the input pin widget with Node
+ID (NID) 1Ah: the widget's signal can come either from an audio jack (on my
+laptop, a Dell XPS 13 9350, it's the headphone jack, but comments in Realtek
+commits indicate that it might be a Line In on some machines) or from the PC
+Beep line (which is itself multiplexed between the codec's internal beep
+generator and external PCBEEP pin, depending on if the beep generator is
+enabled via verbs on NID 01h). Additionally, it can mix (with optional
+amplification) that signal onto the 21h and/or 14h output pins.
+
+The register's reset value is 0x3717, corresponding to PC Beep on 1Ah that is
+then amplified and mixed into both the headphones and the speakers. Not only
+does this violate the HDA specification, which says that "[a vendor defined
+beep input pin] connection may be maintained *only* while the Link reset
+(**RST#**) is asserted", it means that we cannot ignore the register if we care
+about the input that 1Ah would otherwise expose or if the PCBEEP trace is
+poorly shielded and picks up chassis noise (both of which are the case on my
+machine).
+
+Unfortunately, there are lots of ways to get this register configuration wrong.
+Linux, it seems, has gone through most of them. For one, the register resets
+after S3 suspend: judging by existing code, this isn't the case for all vendor
+registers, and it's led to some fixes that improve behavior on cold boot but
+don't last after suspend. Other fixes have successfully switched the 1Ah input
+away from PC Beep but have failed to disable both loopback paths. On my
+machine, this means that the headphone input is amplified and looped back to
+the headphone output, which uses the exact same pins! As you might expect, this
+causes terrible headphone noise, the character of which is controlled by the
+1Ah boost control. (If you've seen instructions online to fix XPS 13 headphone
+noise by changing "Headphone Mic Boost" in ALSA, now you know why.)
+
+The information here has been obtained through black-box reverse engineering of
+the ALC256 codec's behavior and is not guaranteed to be correct. It likely
+also applies to the ALC255, ALC257, ALC235, and ALC236, since those codecs
+seem to be close relatives of the ALC256. (They all share one initialization
+function.) Additionally, other codecs like the ALC225 and ALC285 also have this
+register, judging by existing fixups in ``patch_realtek.c``, but specific
+data (e.g. node IDs, bit positions, pin mappings) for those codecs may differ
+from what I've described here.
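
Finally, a minimal sketch of how a register like the one described above is
typically reached through the standard HDA coefficient verbs; it loosely
mirrors the vendor-coefficient helpers in patch_realtek.c but is not taken from
them, and the bit positions are read off the diagram above::

    /* Sketch: read-modify-write of the "PC Beep Hidden Register" (processing
     * coefficient 0x36 on vendor NID 0x20) using generic HDA coefficient verbs.
     */
    #include <sound/hda_codec.h>
    #include <sound/hda_verbs.h>

    static unsigned int beep_reg_read(struct hda_codec *codec)
    {
        snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x36);
        return snd_hda_codec_read(codec, 0x20, 0, AC_VERB_GET_PROC_COEF, 0);
    }

    static void beep_reg_write(struct hda_codec *codec, unsigned int val)
    {
        snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x36);
        snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, val);
    }

    /* Select the jack input on 1Ah and disable both loopback paths, leaving
     * the unidentified bits untouched. Bit positions follow the diagram
     * above: h = bit 14 (active low), S = bit 13, B = bits 6:5.
     */
    static void beep_reg_quiet(struct hda_codec *codec)
    {
        unsigned int val = beep_reg_read(codec);

        val |= (1u << 14);       /* h = 1: stop mixing 1Ah into 21h */
        val &= ~(1u << 13);      /* S = 0: stop mixing 1Ah into 14h */
        val |= (1u << 5);        /* B != 0: jack input instead of PC Beep */
        beep_reg_write(codec, val);
    }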
diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl
index c518050ffc3f..0dcf369ab904 100755
--- a/Documentation/sphinx/parse-headers.pl
+++ b/Documentation/sphinx/parse-headers.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
use strict;
use Text::Tabs;
use Getopt::Long;
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 94bf6944bb1e..7e79ff6b09e0 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
diff --git a/Documentation/trace/postprocess/decode_msr.py b/Documentation/trace/postprocess/decode_msr.py
index 0ab40e0db580..aa9cc7abd5c2 100644
--- a/Documentation/trace/postprocess/decode_msr.py
+++ b/Documentation/trace/postprocess/decode_msr.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# add symbolic names to read_msr / write_msr in trace
# decode_msr msr-index.h < trace
import sys
diff --git a/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl b/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl
index 0a120aae33ce..b9b7d80c2f9d 100644
--- a/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl
+++ b/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# This is a POC (proof of concept or piece of crap, take your pick) for reading the
# text representation of trace output related to page allocation. It makes an attempt
# to extract some high-level information on what is going on. The accuracy of the parser
diff --git a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
index 66bfd8396877..9efcf5ed8cb5 100644
--- a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
+++ b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# This is a POC for reading the text representation of trace output related to
# page reclaim. It makes an attempt to extract some high-level information on
# what is going on. The accuracy of the parser may vary
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 8e16017ff397..d2f265a9dc0d 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3999,9 +3999,11 @@ EOI was received.
#define KVM_EXIT_HYPERV_SYNIC 1
#define KVM_EXIT_HYPERV_HCALL 2
__u32 type;
+ __u32 pad1;
union {
struct {
__u32 msr;
+ __u32 pad2;
__u64 control;
__u64 evt_page;
__u64 msg_page;
diff --git a/MAINTAINERS b/MAINTAINERS
index b9f9da0b886f..1061db6fbc32 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3613,6 +3613,15 @@ M: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
S: Maintained
F: .clang-format
+CLANG/LLVM BUILD SUPPORT
+L: clang-built-linux@googlegroups.com
+W: https://clangbuiltlinux.github.io/
+B: https://github.com/ClangBuiltLinux/linux/issues
+C: irc://chat.freenode.net/clangbuiltlinux
+S: Supported
+K: \b(?i:clang|llvm)\b
+F: Documentation/kbuild/llvm.rst
+
CLEANCACHE API
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: linux-kernel@vger.kernel.org
diff --git a/Makefile b/Makefile
index 6f849dafbfec..e5d41b6792d7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 114
+SUBLEVEL = 193
EXTRAVERSION =
NAME = "People's Front"
@@ -358,8 +358,13 @@ HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
-HOSTCC = gcc
-HOSTCXX = g++
+ifneq ($(LLVM),)
+HOSTCC = clang
+HOSTCXX = clang++
+else
+HOSTCC = gcc
+HOSTCXX = g++
+endif
KBUILD_HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \
-fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) \
$(HOSTCFLAGS)
@@ -368,21 +373,34 @@ KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
KBUILD_HOSTLDLIBS := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
# Make variables (CC, etc...)
-AS = $(CROSS_COMPILE)as
-LD = $(CROSS_COMPILE)ld
-CC = $(CROSS_COMPILE)gcc
CPP = $(CC) -E
+ifneq ($(LLVM),)
+CC = clang
+LD = ld.lld
+AR = llvm-ar
+NM = llvm-nm
+OBJCOPY = llvm-objcopy
+OBJDUMP = llvm-objdump
+READELF = llvm-readelf
+OBJSIZE = llvm-size
+STRIP = llvm-strip
+else
+CC = $(CROSS_COMPILE)gcc
+LD = $(CROSS_COMPILE)ld
AR = $(CROSS_COMPILE)ar
NM = $(CROSS_COMPILE)nm
-STRIP = $(CROSS_COMPILE)strip
OBJCOPY = $(CROSS_COMPILE)objcopy
OBJDUMP = $(CROSS_COMPILE)objdump
+READELF = $(CROSS_COMPILE)readelf
+OBJSIZE = $(CROSS_COMPILE)size
+STRIP = $(CROSS_COMPILE)strip
+endif
LEX = flex
YACC = bison
AWK = awk
GENKSYMS = scripts/genksyms/genksyms
INSTALLKERNEL := installkernel
-DEPMOD = /sbin/depmod
+DEPMOD = depmod
PERL = perl
PYTHON = python
PYTHON2 = python2
@@ -420,7 +438,7 @@ KBUILD_AFLAGS := -D__ASSEMBLY__
KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common -fshort-wchar \
-Werror-implicit-function-declaration \
- -Wno-format-security \
+ -Werror=return-type -Wno-format-security \
-std=gnu89
KBUILD_CPPFLAGS := -D__KERNEL__
KBUILD_AFLAGS_KERNEL :=
@@ -432,8 +450,8 @@ KBUILD_LDFLAGS :=
GCC_PLUGINS_CFLAGS :=
CLANG_FLAGS :=
-export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
+export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
+export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
export MAKE LEX YACC AWK GENKSYMS INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE
export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
@@ -485,13 +503,15 @@ ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
-CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
+CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif
ifneq ($(GCC_TOOLCHAIN),)
CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN)
endif
+ifneq ($(LLVM_IAS),1)
CLANG_FLAGS += -no-integrated-as
+endif
CLANG_FLAGS += -Werror=unknown-warning-option
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_AFLAGS += $(CLANG_FLAGS)
@@ -554,12 +574,8 @@ KBUILD_MODULES :=
KBUILD_BUILTIN := 1
# If we have only "make modules", don't compile built-in objects.
-# When we're building modules with modversions, we need to consider
-# the built-in objects during the descend as well, in order to
-# make sure the checksums are up to date before we record them.
-
ifeq ($(MAKECMDGOALS),modules)
- KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
+ KBUILD_BUILTIN :=
endif
# If we have "make <whatever> modules", compile modules
@@ -657,20 +673,14 @@ KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
-else
-ifdef CONFIG_PROFILE_ALL_BRANCHES
-KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS += -Os
else
KBUILD_CFLAGS += -O2
endif
-endif
-
-KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
- $(call cc-disable-warning,maybe-uninitialized,))
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races)
include scripts/Makefile.kcov
include scripts/Makefile.gcc-plugins
@@ -735,8 +745,11 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
else
KBUILD_CFLAGS += -g
endif
+ifneq ($(LLVM_IAS),1)
KBUILD_AFLAGS += -Wa,-gdwarf-2
endif
+endif
+
ifdef CONFIG_DEBUG_INFO_DWARF4
KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
endif
@@ -799,6 +812,17 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
# disable stringop warnings in gcc 8+
KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
+# We'll want to enable this eventually, but it's not going away for 5.7 at least
+KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds)
+KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
+
+# Another good warning that we'll want to enable eventually
+KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
+
+# Enabled with W=2, disabled by default as noisy
+KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized)
+
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
@@ -835,12 +859,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
# change __FILE__ to the relative path from the srctree
KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
-# ensure -fcf-protection is disabled when using retpoline as it is
-# incompatible with -mindirect-branch=thunk-extern
-ifdef CONFIG_RETPOLINE
-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
-endif
-
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)
@@ -1224,6 +1242,13 @@ ifdef CONFIG_MODULES
all: modules
+# When we're building modules with modversions, we need to consider
+# the built-in objects during the descend as well, in order to
+# make sure the checksums are up to date before we record them.
+ifdef CONFIG_MODVERSIONS
+ KBUILD_BUILTIN := 1
+endif
+
# Build modules
#
# A module can be listed more than once in obj-m resulting in
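Taken together, the Makefile changes above wire up the LLVM=1 switch: passing it on the command line (for example "make LLVM=1 defconfig && make LLVM=1") selects clang/clang++ for the host tools and clang, ld.lld and the llvm-* binutils for the target, while LLVM_IAS=1 additionally drops -no-integrated-as so clang's integrated assembler is used; without LLVM set, the familiar $(CROSS_COMPILE)-prefixed GNU tools remain the default. The KBUILD_BUILTIN handling for CONFIG_MODVERSIONS is not lost, only relocated: the checksum-driven descend into built-in objects now happens in the CONFIG_MODULES block later in the file rather than in the "make modules" shortcut.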
diff --git a/arch/Kconfig b/arch/Kconfig
index a336548487e6..e3a030f7a722 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -366,6 +366,13 @@ config HAVE_RCU_TABLE_FREE
config HAVE_RCU_TABLE_INVALIDATE
bool
+config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
+ bool
+ help
+ Temporary select until all architectures can be converted to have
+ irqs disabled over activate_mm. Architectures that do IPI based TLB
+ shootdowns should enable this.
+
config ARCH_HAVE_NMI_SAFE_CMPXCHG
bool
diff --git a/arch/alpha/defconfig b/arch/alpha/defconfig
index f4ec420d7f2d..3a132c91d45b 100644
--- a/arch/alpha/defconfig
+++ b/arch/alpha/defconfig
@@ -36,7 +36,6 @@ CONFIG_BLK_DEV_CY82C693=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_SCSI_AIC7XXX=m
CONFIG_AIC7XXX_CMDS_PER_DEVICE=253
# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 4c533fc94d62..0bba9e991189 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -327,14 +327,18 @@ static inline int __is_mmio(const volatile void __iomem *addr)
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
mb();
return ret;
}
extern inline unsigned int ioread16(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
mb();
return ret;
}
@@ -375,7 +379,9 @@ extern inline void outw(u16 b, unsigned long port)
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
mb();
return ret;
}
@@ -420,14 +426,18 @@ extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
extern inline u8 readb(const volatile void __iomem *addr)
{
- u8 ret = __raw_readb(addr);
+ u8 ret;
+ mb();
+ ret = __raw_readb(addr);
mb();
return ret;
}
extern inline u16 readw(const volatile void __iomem *addr)
{
- u16 ret = __raw_readw(addr);
+ u16 ret;
+ mb();
+ ret = __raw_readw(addr);
mb();
return ret;
}
@@ -468,14 +478,18 @@ extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
extern inline u32 readl(const volatile void __iomem *addr)
{
- u32 ret = __raw_readl(addr);
+ u32 ret;
+ mb();
+ ret = __raw_readl(addr);
mb();
return ret;
}
extern inline u64 readq(const volatile void __iomem *addr)
{
- u64 ret = __raw_readq(addr);
+ u64 ret;
+ mb();
+ ret = __raw_readq(addr);
mb();
return ret;
}
@@ -493,10 +507,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
}
#endif
-#define ioread16be(p) be16_to_cpu(ioread16(p))
-#define ioread32be(p) be32_to_cpu(ioread32(p))
-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
+#define ioread16be(p) swab16(ioread16(p))
+#define ioread32be(p) swab32(ioread32(p))
+#define iowrite16be(v,p) iowrite16(swab16(v), (p))
+#define iowrite32be(v,p) iowrite32(swab32(v), (p))
#define inb_p inb
#define inw_p inw
@@ -504,14 +518,44 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
#define outb_p outb
#define outw_p outw
#define outl_p outl
-#define readb_relaxed(addr) __raw_readb(addr)
-#define readw_relaxed(addr) __raw_readw(addr)
-#define readl_relaxed(addr) __raw_readl(addr)
-#define readq_relaxed(addr) __raw_readq(addr)
-#define writeb_relaxed(b, addr) __raw_writeb(b, addr)
-#define writew_relaxed(b, addr) __raw_writew(b, addr)
-#define writel_relaxed(b, addr) __raw_writel(b, addr)
-#define writeq_relaxed(b, addr) __raw_writeq(b, addr)
+
+extern u8 readb_relaxed(const volatile void __iomem *addr);
+extern u16 readw_relaxed(const volatile void __iomem *addr);
+extern u32 readl_relaxed(const volatile void __iomem *addr);
+extern u64 readq_relaxed(const volatile void __iomem *addr);
+
+#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
+extern inline u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readb(addr);
+}
+
+extern inline u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readw(addr);
+}
+#endif
+
+#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
+extern inline u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readl(addr);
+}
+
+extern inline u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readq(addr);
+}
+#endif
+
+#define writeb_relaxed writeb
+#define writew_relaxed writew
+#define writel_relaxed writel
+#define writeq_relaxed writeq
#define mmiowb()
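The pattern running through this hunk: each MMIO reader previously issued a barrier only after the access, and now issues one before it as well, so the read cannot be reordered ahead of earlier memory accesses (such as stores that set up a buffer the device is about to look at). The *_relaxed readers stop aliasing the plain __raw accessors and instead keep just the leading barrier, matching the comment added in arch/alpha/kernel/io.c below: relaxed accessors must stay ordered with respect to each other, but not with respect to ordinary memory traffic.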
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index 87d8c4f0307d..7295967b5028 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -30,11 +30,13 @@
* Address valid if:
* - "addr" doesn't have any high-bits set
* - AND "size" doesn't have any high-bits set
- * - AND "addr+size" doesn't have any high-bits set
+ * - AND "addr+size-(size != 0)" doesn't have any high-bits set
* - OR we are in kernel mode.
*/
-#define __access_ok(addr, size) \
- ((get_fs().seg & (addr | size | (addr+size))) == 0)
+#define __access_ok(addr, size) ({ \
+ unsigned long __ao_a = (addr), __ao_b = (size); \
+ unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \
+ (get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; })
#define access_ok(type, addr, size) \
({ \
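The subtlety in the new __access_ok() is the "- (size != 0)" term: the range check should apply to the last byte actually touched, addr + size - 1, not to the byte one past it, and the !!size correction keeps a zero-length check from computing addr - 1. A small user-space sketch of the behavioural difference, with a made-up segment mask standing in for get_fs().seg (purely illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up segment mask: pretend valid user addresses are the low 4 GiB. */
    #define SEG_MASK (~(uint64_t)0xffffffff)

    static int ok_old(uint64_t addr, uint64_t size)
    {
            return (SEG_MASK & (addr | size | (addr + size))) == 0;
    }

    static int ok_new(uint64_t addr, uint64_t size)
    {
            uint64_t end = addr + size - (size != 0);   /* last byte touched */

            return (SEG_MASK & (addr | size | end)) == 0;
    }

    int main(void)
    {
            /* A 16-byte access ending exactly at the segment limit: addr + size
             * is one past the last valid byte, so the old check rejects a range
             * whose every touched byte is in fact valid. */
            uint64_t addr = 0xfffffff0, size = 0x10;

            printf("old: %d  new: %d\n", ok_old(addr, size), ok_new(addr, size));
            /* Zero-length checks are also safe: end == addr, no underflow. */
            printf("old: %d  new: %d\n", ok_old(0, 0), ok_new(0, 0));
            return 0;
    }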
diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c
index c025a3e5e357..938de13adfbf 100644
--- a/arch/alpha/kernel/io.c
+++ b/arch/alpha/kernel/io.c
@@ -16,21 +16,27 @@
unsigned int
ioread8(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
mb();
return ret;
}
unsigned int ioread16(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
mb();
return ret;
}
unsigned int ioread32(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
mb();
return ret;
}
@@ -148,28 +154,36 @@ EXPORT_SYMBOL(__raw_writeq);
u8 readb(const volatile void __iomem *addr)
{
- u8 ret = __raw_readb(addr);
+ u8 ret;
+ mb();
+ ret = __raw_readb(addr);
mb();
return ret;
}
u16 readw(const volatile void __iomem *addr)
{
- u16 ret = __raw_readw(addr);
+ u16 ret;
+ mb();
+ ret = __raw_readw(addr);
mb();
return ret;
}
u32 readl(const volatile void __iomem *addr)
{
- u32 ret = __raw_readl(addr);
+ u32 ret;
+ mb();
+ ret = __raw_readl(addr);
mb();
return ret;
}
u64 readq(const volatile void __iomem *addr)
{
- u64 ret = __raw_readq(addr);
+ u64 ret;
+ mb();
+ ret = __raw_readq(addr);
mb();
return ret;
}
@@ -207,6 +221,38 @@ EXPORT_SYMBOL(writew);
EXPORT_SYMBOL(writel);
EXPORT_SYMBOL(writeq);
+/*
+ * The _relaxed functions must be ordered w.r.t. each other, but they don't
+ * have to be ordered w.r.t. other memory accesses.
+ */
+u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readb(addr);
+}
+
+u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readw(addr);
+}
+
+u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readl(addr);
+}
+
+u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readq(addr);
+}
+
+EXPORT_SYMBOL(readb_relaxed);
+EXPORT_SYMBOL(readw_relaxed);
+EXPORT_SYMBOL(readl_relaxed);
+EXPORT_SYMBOL(readq_relaxed);
/*
* Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 16e6cc22e25c..99c55f015ce8 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -91,14 +91,9 @@ libs-y += arch/arc/lib/ $(LIBGCC)
boot := arch/arc/boot
-#default target for make without any arguments.
-KBUILD_IMAGE := $(boot)/bootpImage
-
-all: bootpImage
-bootpImage: vmlinux
-
-boot_targets += uImage uImage.bin uImage.gz
+boot_targets := uImage uImage.bin uImage.gz uImage.lzma
+PHONY += $(boot_targets)
$(boot_targets): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index d131c54acd3e..f6b6e3c9ca8a 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -83,6 +83,8 @@
arcpct: pct {
compatible = "snps,archs-pct";
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <20>;
};
/* TIMER0 with interrupt for clockevent */
@@ -173,7 +175,7 @@
reg = <0x8000 0x2000>;
interrupts = <10>;
interrupt-names = "macirq";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
snps,pbl = <32>;
snps,multicast-filter-bins = <256>;
clocks = <&gmacclk>;
@@ -191,7 +193,7 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,dwmac-mdio";
- phy0: ethernet-phy@0 {
+ phy0: ethernet-phy@0 { /* Micrel KSZ9031 */
reg = <0>;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index aa2d6da9d187..12c74e826530 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -26,7 +26,7 @@
#define R_ARC_32_PCREL 0x31
/*to set parameters in the core dumps */
-#define ELF_ARCH EM_ARCOMPACT
+#define ELF_ARCH EM_ARC_INUSE
#define ELF_CLASS ELFCLASS32
#ifdef CONFIG_CPU_BIG_ENDIAN
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 09ddddf71cc5..a70fef79c405 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -13,6 +13,7 @@
#ifndef __ASSEMBLY__
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
struct vm_area_struct;
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 85d9ea4a0acc..37ad245cf989 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -169,7 +169,7 @@ tracesys:
; Do the Sys Call as we normally would.
; Validate the Sys Call number
- cmp r8, NR_syscalls
+ cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi tracesys_exit
@@ -252,7 +252,7 @@ ENTRY(EV_Trap)
;============ Normal syscall case
; syscall num shd not exceed the total system calls avail
- cmp r8, NR_syscalls
+ cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi .Lret_from_system_call
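The off-by-one matters because valid syscall numbers run from 0 to NR_syscalls - 1: with "cmp r8, NR_syscalls" the bhi (unsigned higher) path only fires for r8 > NR_syscalls, so a request for syscall number NR_syscalls itself slipped past the check; comparing against NR_syscalls - 1 makes that case return -ENOSYS as intended.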
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 89c97dcfa360..c10994daee39 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -15,6 +15,7 @@
#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/module.h>
+#include <linux/sizes.h>
#include <linux/cpu.h>
#include <linux/of_fdt.h>
#include <linux/of.h>
@@ -406,12 +407,12 @@ static void arc_chk_core_config(void)
if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
panic("Linux built with incorrect DCCM Base address\n");
- if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
+ if (CONFIG_ARC_DCCM_SZ * SZ_1K != cpu->dccm.sz)
panic("Linux built with incorrect DCCM Size\n");
#endif
#ifdef CONFIG_ARC_HAS_ICCM
- if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
+ if (CONFIG_ARC_ICCM_SZ * SZ_1K != cpu->iccm.sz)
panic("Linux built with incorrect ICCM Size\n");
#endif
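The SZ_1K factor (hence the new linux/sizes.h include) reflects that CONFIG_ARC_DCCM_SZ and CONFIG_ARC_ICCM_SZ are expressed in kilobytes while the sizes probed from the hardware are in bytes, so the old byte-vs-KB comparison could only match by accident.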
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 48685445002e..da243420bcb5 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -99,7 +99,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sizeof(sf->uc.uc_mcontext.regs.scratch));
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
- return err;
+ return err ? -EFAULT : 0;
}
static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
@@ -113,7 +113,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
&(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
if (err)
- return err;
+ return -EFAULT;
set_current_blocked(&set);
regs->bta = uregs.scratch.bta;
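Mapping the result to -EFAULT matters because __copy_to_user()/__copy_from_user() return the number of bytes left uncopied rather than an errno, so propagating that count directly would hand user space a positive, meaningless return value instead of a proper fault error.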
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index bf40e06f3fb8..a211e87aa6d9 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -41,15 +41,15 @@
#ifdef CONFIG_ARC_DW2_UNWIND
-static void seed_unwind_frame_info(struct task_struct *tsk,
- struct pt_regs *regs,
- struct unwind_frame_info *frame_info)
+static int
+seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
+ struct unwind_frame_info *frame_info)
{
/*
* synchronous unwinding (e.g. dump_stack)
* - uses current values of SP and friends
*/
- if (tsk == NULL && regs == NULL) {
+ if (regs == NULL && (tsk == NULL || tsk == current)) {
unsigned long fp, sp, blink, ret;
frame_info->task = current;
@@ -68,11 +68,15 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
frame_info->call_frame = 0;
} else if (regs == NULL) {
/*
- * Asynchronous unwinding of sleeping task
- * - Gets SP etc from task's pt_regs (saved bottom of kernel
- * mode stack of task)
+ * Asynchronous unwinding of a likely sleeping task
+ * - first ensure it is actually sleeping
+ * - if so, it will be in __switch_to, kernel mode SP of task
+ * is safe-kept and BLINK at a well known location in there
*/
+ if (tsk->state == TASK_RUNNING)
+ return -1;
+
frame_info->task = tsk;
frame_info->regs.r27 = TSK_K_FP(tsk);
@@ -106,6 +110,8 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
frame_info->regs.r63 = regs->ret;
frame_info->call_frame = 0;
}
+
+ return 0;
}
#endif
@@ -115,11 +121,12 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
- int ret = 0;
+ int ret = 0, cnt = 0;
unsigned int address;
struct unwind_frame_info frame_info;
- seed_unwind_frame_info(tsk, regs, &frame_info);
+ if (seed_unwind_frame_info(tsk, regs, &frame_info))
+ return 0;
while (1) {
address = UNW_PC(&frame_info);
@@ -135,6 +142,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
break;
frame_info.regs.r63 = frame_info.regs.r31;
+
+ if (cnt++ > 128) {
+ printk("unwinder looping too long, aborting !\n");
+ return 0;
+ }
}
return address; /* return the last address it saw */
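Two guards are added to the unwinder here: seeding now fails cleanly (and the walk is skipped) when asked to unwind a task that is actually running on another CPU, since its saved registers cannot be trusted while it executes, and the frame-walking loop bails out after 128 iterations so a corrupt or cyclic call chain can no longer wedge the CPU inside the unwinder.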
diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig
index ce908e2c5282..71378bfec8d0 100644
--- a/arch/arc/plat-eznps/Kconfig
+++ b/arch/arc/plat-eznps/Kconfig
@@ -6,6 +6,7 @@
menuconfig ARC_PLAT_EZNPS
bool "\"EZchip\" ARC dev platform"
+ depends on ISA_ARCOMPACT
select CPU_BIG_ENDIAN
select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
select EZNPS_GIC
diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
index 4f6a1673b3a6..ddfca2c3357a 100644
--- a/arch/arc/plat-eznps/include/plat/ctop.h
+++ b/arch/arc/plat-eznps/include/plat/ctop.h
@@ -43,7 +43,6 @@
#define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C)
#define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030)
#define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080)
-#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088)
#define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C)
#define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300)
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index c285a83cbf08..df35ea1912e8 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -11,5 +11,6 @@ menuconfig ARC_SOC_HSDK
select ARC_HAS_ACCL_REGS
select ARC_IRQ_NO_AUTOSAVE
select CLK_HSDK
+ select RESET_CONTROLLER
select RESET_HSDK
select MIGHT_HAVE_PCI
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e2f7c50dbace..1877da816f65 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -592,8 +592,10 @@ config ARCH_S3C24XX
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_S3C_RTC if RTC_CLASS
select NEED_MACH_IO_H
+ select S3C2410_WATCHDOG
select SAMSUNG_ATAGS
select USE_OF
+ select WATCHDOG
help
Samsung S3C2410, S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443
and S3C2450 SoCs based systems, such as the Simtec Electronics BAST
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 1f5a5ffe7fcf..ec2327a3796d 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -120,9 +120,9 @@ ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
asflags-y := -DZIMAGE
# Supply kernel BSS size to the decompressor via a linker symbol.
-KBSS_SZ = $(shell echo $$(($$($(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \
- sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
- -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
+KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
+ sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \
+ -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) )
LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
# Supply ZRELADDR to the decompressor via a linker symbol.
ifneq ($(CONFIG_AUTO_ZRELADDR),y)
@@ -166,7 +166,7 @@ $(obj)/bswapsdi2.S: $(srctree)/arch/$(SRCARCH)/lib/bswapsdi2.S
# The .data section is already discarded by the linker script so no need
# to bother about it here.
check_for_bad_syms = \
-bad_syms=$$($(CROSS_COMPILE)nm $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \
+bad_syms=$$($(NM) $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \
[ -z "$$bad_syms" ] || \
( echo "following symbols must have non local/private scope:" >&2; \
echo "$$bad_syms" >&2; rm -f $@; false )
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index e205bbbe2794..69e661f574a0 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1090,9 +1090,9 @@ __armv4_mmu_cache_off:
__armv7_mmu_cache_off:
mrc p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
- bic r0, r0, #0x000d
+ bic r0, r0, #0x0005
#else
- bic r0, r0, #0x000c
+ bic r0, r0, #0x0004
#endif
mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
mov r12, lr
diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S
index 2b963d8e76dd..89a8f7588c78 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.S
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -46,7 +46,7 @@ SECTIONS
}
.table : ALIGN(4) {
_table_start = .;
- LONG(ZIMAGE_MAGIC(2))
+ LONG(ZIMAGE_MAGIC(4))
LONG(ZIMAGE_MAGIC(0x5a534c4b))
LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start))
LONG(ZIMAGE_MAGIC(_kernel_bss_size))
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index d3dd6a16e70a..e321acaf35d6 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -39,6 +39,9 @@
ethernet1 = &cpsw_emac1;
spi0 = &spi0;
spi1 = &spi1;
+ mmc0 = &mmc1;
+ mmc1 = &mmc2;
+ mmc2 = &mmc3;
};
cpus {
diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts
index 768b6c5d2129..fde4c302f08e 100644
--- a/arch/arm/boot/dts/armada-385-turris-omnia.dts
+++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts
@@ -236,6 +236,7 @@
status = "okay";
compatible = "ethernet-phy-id0141.0DD1", "ethernet-phy-ieee802.3-c22";
reg = <1>;
+ marvell,reg-init = <3 18 0 0x4985>;
/* irq is connected to &pcawan pin 7 */
};
diff --git a/arch/arm/boot/dts/armada-388-helios4.dts b/arch/arm/boot/dts/armada-388-helios4.dts
index 705adfa8c680..a94758090fb0 100644
--- a/arch/arm/boot/dts/armada-388-helios4.dts
+++ b/arch/arm/boot/dts/armada-388-helios4.dts
@@ -70,6 +70,9 @@
system-leds {
compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&helios_system_led_pins>;
+
status-led {
label = "helios4:green:status";
gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
@@ -86,6 +89,9 @@
io-leds {
compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&helios_io_led_pins>;
+
sata1-led {
label = "helios4:green:ata1";
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
@@ -121,11 +127,15 @@
fan1: j10-pwm {
compatible = "pwm-fan";
pwms = <&gpio1 9 40000>; /* Target freq:25 kHz */
+ pinctrl-names = "default";
+ pinctrl-0 = <&helios_fan1_pins>;
};
fan2: j17-pwm {
compatible = "pwm-fan";
pwms = <&gpio1 23 40000>; /* Target freq:25 kHz */
+ pinctrl-names = "default";
+ pinctrl-0 = <&helios_fan2_pins>;
};
usb2_phy: usb2-phy {
@@ -291,16 +301,22 @@
"mpp39", "mpp40";
marvell,function = "sd0";
};
- helios_led_pins: helios-led-pins {
- marvell,pins = "mpp24", "mpp25",
- "mpp49", "mpp50",
+ helios_system_led_pins: helios-system-led-pins {
+ marvell,pins = "mpp24", "mpp25";
+ marvell,function = "gpio";
+ };
+ helios_io_led_pins: helios-io-led-pins {
+ marvell,pins = "mpp49", "mpp50",
"mpp52", "mpp53",
"mpp54";
marvell,function = "gpio";
};
- helios_fan_pins: helios-fan-pins {
- marvell,pins = "mpp41", "mpp43",
- "mpp48", "mpp55";
+ helios_fan1_pins: helios_fan1_pins {
+ marvell,pins = "mpp41", "mpp43";
+ marvell,function = "gpio";
+ };
+ helios_fan2_pins: helios_fan2_pins {
+ marvell,pins = "mpp48", "mpp55";
marvell,function = "gpio";
};
microsom_spi1_cs_pins: spi1-cs-pins {
diff --git a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
index 3e7d093d7a9a..966d9a6c40fc 100644
--- a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
+++ b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
@@ -266,11 +266,6 @@
reg = <0x11000 0x100>;
};
-&i2c1 {
- compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
- reg = <0x11100 0x100>;
-};
-
&mpic {
reg = <0x20a00 0x2d0>, <0x21070 0x58>;
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
index 22dade6393d0..d1dbe3b6ad5a 100644
--- a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
@@ -22,9 +22,9 @@
#size-cells = <1>;
ranges;
- vga_memory: framebuffer@7f000000 {
+ vga_memory: framebuffer@9f000000 {
no-map;
- reg = <0x7f000000 0x01000000>;
+ reg = <0x9f000000 0x01000000>; /* 16M */
};
};
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index 69f6b9d2e7e7..2af093759143 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -312,6 +312,7 @@
compatible = "aspeed,ast2400-ibt-bmc";
reg = <0xc0 0x18>;
interrupts = <8>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index f2e1015d75ab..bcd57e6c51e5 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -372,6 +372,7 @@
compatible = "aspeed,ast2500-ibt-bmc";
reg = <0xc0 0x18>;
interrupts = <8>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
index cf0087b4c9e1..ea02a51c71e2 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
@@ -67,8 +67,8 @@
pinctrl-0 = <&pinctrl_macb0_default>;
phy-mode = "rmii";
- ethernet-phy@0 {
- reg = <0x0>;
+ ethernet-phy@7 {
+ reg = <0x7>;
interrupt-parent = <&pioA>;
interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
index 2214bfe7aa20..7c611f5555a4 100644
--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
@@ -40,7 +40,7 @@
ahb {
usb0: gadget@300000 {
- atmel,vbus-gpio = <&pioA PIN_PA27 GPIO_ACTIVE_HIGH>;
+ atmel,vbus-gpio = <&pioA PIN_PB11 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usba_vbus>;
status = "okay";
@@ -125,8 +125,6 @@
bus-width = <8>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdmmc0_default>;
- non-removable;
- mmc-ddr-1_8v;
status = "okay";
};
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index 02c1d2958d78..74440dad4335 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -243,6 +243,11 @@
atmel,pins =
<AT91_PIOE 9 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PE9, conflicts with A9 */
};
+ pinctrl_usb_default: usb_default {
+ atmel,pins =
+ <AT91_PIOE 3 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
+ AT91_PIOE 4 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
};
};
};
@@ -260,6 +265,8 @@
&pioE 3 GPIO_ACTIVE_LOW
&pioE 4 GPIO_ACTIVE_LOW
>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb_default>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index 7d554b9ab27f..e998d72d8b10 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -170,6 +170,11 @@
atmel,pins =
<AT91_PIOE 31 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>;
};
+ pinctrl_usb_default: usb_default {
+ atmel,pins =
+ <AT91_PIOE 11 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
+ AT91_PIOE 14 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
pinctrl_key_gpio: key_gpio_0 {
atmel,pins =
<AT91_PIOE 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
@@ -195,6 +200,8 @@
&pioE 11 GPIO_ACTIVE_HIGH
&pioE 14 GPIO_ACTIVE_HIGH
>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb_default>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
index ad495f5a5790..cdf016232fb7 100644
--- a/arch/arm/boot/dts/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/at91sam9rl.dtsi
@@ -277,23 +277,26 @@
atmel,adc-use-res = "highres";
trigger0 {
- trigger-name = "timer-counter-0";
+ trigger-name = "external-rising";
trigger-value = <0x1>;
+ trigger-external;
};
+
trigger1 {
- trigger-name = "timer-counter-1";
- trigger-value = <0x3>;
+ trigger-name = "external-falling";
+ trigger-value = <0x2>;
+ trigger-external;
};
trigger2 {
- trigger-name = "timer-counter-2";
- trigger-value = <0x5>;
+ trigger-name = "external-any";
+ trigger-value = <0x3>;
+ trigger-external;
};
trigger3 {
- trigger-name = "external";
- trigger-value = <0x13>;
- trigger-external;
+ trigger-name = "continuous";
+ trigger-value = <0x6>;
};
};
diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi
index e4d49731287f..dd71ab08136b 100644
--- a/arch/arm/boot/dts/bcm-hr2.dtsi
+++ b/arch/arm/boot/dts/bcm-hr2.dtsi
@@ -75,7 +75,7 @@
timer@20200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x20200 0x100>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
clocks = <&periph_clk>;
};
@@ -83,7 +83,7 @@
compatible = "arm,cortex-a9-twd-timer";
reg = <0x20600 0x20>;
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) |
- IRQ_TYPE_LEVEL_HIGH)>;
+ IRQ_TYPE_EDGE_RISING)>;
clocks = <&periph_clk>;
};
@@ -91,7 +91,7 @@
compatible = "arm,cortex-a9-twd-wdt";
reg = <0x20620 0x20>;
interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) |
- IRQ_TYPE_LEVEL_HIGH)>;
+ IRQ_TYPE_EDGE_RISING)>;
clocks = <&periph_clk>;
};
@@ -217,7 +217,7 @@
};
qspi: spi@27200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x027200 0x184>,
<0x027000 0x124>,
<0x11c408 0x004>,
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index 2b219addeb44..b395cb195db2 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -249,10 +249,10 @@
status = "disabled";
};
- mailbox: mailbox@25000 {
+ mailbox: mailbox@25c00 {
compatible = "brcm,iproc-fa2-mbox";
- reg = <0x25000 0x445>;
- interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x25c00 0x400>;
+ interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
brcm,rx-status-len = <32>;
brcm,use-bcm-hdr;
@@ -274,7 +274,7 @@
};
qspi: spi@27200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x027200 0x184>,
<0x027000 0x124>,
<0x11c408 0x004>,
diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
index 5fcadb9cf992..9f7145b1cc5e 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
@@ -25,7 +25,7 @@
leds {
act {
- gpios = <&gpio 47 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpio 47 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index c9322a56300d..f9add515301f 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -476,6 +476,7 @@
"dsi0_ddr2",
"dsi0_ddr";
+ status = "disabled";
};
thermal: thermal@7e212000 {
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index a678fb7c9e3b..c91716d5980c 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -445,7 +445,7 @@
};
spi@18029200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x18029200 0x184>,
<0x18029000 0x124>,
<0x1811b408 0x004>,
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index e97ef16ce68a..0c0781a37c5a 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -312,6 +312,7 @@
#address-cells = <1>;
ranges = <0x51000000 0x51000000 0x3000
0x0 0x20000000 0x10000000>;
+ dma-ranges;
/**
* To enable PCI endpoint mode, disable the pcie1_rc
* node and enable pcie1_ep mode.
@@ -325,7 +326,6 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
- dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
@@ -368,6 +368,7 @@
#address-cells = <1>;
ranges = <0x51800000 0x51800000 0x3000
0x0 0x30000000 0x10000000>;
+ dma-ranges;
status = "disabled";
pcie2_rc: pcie@51800000 {
reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>;
@@ -378,7 +379,6 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
- dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
index 216e1d1a69c7..473d6721b788 100644
--- a/arch/arm/boot/dts/dra76x.dtsi
+++ b/arch/arm/boot/dts/dra76x.dtsi
@@ -35,8 +35,8 @@
interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "int0", "int1";
- clocks = <&mcan_clk>, <&l3_iclk_div>;
- clock-names = "cclk", "hclk";
+ clocks = <&l3_iclk_div>, <&mcan_clk>;
+ clock-names = "hclk", "cclk";
bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
};
};
diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi
index 7c22cbf6f3d4..6e30db644c83 100644
--- a/arch/arm/boot/dts/exynos3250-artik5.dtsi
+++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi
@@ -68,7 +68,7 @@
s2mps14_pmic@66 {
compatible = "samsung,s2mps14-pmic";
interrupt-parent = <&gpx3>;
- interrupts = <5 IRQ_TYPE_NONE>;
+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s2mps14_irq>;
reg = <0x66>;
diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts
index 6ffedf4ed9f2..d343dc13ceec 100644
--- a/arch/arm/boot/dts/exynos3250-monk.dts
+++ b/arch/arm/boot/dts/exynos3250-monk.dts
@@ -188,7 +188,7 @@
s2mps14_pmic@66 {
compatible = "samsung,s2mps14-pmic";
interrupt-parent = <&gpx0>;
- interrupts = <7 IRQ_TYPE_NONE>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
reg = <0x66>;
wakeup-source;
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index 2a6b828c01b7..29df4cfa9165 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -253,7 +253,7 @@
s2mps14_pmic@66 {
compatible = "samsung,s2mps14-pmic";
interrupt-parent = <&gpx0>;
- interrupts = <7 IRQ_TYPE_NONE>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
reg = <0x66>;
wakeup-source;
diff --git a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
index 30eee5942eff..945ee8f55e65 100644
--- a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
+++ b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
@@ -50,7 +50,7 @@
i2c_cm36651: i2c-gpio-2 {
compatible = "i2c-gpio";
- gpios = <&gpf0 0 GPIO_ACTIVE_LOW>, <&gpf0 1 GPIO_ACTIVE_LOW>;
+ gpios = <&gpf0 0 GPIO_ACTIVE_HIGH>, <&gpf0 1 GPIO_ACTIVE_HIGH>;
i2c-gpio,delay-us = <2>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi
index c0476c290977..60fbad25b5f2 100644
--- a/arch/arm/boot/dts/exynos4412-midas.dtsi
+++ b/arch/arm/boot/dts/exynos4412-midas.dtsi
@@ -139,7 +139,7 @@
max77693@66 {
compatible = "maxim,max77693";
interrupt-parent = <&gpx1>;
- interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&max77693_irq>;
reg = <0x66>;
@@ -187,7 +187,7 @@
max77693-fuel-gauge@36 {
compatible = "maxim,max17047";
interrupt-parent = <&gpx2>;
- interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&max77693_fuel_irq>;
reg = <0x36>;
@@ -579,7 +579,7 @@
max77686: max77686_pmic@9 {
compatible = "maxim,max77686";
interrupt-parent = <&gpx0>;
- interrupts = <7 IRQ_TYPE_NONE>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
pinctrl-0 = <&max77686_irq>;
pinctrl-names = "default";
reg = <0x09>;
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 00820d239753..dbca8eeefae1 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -265,7 +265,7 @@
max77686: pmic@9 {
compatible = "maxim,max77686";
interrupt-parent = <&gpx3>;
- interrupts = <2 IRQ_TYPE_NONE>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&max77686_irq>;
reg = <0x09>;
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index d5e66189ed2a..594b246afbed 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -132,7 +132,7 @@
compatible = "maxim,max77686";
reg = <0x09>;
interrupt-parent = <&gpx3>;
- interrupts = <2 IRQ_TYPE_NONE>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&max77686_irq>;
wakeup-source;
diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
index fd9226d3b207..3981acb00b5e 100644
--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
@@ -292,7 +292,7 @@
max77686: max77686@9 {
compatible = "maxim,max77686";
interrupt-parent = <&gpx3>;
- interrupts = <2 IRQ_TYPE_NONE>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&max77686_irq>;
wakeup-source;
diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
index 3d501926c227..2355c5316484 100644
--- a/arch/arm/boot/dts/exynos5250-spring.dts
+++ b/arch/arm/boot/dts/exynos5250-spring.dts
@@ -108,7 +108,7 @@
compatible = "samsung,s5m8767-pmic";
reg = <0x66>;
interrupt-parent = <&gpx3>;
- interrupts = <2 IRQ_TYPE_NONE>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s5m8767_irq &s5m8767_dvs &s5m8767_ds>;
wakeup-source;
diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
index a2046f5f998c..840a854ee838 100644
--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
+++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
@@ -324,6 +324,8 @@
regulator-name = "vddq_lcd";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ /* Supplies also GPK and GPJ */
+ regulator-always-on;
};
ldo8_reg: LDO8 {
@@ -626,11 +628,11 @@
};
&usbdrd_dwc3_0 {
- dr_mode = "host";
+ dr_mode = "peripheral";
};
&usbdrd_dwc3_1 {
- dr_mode = "peripheral";
+ dr_mode = "host";
};
&usbdrd3_0 {
diff --git a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
index 369a8a7f2105..481ee99aa9c9 100644
--- a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
@@ -560,6 +560,34 @@
interrupt-controller;
#interrupt-cells = <2>;
};
+
+ usb3_1_oc: usb3-1-oc {
+ samsung,pins = "gpk2-4", "gpk2-5";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
+
+ usb3_1_vbusctrl: usb3-1-vbusctrl {
+ samsung,pins = "gpk2-6", "gpk2-7";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
+
+ usb3_0_oc: usb3-0-oc {
+ samsung,pins = "gpk3-0", "gpk3-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
+
+ usb3_0_vbusctrl: usb3-0-vbusctrl {
+ samsung,pins = "gpk3-2", "gpk3-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+ };
};
&pinctrl_2 {
diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi
index 57fc9c949e54..95b794b1ea62 100644
--- a/arch/arm/boot/dts/exynos5410.dtsi
+++ b/arch/arm/boot/dts/exynos5410.dtsi
@@ -392,6 +392,8 @@
&usbdrd3_0 {
clocks = <&clock CLK_USBD300>;
clock-names = "usbdrd30";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_0_oc>, <&usb3_0_vbusctrl>;
};
&usbdrd_phy0 {
@@ -403,6 +405,8 @@
&usbdrd3_1 {
clocks = <&clock CLK_USBD301>;
clock-names = "usbdrd30";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_1_oc>, <&usb3_1_vbusctrl>;
};
&usbdrd_dwc3_1 {
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index a370857beac0..fbaca74cbaea 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -84,7 +84,7 @@
reg = <0x66>;
interrupt-parent = <&gpx3>;
- interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s2mps11_irq>;
diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
index d476ba0f07b6..ba7187a74be3 100644
--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
@@ -136,7 +136,7 @@
samsung,s2mps11-acokb-ground;
interrupt-parent = <&gpx0>;
- interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s2mps11_irq>;
diff --git a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
index bfd4946cf9fe..8b63b6593d3a 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
+++ b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
@@ -81,8 +81,8 @@
imx27-phycard-s-rdk {
pinctrl_i2c1: i2c1grp {
fsl,pins = <
- MX27_PAD_I2C2_SDA__I2C2_SDA 0x0
- MX27_PAD_I2C2_SCL__I2C2_SCL 0x0
+ MX27_PAD_I2C_DATA__I2C_DATA 0x0
+ MX27_PAD_I2C_CLK__I2C_CLK 0x0
>;
};
diff --git a/arch/arm/boot/dts/imx50-evk.dts b/arch/arm/boot/dts/imx50-evk.dts
index a25da415cb02..907339bc81e5 100644
--- a/arch/arm/boot/dts/imx50-evk.dts
+++ b/arch/arm/boot/dts/imx50-evk.dts
@@ -59,7 +59,7 @@
MX50_PAD_CSPI_MISO__CSPI_MISO 0x00
MX50_PAD_CSPI_MOSI__CSPI_MOSI 0x00
MX50_PAD_CSPI_SS0__GPIO4_11 0xc4
- MX50_PAD_ECSPI1_MOSI__CSPI_SS1 0xf4
+ MX50_PAD_ECSPI1_MOSI__GPIO4_13 0x84
>;
};
diff --git a/arch/arm/boot/dts/imx6q-b450v3.dts b/arch/arm/boot/dts/imx6q-b450v3.dts
index 3ec58500e9c2..25bf45659737 100644
--- a/arch/arm/boot/dts/imx6q-b450v3.dts
+++ b/arch/arm/boot/dts/imx6q-b450v3.dts
@@ -65,13 +65,6 @@
};
};
-&clks {
- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
- <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
- <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
-};
-
&ldb {
status = "okay";
diff --git a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts
index 5650a9b11091..0326711a8700 100644
--- a/arch/arm/boot/dts/imx6q-b650v3.dts
+++ b/arch/arm/boot/dts/imx6q-b650v3.dts
@@ -65,13 +65,6 @@
};
};
-&clks {
- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
- <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
- <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
-};
-
&ldb {
status = "okay";
diff --git a/arch/arm/boot/dts/imx6q-b850v3.dts b/arch/arm/boot/dts/imx6q-b850v3.dts
index 044a5bebe1c5..612f782ddaaa 100644
--- a/arch/arm/boot/dts/imx6q-b850v3.dts
+++ b/arch/arm/boot/dts/imx6q-b850v3.dts
@@ -53,17 +53,6 @@
};
};
-&clks {
- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
- <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
- <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
- <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>;
- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
- <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
- <&clks IMX6QDL_CLK_PLL2_PFD2_396M>,
- <&clks IMX6QDL_CLK_PLL2_PFD2_396M>;
-};
-
&ldb {
fsl,dual-channel;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
index d3cba09be0cb..c1f554348187 100644
--- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi
+++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
@@ -391,3 +391,18 @@
#interrupt-cells = <1>;
};
};
+
+&clks {
+ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+ <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
+ <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
+ <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>,
+ <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>,
+ <&clks IMX6QDL_CLK_IPU2_DI1_PRE_SEL>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+ <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
+ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
+ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
+ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>;
+};
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index b8044681006c..2df4694c0d3b 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -278,7 +278,7 @@
/* VDD_AUD_1P8: Audio codec */
reg_aud_1p8v: ldo3 {
- regulator-name = "vdd1p8";
+ regulator-name = "vdd1p8a";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index fe4e89d773f5..9499d113b139 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -423,6 +423,7 @@
pinctrl-0 = <&pinctrl_usdhc2>;
cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&vdd_sd1_reg>;
status = "disabled";
};
@@ -432,5 +433,6 @@
&pinctrl_usdhc3_cdwp>;
cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&vdd_sd0_reg>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
index 4f27861bbb32..4cc9858f7ff8 100644
--- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
@@ -97,7 +97,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 00d44a60972f..e64ff80c83c5 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -1013,9 +1013,8 @@
compatible = "fsl,imx6q-fec";
reg = <0x02188000 0x4000>;
interrupt-names = "int0", "pps";
- interrupts-extended =
- <&intc 0 118 IRQ_TYPE_LEVEL_HIGH>,
- <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>,
+ <0 119 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_ENET>,
<&clks IMX6QDL_CLK_ENET>,
<&clks IMX6QDL_CLK_ENET_REF>;
diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi
index 5f51f8e5c1fa..d91f92f944c5 100644
--- a/arch/arm/boot/dts/imx6qp.dtsi
+++ b/arch/arm/boot/dts/imx6qp.dtsi
@@ -77,7 +77,6 @@
};
&fec {
- /delete-property/interrupts-extended;
interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>,
<0 119 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 55d1872aa81a..9d19183f40e1 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -922,8 +922,10 @@
};
rngb: rngb@21b4000 {
+ compatible = "fsl,imx6sl-rngb", "fsl,imx25-rngb";
reg = <0x021b4000 0x4000>;
interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_DUMMY>;
};
weim: weim@21b8000 {
diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
index 3e39b9a1f35d..0093548d50ff 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
@@ -55,6 +55,8 @@
&mcbsp2 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcbsp2_pins>;
};
&charger {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
index 86c5644f558c..032e8dde1381 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
@@ -84,6 +84,8 @@
};
&mcbsp2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcbsp2_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index 9ad3df11db0d..abef034987a2 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -323,9 +323,6 @@
clocks = <&xtal_32k>, <&xtal>;
clock-names = "xtal_32k", "xtal";
-
- assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
- assigned-clock-rates = <208000000>;
};
};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 074b4ec520c6..b66b2bd1aa85 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -168,7 +168,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x1550000 0x0 0x10000>,
- <0x0 0x40000000 0x0 0x40000000>;
+ <0x0 0x40000000 0x0 0x20000000>;
reg-names = "QuadSPI", "QuadSPI-memory";
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "qspi_en", "qspi";
@@ -609,7 +609,7 @@
fsl,tmr-prsc = <2>;
fsl,tmr-add = <0xaaaaaaab>;
fsl,tmr-fiper1 = <999999995>;
- fsl,tmr-fiper2 = <99990>;
+ fsl,tmr-fiper2 = <999999995>;
fsl,max-adj = <499999999>;
};
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
index f57acf8f66b9..75de8134b1d1 100644
--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
@@ -16,8 +16,10 @@
#interrupt-cells = <2>;
#address-cells = <1>;
#size-cells = <0>;
- spi-max-frequency = <3000000>;
+ spi-max-frequency = <9600000>;
spi-cs-high;
+ spi-cpol;
+ spi-cpha;
cpcap_adc: adc {
compatible = "motorola,mapphone-cpcap-adc";
diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
index 2b760f90f38c..5375c6699843 100644
--- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
+++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
@@ -192,6 +192,7 @@
fixed-link {
speed = <1000>;
full-duplex;
+ pause;
};
};
};
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 4043ecb38016..0c8fcfb292bf 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -23,6 +23,9 @@
i2c0 = &i2c1;
i2c1 = &i2c2;
i2c2 = &i2c3;
+ mmc0 = &mmc1;
+ mmc1 = &mmc2;
+ mmc2 = &mmc3;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts
index cfcac0d73851..93d6fb6db578 100644
--- a/arch/arm/boot/dts/omap4-duovero-parlor.dts
+++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts
@@ -142,7 +142,7 @@
ethernet@gpmc {
reg = <5 0 0xff>;
interrupt-parent = <&gpio2>;
- interrupts = <12 IRQ_TYPE_EDGE_FALLING>; /* gpio_44 */
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>; /* gpio_44 */
phy-mode = "mii";
diff --git a/arch/arm/boot/dts/omap4-panda-es.dts b/arch/arm/boot/dts/omap4-panda-es.dts
index 19d02df8d8a5..70fd28120c27 100644
--- a/arch/arm/boot/dts/omap4-panda-es.dts
+++ b/arch/arm/boot/dts/omap4-panda-es.dts
@@ -49,7 +49,7 @@
button_pins: pinmux_button_pins {
pinctrl-single,pins = <
- OMAP4_IOPAD(0x11b, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */
+ OMAP4_IOPAD(0x0fc, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */
>;
};
};
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 1a96d4317c97..046e77024567 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -25,6 +25,11 @@
i2c1 = &i2c2;
i2c2 = &i2c3;
i2c3 = &i2c4;
+ mmc0 = &mmc1;
+ mmc1 = &mmc2;
+ mmc2 = &mmc3;
+ mmc3 = &mmc4;
+ mmc4 = &mmc5;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
@@ -516,7 +521,7 @@
status = "disabled";
};
- target-module@56000000 {
+ sgx_module: target-module@56000000 {
compatible = "ti,sysc-omap4", "ti,sysc";
ti,hwmods = "gpu";
reg = <0x5601fc00 0x4>,
diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi
index cbcdcb4e7d1c..6e320efd9fc1 100644
--- a/arch/arm/boot/dts/omap443x.dtsi
+++ b/arch/arm/boot/dts/omap443x.dtsi
@@ -33,10 +33,12 @@
};
ocp {
+ /* 4430 has only gpio_86 tshut and no talert interrupt */
bandgap: bandgap@4a002260 {
reg = <0x4a002260 0x4
0x4a00232C 0x4>;
compatible = "ti,omap4430-bandgap";
+ gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
#thermal-sensor-cells = <0>;
};
@@ -74,3 +76,13 @@
};
/include/ "omap443x-clocks.dtsi"
+
+/*
+ * Use dpll_per for sgx at 153.6MHz like droid4 stock v3.0.8 Android kernel
+ */
+&sgx_module {
+ assigned-clocks = <&l3_gfx_clkctrl OMAP4_GPU_CLKCTRL 24>,
+ <&dpll_per_m7x2_ck>;
+ assigned-clock-rates = <0>, <153600000>;
+ assigned-clock-parents = <&dpll_per_m7x2_ck>;
+};
diff --git a/arch/arm/boot/dts/omap44xx-clocks.dtsi b/arch/arm/boot/dts/omap44xx-clocks.dtsi
index 279ff2f419df..c654588f9e8c 100644
--- a/arch/arm/boot/dts/omap44xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap44xx-clocks.dtsi
@@ -773,14 +773,6 @@
ti,max-div = <2>;
};
- sha2md5_fck: sha2md5_fck@15c8 {
- #clock-cells = <0>;
- compatible = "ti,gate-clock";
- clocks = <&l3_div_ck>;
- ti,bit-shift = <1>;
- reg = <0x15c8>;
- };
-
usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
#clock-cells = <0>;
compatible = "ti,gate-clock";
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 3c0bafe0fb05..cf66c374de4f 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -26,6 +26,11 @@
i2c2 = &i2c3;
i2c3 = &i2c4;
i2c4 = &i2c5;
+ mmc0 = &mmc1;
+ mmc1 = &mmc2;
+ mmc2 = &mmc3;
+ mmc3 = &mmc4;
+ mmc4 = &mmc5;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
diff --git a/arch/arm/boot/dts/owl-s500.dtsi b/arch/arm/boot/dts/owl-s500.dtsi
index 43c9980a4260..75a76842c270 100644
--- a/arch/arm/boot/dts/owl-s500.dtsi
+++ b/arch/arm/boot/dts/owl-s500.dtsi
@@ -85,21 +85,21 @@
global_timer: timer@b0020200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0xb0020200 0x100>;
- interrupts = <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
status = "disabled";
};
twd_timer: timer@b0020600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0xb0020600 0x20>;
- interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
status = "disabled";
};
twd_wdt: wdt@b0020620 {
compatible = "arm,cortex-a9-twd-wdt";
reg = <0xb0020620 0xe0>;
- interrupts = <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
index a1266cf8776c..8362c6a3bcd3 100644
--- a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
+++ b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
@@ -54,18 +54,21 @@
emac: gem@30000 {
compatible = "cadence,gem";
reg = <0x30000 0x10000>;
+ interrupt-parent = <&vic0>;
interrupts = <31>;
};
dmac1: dmac@40000 {
compatible = "snps,dw-dmac";
reg = <0x40000 0x10000>;
+ interrupt-parent = <&vic0>;
interrupts = <25>;
};
dmac2: dmac@50000 {
compatible = "snps,dw-dmac";
reg = <0x50000 0x10000>;
+ interrupt-parent = <&vic0>;
interrupts = <26>;
};
@@ -243,6 +246,7 @@
axi2pico@c0000000 {
compatible = "picochip,axi2pico-pc3x2";
reg = <0xc0000000 0x10000>;
+ interrupt-parent = <&vic0>;
interrupts = <13 14 15 16 17 18 19 20 21>;
};
};
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index dd865f3c2eda..4447f45f0cba 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -131,7 +131,14 @@
cmt1: timer@e6130000 {
compatible = "renesas,r8a73a4-cmt1", "renesas,rcar-gen2-cmt1";
reg = <0 0xe6130000 0 0x1004>;
- interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A73A4_CLK_CMT1>;
clock-names = "fck";
power-domains = <&pd_c5>;
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index 383cba68dbba..e12ea97eae27 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -479,7 +479,7 @@
cpg_clocks: cpg_clocks@e6150000 {
compatible = "renesas,r8a7740-cpg-clocks";
reg = <0xe6150000 0x10000>;
- clocks = <&extal1_clk>, <&extalr_clk>;
+ clocks = <&extal1_clk>, <&extal2_clk>, <&extalr_clk>;
#clock-cells = <1>;
clock-output-names = "system", "pllc0", "pllc1",
"pllc2", "r",
diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
index 6b2f3a4fd13d..7802ce842a73 100644
--- a/arch/arm/boot/dts/r8a7793-gose.dts
+++ b/arch/arm/boot/dts/r8a7793-gose.dts
@@ -339,7 +339,7 @@
reg = <0x20>;
remote = <&vin1>;
- port {
+ ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -399,7 +399,7 @@
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
default-input = <0>;
- port {
+ ports {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index d560fc4051c5..db612271371b 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -128,7 +128,7 @@
assigned-clocks = <&cru SCLK_GPU>;
assigned-clock-rates = <100000000>;
clocks = <&cru SCLK_GPU>, <&cru SCLK_GPU>;
- clock-names = "core", "bus";
+ clock-names = "bus", "core";
resets = <&cru SRST_GPU>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rk3228-evb.dts b/arch/arm/boot/dts/rk3228-evb.dts
index 5670b33fd1bd..aed879db6c15 100644
--- a/arch/arm/boot/dts/rk3228-evb.dts
+++ b/arch/arm/boot/dts/rk3228-evb.dts
@@ -46,7 +46,7 @@
#address-cells = <1>;
#size-cells = <0>;
- phy: phy@0 {
+ phy: ethernet-phy@0 {
compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22";
reg = <0>;
clocks = <&cru SCLK_MAC_PHY>;
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index cd8f2a3b0e91..2aa74267ae51 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -539,7 +539,7 @@
"pp1",
"ppmmu1";
clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>;
- clock-names = "core", "bus";
+ clock-names = "bus", "core";
resets = <&cru SRST_GPU_A>;
status = "disabled";
};
@@ -944,7 +944,7 @@
};
};
- spi-0 {
+ spi0 {
spi0_clk: spi0-clk {
rockchip,pins = <0 9 RK_FUNC_2 &pcfg_pull_up>;
};
@@ -962,7 +962,7 @@
};
};
- spi-1 {
+ spi1 {
spi1_clk: spi1-clk {
rockchip,pins = <0 23 RK_FUNC_2 &pcfg_pull_up>;
};
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index d752dc611fd7..86a0d98d28ff 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -84,7 +84,7 @@
compatible = "arm,mali-400";
reg = <0x10090000 0x10000>;
clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>;
- clock-names = "core", "bus";
+ clock-names = "bus", "core";
assigned-clocks = <&cru ACLK_GPU>;
assigned-clock-rates = <100000000>;
resets = <&cru SRST_GPU>;
diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
index 575094ea7024..7b62c381d3c8 100644
--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
+++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
@@ -374,6 +374,7 @@
pinctrl-names = "default";
cap-sd-highspeed;
cap-mmc-highspeed;
+ keep-power-in-suspend;
mmc-pwrseq = <&wifi_pwrseq>;
non-removable;
diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
index 67358562a6ea..020a864623ff 100644
--- a/arch/arm/boot/dts/s5pv210.dtsi
+++ b/arch/arm/boot/dts/s5pv210.dtsi
@@ -98,19 +98,16 @@
};
clocks: clock-controller@e0100000 {
- compatible = "samsung,s5pv210-clock", "simple-bus";
+ compatible = "samsung,s5pv210-clock";
reg = <0xe0100000 0x10000>;
clock-names = "xxti", "xusbxti";
clocks = <&xxti>, <&xusbxti>;
#clock-cells = <1>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
+ };
- pmu_syscon: syscon@e0108000 {
- compatible = "samsung-s5pv210-pmu", "syscon";
- reg = <0xe0108000 0x8000>;
- };
+ pmu_syscon: syscon@e0108000 {
+ compatible = "samsung-s5pv210-pmu", "syscon";
+ reg = <0xe0108000 0x8000>;
};
pinctrl0: pinctrl@e0200000 {
@@ -126,35 +123,28 @@
};
};
- amba {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- ranges;
-
- pdma0: dma@e0900000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0xe0900000 0x1000>;
- interrupt-parent = <&vic0>;
- interrupts = <19>;
- clocks = <&clocks CLK_PDMA0>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- #dma-channels = <8>;
- #dma-requests = <32>;
- };
+ pdma0: dma@e0900000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0xe0900000 0x1000>;
+ interrupt-parent = <&vic0>;
+ interrupts = <19>;
+ clocks = <&clocks CLK_PDMA0>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
+ };
- pdma1: dma@e0a00000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0xe0a00000 0x1000>;
- interrupt-parent = <&vic0>;
- interrupts = <20>;
- clocks = <&clocks CLK_PDMA1>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- #dma-channels = <8>;
- #dma-requests = <32>;
- };
+ pdma1: dma@e0a00000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0xe0a00000 0x1000>;
+ interrupt-parent = <&vic0>;
+ interrupts = <20>;
+ clocks = <&clocks CLK_PDMA1>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
};
spi0: spi@e1300000 {
@@ -227,43 +217,36 @@
status = "disabled";
};
- audio-subsystem {
- compatible = "samsung,s5pv210-audss", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- clk_audss: clock-controller@eee10000 {
- compatible = "samsung,s5pv210-audss-clock";
- reg = <0xeee10000 0x1000>;
- clock-names = "hclk", "xxti",
- "fout_epll",
- "sclk_audio0";
- clocks = <&clocks DOUT_HCLKP>, <&xxti>,
- <&clocks FOUT_EPLL>,
- <&clocks SCLK_AUDIO0>;
- #clock-cells = <1>;
- };
+ clk_audss: clock-controller@eee10000 {
+ compatible = "samsung,s5pv210-audss-clock";
+ reg = <0xeee10000 0x1000>;
+ clock-names = "hclk", "xxti",
+ "fout_epll",
+ "sclk_audio0";
+ clocks = <&clocks DOUT_HCLKP>, <&xxti>,
+ <&clocks FOUT_EPLL>,
+ <&clocks SCLK_AUDIO0>;
+ #clock-cells = <1>;
+ };
- i2s0: i2s@eee30000 {
- compatible = "samsung,s5pv210-i2s";
- reg = <0xeee30000 0x1000>;
- interrupt-parent = <&vic2>;
- interrupts = <16>;
- dma-names = "rx", "tx", "tx-sec";
- dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>;
- clock-names = "iis",
- "i2s_opclk0",
- "i2s_opclk1";
- clocks = <&clk_audss CLK_I2S>,
- <&clk_audss CLK_I2S>,
- <&clk_audss CLK_DOUT_AUD_BUS>;
- samsung,idma-addr = <0xc0010000>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2s0_bus>;
- #sound-dai-cells = <0>;
- status = "disabled";
- };
+ i2s0: i2s@eee30000 {
+ compatible = "samsung,s5pv210-i2s";
+ reg = <0xeee30000 0x1000>;
+ interrupt-parent = <&vic2>;
+ interrupts = <16>;
+ dma-names = "rx", "tx", "tx-sec";
+ dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>;
+ clock-names = "iis",
+ "i2s_opclk0",
+ "i2s_opclk1";
+ clocks = <&clk_audss CLK_I2S>,
+ <&clk_audss CLK_I2S>,
+ <&clk_audss CLK_DOUT_AUD_BUS>;
+ samsung,idma-addr = <0xc0010000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_bus>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
};
i2s1: i2s@e2100000 {
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index b405992eb601..d856c16d0015 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -1247,6 +1247,7 @@
clocks = <&securam_clk>;
#address-cells = <1>;
#size-cells = <1>;
+ no-memory-wc;
ranges = <0 0xf8044000 0x1420>;
};
@@ -1297,7 +1298,7 @@
can0: can@f8054000 {
compatible = "bosch,m_can";
- reg = <0xf8054000 0x4000>, <0x210000 0x4000>;
+ reg = <0xf8054000 0x4000>, <0x210000 0x1c00>;
reg-names = "m_can", "message_ram";
interrupts = <56 IRQ_TYPE_LEVEL_HIGH 7>,
<64 IRQ_TYPE_LEVEL_HIGH 7>;
@@ -1490,7 +1491,7 @@
can1: can@fc050000 {
compatible = "bosch,m_can";
- reg = <0xfc050000 0x4000>, <0x210000 0x4000>;
+ reg = <0xfc050000 0x4000>, <0x210000 0x3800>;
reg-names = "m_can", "message_ram";
interrupts = <57 IRQ_TYPE_LEVEL_HIGH 7>,
<65 IRQ_TYPE_LEVEL_HIGH 7>;
@@ -1500,7 +1501,7 @@
assigned-clocks = <&can1_gclk>;
assigned-clock-parents = <&utmi>;
assigned-clock-rates = <40000000>;
- bosch,mram-cfg = <0x1100 0 0 64 0 0 32 32>;
+ bosch,mram-cfg = <0x1c00 0 0 64 0 0 32 32>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index b38f8c240558..602511ccad6f 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -710,7 +710,7 @@
};
};
- L2: l2-cache@fffef000 {
+ L2: cache-controller@fffef000 {
compatible = "arm,pl310-cache";
reg = <0xfffef000 0x1000>;
interrupts = <0 38 0x04>;
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index b4dd3846e8cc..4b1c8bec2de3 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -618,7 +618,7 @@
reg = <0xffcfb100 0x80>;
};
- L2: l2-cache@fffff000 {
+ L2: cache-controller@fffff000 {
compatible = "arm,pl310-cache";
reg = <0xfffff000 0x1000>;
interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>;
@@ -791,7 +791,7 @@
timer3: timer3@ffd00100 {
compatible = "snps,dw-apb-timer";
interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xffd01000 0x100>;
+ reg = <0xffd00100 0x100>;
clocks = <&l4_sys_free_clk>;
clock-names = "timer";
};
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 5d46bb0139fa..707ad5074878 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -143,7 +143,7 @@
trips {
cpu_alert0: cpu-alert0 {
/* milliCelsius */
- temperature = <850000>;
+ temperature = <85000>;
hysteresis = <2000>;
type = "passive";
};
diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
index f250b20af493..9be1c4a3d95f 100644
--- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
@@ -131,7 +131,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_sw>;
phy-handle = <&rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
allwinner,rx-delay-ps = <700>;
allwinner,tx-delay-ps = <700>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
index 7e74ba83f809..75396993195d 100644
--- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
@@ -168,7 +168,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_dldo4>;
phy-handle = <&rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
index 49547a43cc90..54cbdaf7ffdc 100644
--- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
@@ -318,8 +318,8 @@
};
&reg_dldo3 {
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
regulator-name = "vdd-csi";
};
diff --git a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
index 1db2541135a7..00e0d6940c30 100644
--- a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
+++ b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
@@ -32,7 +32,7 @@
pwr_led {
label = "bananapi-m2-zero:red:pwr";
- gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>; /* PL10 */
+ gpios = <&r_pio 0 10 GPIO_ACTIVE_LOW>; /* PL10 */
default-state = "on";
};
};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
index 71fb73208939..babf4cf1b2f6 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
@@ -53,11 +53,6 @@
};
};
-&emac {
- /* LEDs changed to active high on the plus */
- /delete-property/ allwinner,leds-active-low;
-};
-
&mmc1 {
vmmc-supply = <&reg_vcc3v3>;
bus-width = <4>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
index 6dbf7b2e0c13..b6ca45d18e51 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
@@ -67,7 +67,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
index c39b9169ea64..5e5223a48ac7 100644
--- a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
+++ b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
@@ -121,7 +121,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <&reg_dc1sw>;
status = "okay";
};
@@ -206,16 +206,16 @@
};
&reg_dc1sw {
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-name = "vcc-gmac-phy";
};
&reg_dcdc1 {
regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "vcc-3v0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-3v3";
};
&reg_dcdc2 {
diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
index 92fcb756a08a..97cac6d63692 100644
--- a/arch/arm/boot/dts/sun8i-v3s.dtsi
+++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
@@ -419,7 +419,7 @@
gic: interrupt-controller@1c81000 {
compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
reg = <0x01c81000 0x1000>,
- <0x01c82000 0x1000>,
+ <0x01c82000 0x2000>,
<0x01c84000 0x2000>,
<0x01c86000 0x2000>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index e2d1a22c5950..d8a32104aad0 100644
--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -513,7 +513,7 @@
clocks = <&sys_clk 6>;
reset-names = "ether";
resets = <&sys_rst 6>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
local-mac-address = [00 00 00 00 00 00];
socionext,syscon-phy-mode = <&soc_glue 0>;
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index d392794d9c13..de81e8b4afde 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -532,7 +532,7 @@
};
ocotp: ocotp@400a5000 {
- compatible = "fsl,vf610-ocotp";
+ compatible = "fsl,vf610-ocotp", "syscon";
reg = <0x400a5000 0x1000>;
clocks = <&clks VF610_CLK_OCOTP>;
};
diff --git a/arch/arm/configs/rpc_defconfig b/arch/arm/configs/rpc_defconfig
index 3b82b64950d9..c090643b1ecb 100644
--- a/arch/arm/configs/rpc_defconfig
+++ b/arch/arm/configs/rpc_defconfig
@@ -32,7 +32,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index bd71d5bf98c9..9c69a9020e77 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -206,7 +206,6 @@ CONFIG_EEPROM_AT24=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_CONSTANTS=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 88286dd483ff..1935b580f0e8 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -21,11 +21,11 @@
#endif
#include <asm/ptrace.h>
-#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
#define IOMEM(x) (x)
@@ -374,9 +374,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
.if \inc == 1
- \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
+ \instr\()b\t\cond\().w \reg, [\ptr, #\off]
.elseif \inc == 4
- \instr\cond\()\t\().w \reg, [\ptr, #\off]
+ \instr\t\cond\().w \reg, [\ptr, #\off]
.else
.error "Unsupported inc macro argument"
.endif
@@ -415,9 +415,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.rept \rept
9999:
.if \inc == 1
- \instr\cond\()b\()\t \reg, [\ptr], #\inc
+ \instr\()b\t\cond \reg, [\ptr], #\inc
.elseif \inc == 4
- \instr\cond\()\t \reg, [\ptr], #\inc
+ \instr\t\cond \reg, [\ptr], #\inc
.else
.error "Unsupported inc macro argument"
.endif
@@ -447,79 +447,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.size \name , . - \name
.endm
- .macro csdb
-#ifdef CONFIG_THUMB2_KERNEL
- .inst.w 0xf3af8014
-#else
- .inst 0xe320f014
-#endif
- .endm
-
- .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
-#ifndef CONFIG_CPU_USE_DOMAINS
- adds \tmp, \addr, #\size - 1
- sbcccs \tmp, \tmp, \limit
- bcs \bad
-#ifdef CONFIG_CPU_SPECTRE
- movcs \addr, #0
- csdb
-#endif
-#endif
- .endm
-
- .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
-#ifdef CONFIG_CPU_SPECTRE
- sub \tmp, \limit, #1
- subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
- addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
- subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) }
- movlo \addr, #0 @ if (tmp < 0) addr = NULL
- csdb
-#endif
- .endm
-
- .macro uaccess_disable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- /*
- * Whenever we re-enter userspace, the domains should always be
- * set appropriately.
- */
- mov \tmp, #DACR_UACCESS_DISABLE
- mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
- .if \isb
- instr_sync
- .endif
-#endif
- .endm
-
- .macro uaccess_enable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- /*
- * Whenever we re-enter userspace, the domains should always be
- * set appropriately.
- */
- mov \tmp, #DACR_UACCESS_ENABLE
- mcr p15, 0, \tmp, c3, c0, 0
- .if \isb
- instr_sync
- .endif
-#endif
- .endm
-
- .macro uaccess_save, tmp
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- mrc p15, 0, \tmp, c3, c0, 0
- str \tmp, [sp, #SVC_DACR]
-#endif
- .endm
-
- .macro uaccess_restore
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- ldr r0, [sp, #SVC_DACR]
- mcr p15, 0, r0, c3, c0, 0
-#endif
- .endm
-
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index ffebe7b7a5b7..91ca80035fc4 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -163,8 +163,13 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
preempt_enable();
#endif
- if (!ret)
- *oval = oldval;
+ /*
+ * Store unconditionally. If ret != 0 the extra store is the least
+ * of the worries but GCC cannot figure out that __futex_atomic_op()
+ * is either setting ret to -EFAULT or storing the old value in
+ * oldval, which results in an uninitialized warning at the call site.
+ */
+ *oval = oldval;
return ret;
}
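
A minimal sketch of the pattern the futex comment above relies on, outside the kernel and with hypothetical names (do_op() stands in for __futex_atomic_op()): storing the output unconditionally lets the compiler see that the variable is always written, so no spurious "may be used uninitialized" warning is emitted at the call site.

/* Hedged illustration only, not kernel code. */
static int do_op(int arg, int *old, int *cur)
{
	if (arg < 0)
		return -14;	/* failure path: *old left untouched */
	*old = *cur;
	*cur = arg;
	return 0;
}

static int caller(int arg, int *cur, int *oval)
{
	int oldval, ret;

	ret = do_op(arg, &oldval, cur);
	/*
	 * Unconditional store: if ret != 0 the stored value is unused by
	 * the caller, but the compiler can no longer warn about oldval.
	 */
	*oval = oldval;
	return ret;
}
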
diff --git a/arch/arm/include/asm/kexec-internal.h b/arch/arm/include/asm/kexec-internal.h
new file mode 100644
index 000000000000..ecc2322db7aa
--- /dev/null
+++ b/arch/arm/include/asm/kexec-internal.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_INTERNAL_H
+#define _ARM_KEXEC_INTERNAL_H
+
+struct kexec_relocate_data {
+ unsigned long kexec_start_address;
+ unsigned long kexec_indirection_page;
+ unsigned long kexec_mach_type;
+ unsigned long kexec_r2;
+};
+
+#endif
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index 82290f212d8e..e1eb662e0f9e 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -52,20 +52,20 @@ int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
/* optinsn template addresses */
-extern __visible kprobe_opcode_t optprobe_template_entry;
-extern __visible kprobe_opcode_t optprobe_template_val;
-extern __visible kprobe_opcode_t optprobe_template_call;
-extern __visible kprobe_opcode_t optprobe_template_end;
-extern __visible kprobe_opcode_t optprobe_template_sub_sp;
-extern __visible kprobe_opcode_t optprobe_template_add_sp;
-extern __visible kprobe_opcode_t optprobe_template_restore_begin;
-extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn;
-extern __visible kprobe_opcode_t optprobe_template_restore_end;
+extern __visible kprobe_opcode_t optprobe_template_entry[];
+extern __visible kprobe_opcode_t optprobe_template_val[];
+extern __visible kprobe_opcode_t optprobe_template_call[];
+extern __visible kprobe_opcode_t optprobe_template_end[];
+extern __visible kprobe_opcode_t optprobe_template_sub_sp[];
+extern __visible kprobe_opcode_t optprobe_template_add_sp[];
+extern __visible kprobe_opcode_t optprobe_template_restore_begin[];
+extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[];
+extern __visible kprobe_opcode_t optprobe_template_restore_end[];
#define MAX_OPTIMIZED_LENGTH 4
#define MAX_OPTINSN_SIZE \
- ((unsigned long)&optprobe_template_end - \
- (unsigned long)&optprobe_template_entry)
+ ((unsigned long)optprobe_template_end - \
+ (unsigned long)optprobe_template_entry)
#define RELATIVEJUMP_SIZE 4
struct arch_optimized_insn {
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 7d2ca035d6c8..11d4ff9f3e4d 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -216,7 +216,7 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
}
-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}
@@ -248,16 +248,21 @@ static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
}
-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
}
-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
}
+static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
+}
+
static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d0d0227fc70d..ae073fceb3f0 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -234,7 +234,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end, bool blockable);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
@@ -303,6 +303,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_init_debug(void) {}
+static inline void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
@@ -364,4 +365,6 @@ static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);
+#define kvm_arm_vcpu_loaded(vcpu) (false)
+
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index a89b4076cde4..72821b4721ad 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -16,6 +16,8 @@
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_
+#include <asm/thread_info.h>
+
/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
new file mode 100644
index 000000000000..907571fd05c6
--- /dev/null
+++ b/arch/arm/include/asm/uaccess-asm.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_UACCESS_ASM_H__
+#define __ASM_UACCESS_ASM_H__
+
+#include <asm/asm-offsets.h>
+#include <asm/domain.h>
+#include <asm/memory.h>
+#include <asm/thread_info.h>
+
+ .macro csdb
+#ifdef CONFIG_THUMB2_KERNEL
+ .inst.w 0xf3af8014
+#else
+ .inst 0xe320f014
+#endif
+ .endm
+
+ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+ adds \tmp, \addr, #\size - 1
+ sbcscc \tmp, \tmp, \limit
+ bcs \bad
+#ifdef CONFIG_CPU_SPECTRE
+ movcs \addr, #0
+ csdb
+#endif
+#endif
+ .endm
+
+ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+ sub \tmp, \limit, #1
+ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
+ addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
+ subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
+ movlo \addr, #0 @ if (tmp < 0) addr = NULL
+ csdb
+#endif
+ .endm
+
+ .macro uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_ENABLE
+ mcr p15, 0, \tmp, c3, c0, 0
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
+#define DACR(x...) x
+#else
+#define DACR(x...)
+#endif
+
+ /*
+ * Save the address limit on entry to a privileged exception.
+ *
+ * If we are using the DACR for kernel access by the user accessors
+ * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
+ * back to client mode, whether or not \disable is set.
+ *
+ * If we are using SW PAN, set the DACR user domain to no access
+ * if \disable is set.
+ */
+ .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
+ ldr \tmp1, [\tsk, #TI_ADDR_LIMIT]
+ mov \tmp2, #TASK_SIZE
+ str \tmp2, [\tsk, #TI_ADDR_LIMIT]
+ DACR( mrc p15, 0, \tmp0, c3, c0, 0)
+ DACR( str \tmp0, [sp, #SVC_DACR])
+ str \tmp1, [sp, #SVC_ADDR_LIMIT]
+ .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
+ /* kernel=client, user=no access */
+ mov \tmp2, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp2, c3, c0, 0
+ instr_sync
+ .elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
+ /* kernel=client */
+ bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
+ orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
+ mcr p15, 0, \tmp2, c3, c0, 0
+ instr_sync
+ .endif
+ .endm
+
+ /* Restore the user access state previously saved by uaccess_entry */
+ .macro uaccess_exit, tsk, tmp0, tmp1
+ ldr \tmp1, [sp, #SVC_ADDR_LIMIT]
+ DACR( ldr \tmp0, [sp, #SVC_DACR])
+ str \tmp1, [\tsk, #TI_ADDR_LIMIT]
+ DACR( mcr p15, 0, \tmp0, c3, c0, 0)
+ .endm
+
+#undef DACR
+
+#endif /* __ASM_UACCESS_ASM_H__ */
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index ef5dfedacd8d..628c336e8e3b 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -29,13 +29,13 @@
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPD32
- ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ ldclne p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
cmp \tmp, #2 @ 32 x 64bit registers?
- ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ ldcleq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#endif
#endif
@@ -53,13 +53,13 @@
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPD32
- stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ stclne p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
cmp \tmp, #2 @ 32 x 64bit registers?
- stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ stcleq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
#endif
#endif
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 3968d6c22455..40afe953a0e2 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -18,6 +18,7 @@
#include <linux/kvm_host.h>
#endif
#include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
@@ -29,6 +30,7 @@
#include <asm/vdso_datapage.h>
#include <asm/hardware/cache-l2x0.h>
#include <linux/kbuild.h>
+#include <linux/arm-smccc.h>
#include "signal.h"
/*
@@ -158,6 +160,8 @@ int main(void)
DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
#endif
+ DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
+ DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
@@ -189,5 +193,9 @@ int main(void)
DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
#endif
+ DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address));
+ DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page));
+ DEFINE(KEXEC_MACH_TYPE, offsetof(struct kexec_relocate_data, kexec_mach_type));
+ DEFINE(KEXEC_R2, offsetof(struct kexec_relocate_data, kexec_r2));
return 0;
}
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index e85a3af9ddeb..89e551eebff1 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -30,6 +30,7 @@
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>
+#include <asm/uaccess-asm.h>
#include "entry-header.S"
#include <asm/entry-macro-multi.S>
@@ -182,15 +183,7 @@ ENDPROC(__und_invalid)
stmia r7, {r2 - r6}
get_thread_info tsk
- ldr r0, [tsk, #TI_ADDR_LIMIT]
- mov r1, #TASK_SIZE
- str r1, [tsk, #TI_ADDR_LIMIT]
- str r0, [sp, #SVC_ADDR_LIMIT]
-
- uaccess_save r0
- .if \uaccess
- uaccess_disable r0
- .endif
+ uaccess_entry tsk, r0, r1, r2, \uaccess
.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 62db1c9746cb..7b595f2d4a28 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -6,6 +6,7 @@
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
#include <asm/v7m.h>
@ Bad Abort numbers
@@ -217,9 +218,7 @@
blne trace_hardirqs_off
#endif
.endif
- ldr r1, [sp, #SVC_ADDR_LIMIT]
- uaccess_restore
- str r1, [tsk, #TI_ADDR_LIMIT]
+ uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode SVC restore
@@ -263,9 +262,7 @@
@ on the stack remains correct).
@
.macro svc_exit_via_fiq
- ldr r1, [sp, #SVC_ADDR_LIMIT]
- uaccess_restore
- str r1, [tsk, #TI_ADDR_LIMIT]
+ uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
mov r0, sp
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 6b1148cafffd..90add5ded3f1 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -675,11 +675,7 @@ ARM_BE8(rev16 ip, ip)
bcc 1b
bx lr
#else
-#ifdef CONFIG_CPU_ENDIAN_BE8
- moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction
-#else
moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
-#endif
b 2f
1: ldr ip, [r7, r3]
#ifdef CONFIG_CPU_ENDIAN_BE8
@@ -688,7 +684,7 @@ ARM_BE8(rev16 ip, ip)
tst ip, #0x000f0000 @ check the rotation field
orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
biceq ip, ip, #0x00004000 @ clear bit 22
- orreq ip, ip, r0 @ mask in offset bits 7-0
+ orreq ip, ip, r0, ror #8 @ mask in offset bits 7-0
#else
bic ip, ip, #0x000000ff
tst ip, #0xf00 @ check the rotation field
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 1d5fbf1d1c67..2ee5b7f5e7ad 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -688,26 +688,68 @@ static void disable_single_step(struct perf_event *bp)
arch_install_hw_breakpoint(bp);
}
+/*
+ * Arm32 hardware does not always report a watchpoint hit address that matches
+ * one of the watchpoints set. It can also report an address "near" the
+ * watchpoint if a single instruction accesses both watched and unwatched
+ * addresses. There is no straightforward way, short of disassembling the
+ * offending instruction, to map that address back to the watchpoint. This
+ * function computes the distance of the memory access from the watchpoint as a
+ * heuristic for the likelihood that a given access triggered the watchpoint.
+ *
+ * See this same function in the arm64 platform code, which has the same
+ * problem.
+ *
+ * The function returns the distance of the address from the bytes watched by
+ * the watchpoint. In case of an exact match, it returns 0.
+ */
+static u32 get_distance_from_watchpoint(unsigned long addr, u32 val,
+ struct arch_hw_breakpoint_ctrl *ctrl)
+{
+ u32 wp_low, wp_high;
+ u32 lens, lene;
+
+ lens = __ffs(ctrl->len);
+ lene = __fls(ctrl->len);
+
+ wp_low = val + lens;
+ wp_high = val + lene;
+ if (addr < wp_low)
+ return wp_low - addr;
+ else if (addr > wp_high)
+ return addr - wp_high;
+ else
+ return 0;
+}
+
+static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
+ struct arch_hw_breakpoint *info)
+{
+ return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
+}
+
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
- int i, access;
- u32 val, ctrl_reg, alignment_mask;
+ int i, access, closest_match = 0;
+ u32 min_dist = -1, dist;
+ u32 val, ctrl_reg;
struct perf_event *wp, **slots;
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
slots = this_cpu_ptr(wp_on_reg);
+ /*
+ * Find all watchpoints that match the reported address. If no exact
+ * match is found, attribute the hit to the closest watchpoint.
+ */
+ rcu_read_lock();
for (i = 0; i < core_num_wrps; ++i) {
- rcu_read_lock();
-
wp = slots[i];
-
if (wp == NULL)
- goto unlock;
+ continue;
- info = counter_arch_bp(wp);
/*
* The DFAR is an unknown value on debug architectures prior
* to 7.1. Since we only allow a single watchpoint on these
@@ -716,50 +758,69 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
*/
if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
BUG_ON(i > 0);
+ info = counter_arch_bp(wp);
info->trigger = wp->attr.bp_addr;
} else {
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
- alignment_mask = 0x7;
- else
- alignment_mask = 0x3;
-
- /* Check if the watchpoint value matches. */
- val = read_wb_reg(ARM_BASE_WVR + i);
- if (val != (addr & ~alignment_mask))
- goto unlock;
-
- /* Possible match, check the byte address select. */
- ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
- decode_ctrl_reg(ctrl_reg, &ctrl);
- if (!((1 << (addr & alignment_mask)) & ctrl.len))
- goto unlock;
-
/* Check that the access type matches. */
if (debug_exception_updates_fsr()) {
access = (fsr & ARM_FSR_ACCESS_MASK) ?
HW_BREAKPOINT_W : HW_BREAKPOINT_R;
if (!(access & hw_breakpoint_type(wp)))
- goto unlock;
+ continue;
}
+ val = read_wb_reg(ARM_BASE_WVR + i);
+ ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
+ decode_ctrl_reg(ctrl_reg, &ctrl);
+ dist = get_distance_from_watchpoint(addr, val, &ctrl);
+ if (dist < min_dist) {
+ min_dist = dist;
+ closest_match = i;
+ }
+ /* Is this an exact match? */
+ if (dist != 0)
+ continue;
+
/* We have a winner. */
+ info = counter_arch_bp(wp);
info->trigger = addr;
}
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+
+ /*
+ * If we triggered a user watchpoint from a uaccess routine,
+ * then handle the stepping ourselves since userspace really
+ * can't help us with this.
+ */
+ if (watchpoint_fault_on_uaccess(regs, info))
+ goto step;
+
perf_bp_event(wp, regs);
/*
- * If no overflow handler is present, insert a temporary
- * mismatch breakpoint so we can single-step over the
- * watchpoint trigger.
+ * Defer stepping to the overflow handler if one is installed.
+ * Otherwise, insert a temporary mismatch breakpoint so that
+ * we can single-step over the watchpoint trigger.
*/
+ if (!is_default_overflow_handler(wp))
+ continue;
+step:
+ enable_single_step(wp, instruction_pointer(regs));
+ }
+
+ if (min_dist > 0 && min_dist != -1) {
+ /* No exact match found. */
+ wp = slots[closest_match];
+ info = counter_arch_bp(wp);
+ info->trigger = addr;
+ pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+ perf_bp_event(wp, regs);
if (is_default_overflow_handler(wp))
enable_single_step(wp, instruction_pointer(regs));
-
-unlock:
- rcu_read_unlock();
}
+
+ rcu_read_unlock();
}
static void watchpoint_single_step_handler(unsigned long pc)
@@ -830,7 +891,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
info->trigger = addr;
pr_debug("breakpoint fired: address = 0x%x\n", addr);
perf_bp_event(bp, regs);
- if (!bp->overflow_handler)
+ if (is_default_overflow_handler(bp))
enable_single_step(bp, addr);
goto unlock;
}
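
As a worked example of the distance heuristic introduced above: the WCR byte-address-select mask describes which bytes, starting at the word-aligned WVR value, are watched, so __ffs()/__fls() of that mask give the offsets of the first and last watched byte. A hedged standalone sketch, using compiler builtins in place of the kernel's __ffs()/__fls():

#include <stdint.h>

/* val = WVR value, len_mask = byte-address-select bits (assumed non-zero). */
static uint32_t distance_from_watchpoint(uint32_t addr, uint32_t val,
					 uint32_t len_mask)
{
	uint32_t lens = (uint32_t)__builtin_ctz(len_mask);        /* first watched byte */
	uint32_t lene = 31u - (uint32_t)__builtin_clz(len_mask);  /* last watched byte */
	uint32_t wp_low = val + lens;
	uint32_t wp_high = val + lene;

	if (addr < wp_low)
		return wp_low - addr;
	if (addr > wp_high)
		return addr - wp_high;
	return 0;
}

For val = 0x1000 and len_mask = 0x0c (bytes 2-3 watched), an access at 0x1001 returns 1, while accesses at 0x1002 or 0x1003 return 0, matching the "closest watchpoint" selection done in watchpoint_handler().
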
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 76300f3813e8..734adeb42df8 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
#include <asm/fncpy.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
@@ -24,11 +25,6 @@
extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;
-extern unsigned long kexec_start_address;
-extern unsigned long kexec_indirection_page;
-extern unsigned long kexec_mach_type;
-extern unsigned long kexec_boot_atags;
-
static atomic_t waiting_for_crash_ipi;
/*
@@ -161,6 +157,7 @@ void (*kexec_reinit)(void);
void machine_kexec(struct kimage *image)
{
unsigned long page_list, reboot_entry_phys;
+ struct kexec_relocate_data *data;
void (*reboot_entry)(void);
void *reboot_code_buffer;
@@ -176,18 +173,17 @@ void machine_kexec(struct kimage *image)
reboot_code_buffer = page_address(image->control_code_page);
- /* Prepare parameters for reboot_code_buffer*/
- set_kernel_text_rw();
- kexec_start_address = image->start;
- kexec_indirection_page = page_list;
- kexec_mach_type = machine_arch_type;
- kexec_boot_atags = image->arch.kernel_r2;
-
/* copy our kernel relocation code to the control code page */
reboot_entry = fncpy(reboot_code_buffer,
&relocate_new_kernel,
relocate_new_kernel_size);
+ data = reboot_code_buffer + relocate_new_kernel_size;
+ data->kexec_start_address = image->start;
+ data->kexec_indirection_page = page_list;
+ data->kexec_mach_type = machine_arch_type;
+ data->kexec_r2 = image->arch.kernel_r2;
+
/* get the identity mapping physical address for the reboot code */
reboot_entry_phys = virt_to_idmap(reboot_entry);
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 36718a424358..492ac74a63f4 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -229,8 +229,8 @@ static struct undef_hook arm_break_hook = {
};
static struct undef_hook thumb_break_hook = {
- .instr_mask = 0xffff,
- .instr_val = 0xde01,
+ .instr_mask = 0xffffffff,
+ .instr_val = 0x0000de01,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = break_trap,
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 7eaa2ae7aff5..5e15b5912cb0 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -5,14 +5,16 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
#include <asm/kexec.h>
.align 3 /* not needed for this code, but keeps fncpy() happy */
ENTRY(relocate_new_kernel)
- ldr r0,kexec_indirection_page
- ldr r1,kexec_start_address
+ adr r7, relocate_new_kernel_end
+ ldr r0, [r7, #KEXEC_INDIR_PAGE]
+ ldr r1, [r7, #KEXEC_START_ADDR]
/*
* If there is no indirection page (we are doing crashdumps)
@@ -57,34 +59,16 @@ ENTRY(relocate_new_kernel)
2:
/* Jump to relocated kernel */
- mov lr,r1
- mov r0,#0
- ldr r1,kexec_mach_type
- ldr r2,kexec_boot_atags
- ARM( ret lr )
- THUMB( bx lr )
-
- .align
-
- .globl kexec_start_address
-kexec_start_address:
- .long 0x0
-
- .globl kexec_indirection_page
-kexec_indirection_page:
- .long 0x0
-
- .globl kexec_mach_type
-kexec_mach_type:
- .long 0x0
-
- /* phy addr of the atags for the new kernel */
- .globl kexec_boot_atags
-kexec_boot_atags:
- .long 0x0
+ mov lr, r1
+ mov r0, #0
+ ldr r1, [r7, #KEXEC_MACH_TYPE]
+ ldr r2, [r7, #KEXEC_R2]
+ ARM( ret lr )
+ THUMB( bx lr )
ENDPROC(relocate_new_kernel)
+ .align 3
relocate_new_kernel_end:
.globl relocate_new_kernel_size
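
The two hunks above replace the old patched-in globals (kexec_start_address and friends) with a kexec_relocate_data block copied into the control page directly after the relocated code, which the assembly then addresses relative to relocate_new_kernel_end. A hedged user-space analogue of that staging step (hypothetical names, plain memcpy() instead of fncpy()):

#include <string.h>

struct relocate_data {
	unsigned long start, indirection_page, mach_type, r2;
};

/*
 * Copy position-independent code into buf and place its parameters
 * immediately after it, so the code can locate them from its own end.
 */
static void *stage_relocator(void *buf, const void *code, size_t code_size,
			     const struct relocate_data *d)
{
	memcpy(buf, code, code_size);
	memcpy((char *)buf + code_size, d, sizeof(*d));
	return buf;	/* entry point of the staged code */
}
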
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index b908382b69ff..1c01358b9b6d 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -697,18 +697,20 @@ struct page *get_signal_page(void)
addr = page_address(page);
+ /* Poison the entire page */
+ memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+ PAGE_SIZE / sizeof(u32));
+
/* Give the signal return code some randomness */
offset = 0x200 + (get_random_int() & 0x7fc);
signal_return_offset = offset;
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
+ /* Copy signal return handlers into the page */
memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
- ptr = (unsigned long)addr + offset;
- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+ /* Flush out all instructions in this page */
+ ptr = (unsigned long)addr;
+ flush_icache_range(ptr, ptr + PAGE_SIZE);
return page;
}
diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
index e5d43066b889..13d307cd364c 100644
--- a/arch/arm/kernel/smccc-call.S
+++ b/arch/arm/kernel/smccc-call.S
@@ -12,7 +12,9 @@
*
*/
#include <linux/linkage.h>
+#include <linux/arm-smccc.h>
+#include <asm/asm-offsets.h>
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>
@@ -36,7 +38,14 @@ UNWIND( .fnstart)
UNWIND( .save {r4-r7})
ldm r12, {r4-r7}
\instr
- pop {r4-r7}
+ ldr r4, [sp, #36]
+ cmp r4, #0
+ beq 1f // No quirk structure
+ ldr r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS]
+ cmp r5, #ARM_SMCCC_QUIRK_QCOM_A6
+ bne 1f // No quirk present
+ str r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS]
+1: pop {r4-r7}
ldr r12, [sp, #(4 * 4)]
stm r12, {r0-r3}
bx lr
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index a56e7c856ab5..d23ab9ec130a 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -21,6 +21,19 @@
* A simple function epilogue looks like this:
* ldm sp, {fp, sp, pc}
*
+ * When compiled with clang, pc and sp are not pushed. A simple function
+ * prologue looks like this when built with clang:
+ *
+ * stmdb {..., fp, lr}
+ * add fp, sp, #x
+ * sub sp, sp, #y
+ *
+ * A simple function epilogue looks like this when built with clang:
+ *
+ * sub sp, fp, #x
+ * ldm {..., fp, pc}
+ *
+ *
* Note that with framepointer enabled, even the leaf functions have the same
* prologue and epilogue, therefore we can ignore the LR value in this case.
*/
@@ -33,6 +46,16 @@ int notrace unwind_frame(struct stackframe *frame)
low = frame->sp;
high = ALIGN(low, THREAD_SIZE);
+#ifdef CONFIG_CC_IS_CLANG
+ /* check current frame pointer is within bounds */
+ if (fp < low + 4 || fp > high - 4)
+ return -EINVAL;
+
+ frame->sp = frame->fp;
+ frame->fp = *(unsigned long *)(fp);
+ frame->pc = frame->lr;
+ frame->lr = *(unsigned long *)(fp + 4);
+#else
/* check current frame pointer is within bounds */
if (fp < low + 12 || fp > high - 4)
return -EINVAL;
@@ -41,6 +64,7 @@ int notrace unwind_frame(struct stackframe *frame)
frame->fp = *(unsigned long *)(fp - 12);
frame->sp = *(unsigned long *)(fp - 8);
frame->pc = *(unsigned long *)(fp - 4);
+#endif
return 0;
}
@@ -91,6 +115,8 @@ static int save_trace(struct stackframe *frame, void *d)
return 0;
regs = (struct pt_regs *)frame->sp;
+ if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
+ return 0;
trace->entries[trace->nr_entries++] = regs->ARM_pc;
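
Restated as plain C, the CONFIG_CC_IS_CLANG branch added above walks clang's two-word frame record at fp (saved fp, saved lr) rather than gcc's three-word record below fp; a hedged sketch with a simplified frame type:

struct frame { unsigned long fp, sp, lr, pc; };

static int unwind_step_clang(struct frame *f, unsigned long low,
			     unsigned long high)
{
	unsigned long fp = f->fp;

	if (fp < low + 4 || fp > high - 4)	/* same bounds check as above */
		return -1;

	f->sp = f->fp;				/* fp becomes the new stack bound */
	f->fp = *(unsigned long *)fp;		/* saved frame pointer */
	f->pc = f->lr;				/* this frame's return address */
	f->lr = *(unsigned long *)(fp + 4);	/* caller's saved lr */
	return 0;
}
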
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index d08099269e35..e126386fb78a 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
@@ -27,12 +28,22 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return -EINVAL;
/*
+ * Function graph tracer state gets inconsistent when the kernel
+ * calls functions that never return (aka suspend finishers), hence
+ * disable graph tracing during their execution.
+ */
+ pause_graph_tracing();
+
+ /*
* Provide a temporary page table with an identity mapping for
* the MMU-enable code, required for resuming. On successful
* resume (indicated by a zero return code), we need to switch
* back to the correct page tables.
*/
ret = __cpu_suspend(arg, fn, __mpidr);
+
+ unpause_graph_tracing();
+
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
local_flush_bp_all();
@@ -46,7 +57,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
u32 __mpidr = cpu_logical_map(smp_processor_id());
- return __cpu_suspend(arg, fn, __mpidr);
+ int ret;
+
+ pause_graph_tracing();
+ ret = __cpu_suspend(arg, fn, __mpidr);
+ unpause_graph_tracing();
+
+ return ret;
}
#define idmap_pgd NULL
#endif
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index badf02ca3693..aec533168f04 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -67,14 +67,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
+ unsigned long end = frame + 4 + sizeof(struct pt_regs);
+
#ifdef CONFIG_KALLSYMS
printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
#else
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif
- if (in_entry_text(from))
- dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
+ if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
+ dump_mem("", "Exception stack", frame + 4, end);
}
void dump_backtrace_stm(u32 *stack, u32 instruction)
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 93cddab73072..95bd35991288 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -7,7 +7,7 @@
ENTRY( \name )
UNWIND( .fnstart )
ands ip, r1, #3
- strneb r1, [ip] @ assert word-aligned
+ strbne r1, [ip] @ assert word-aligned
mov r2, #1
and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5
@@ -32,7 +32,7 @@ ENDPROC(\name )
ENTRY( \name )
UNWIND( .fnstart )
ands ip, r1, #3
- strneb r1, [ip] @ assert word-aligned
+ strbne r1, [ip] @ assert word-aligned
mov r2, #1
and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5
@@ -62,7 +62,7 @@ ENDPROC(\name )
ENTRY( \name )
UNWIND( .fnstart )
ands ip, r1, #3
- strneb r1, [ip] @ assert word-aligned
+ strbne r1, [ip] @ assert word-aligned
and r2, r0, #31
mov r0, r0, lsr #5
mov r3, #1
@@ -89,7 +89,7 @@ ENDPROC(\name )
ENTRY( \name )
UNWIND( .fnstart )
ands ip, r1, #3
- strneb r1, [ip] @ assert word-aligned
+ strbne r1, [ip] @ assert word-aligned
and r3, r0, #31
mov r0, r0, lsr #5
save_and_disable_irqs ip
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index e2e4df3d11e5..21bfe9b6e16a 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -542,13 +542,13 @@ static void __init at91_pm_sram_init(void)
sram_pool = gen_pool_get(&pdev->dev, NULL);
if (!sram_pool) {
pr_warn("%s: sram pool unavailable!\n", __func__);
- return;
+ goto out_put_device;
}
sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
if (!sram_base) {
pr_warn("%s: unable to alloc sram!\n", __func__);
- return;
+ goto out_put_device;
}
sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
@@ -556,12 +556,17 @@ static void __init at91_pm_sram_init(void)
at91_pm_suspend_in_sram_sz, false);
if (!at91_suspend_sram_fn) {
pr_warn("SRAM: Could not map\n");
- return;
+ goto out_put_device;
}
/* Copy the pm suspend handler to SRAM */
at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
+ return;
+
+out_put_device:
+ put_device(&pdev->dev);
+ return;
}
static bool __init at91_is_pm_mode_active(int pm_mode)
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index a7c6ae13c945..d650a5a1dfee 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -220,6 +220,10 @@ ENDPROC(at91_backup_mode)
orr tmp1, tmp1, #AT91_PMC_KEY
str tmp1, [pmc, #AT91_CKGR_MOR]
+ /* Quirk for SAM9X60's PMC */
+ nop
+ nop
+
wait_mckrdy
/* Enable the crystal oscillator */
diff --git a/arch/arm/mach-footbridge/cats-pci.c b/arch/arm/mach-footbridge/cats-pci.c
index 0b2fd7e2e9b4..90b1e9be430e 100644
--- a/arch/arm/mach-footbridge/cats-pci.c
+++ b/arch/arm/mach-footbridge/cats-pci.c
@@ -15,14 +15,14 @@
#include <asm/mach-types.h>
/* cats host-specific stuff */
-static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
+static int irqmap_cats[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin)
{
return 0;
}
-static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (dev->irq >= 255)
return -1; /* not a valid interrupt. */
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index 16d71bac0061..2d1f822700a1 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -69,15 +69,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
if (addr)
switch (size) {
case 1:
- asm("ldrb %0, [%1, %2]"
+ asm volatile("ldrb %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
case 2:
- asm("ldrh %0, [%1, %2]"
+ asm volatile("ldrh %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
case 4:
- asm("ldr %0, [%1, %2]"
+ asm volatile("ldr %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
}
@@ -103,17 +103,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
if (addr)
switch (size) {
case 1:
- asm("strb %0, [%1, %2]"
+ asm volatile("strb %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
case 2:
- asm("strh %0, [%1, %2]"
+ asm volatile("strh %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
case 4:
- asm("str %0, [%1, %2]"
+ asm volatile("str %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
diff --git a/arch/arm/mach-footbridge/ebsa285-pci.c b/arch/arm/mach-footbridge/ebsa285-pci.c
index 6f28aaa9ca79..c3f280d08fa7 100644
--- a/arch/arm/mach-footbridge/ebsa285-pci.c
+++ b/arch/arm/mach-footbridge/ebsa285-pci.c
@@ -14,9 +14,9 @@
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
-static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
+static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
-static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (dev->vendor == PCI_VENDOR_ID_CONTAQ &&
dev->device == PCI_DEVICE_ID_CONTAQ_82C693)
diff --git a/arch/arm/mach-footbridge/netwinder-pci.c b/arch/arm/mach-footbridge/netwinder-pci.c
index 9473aa0305e5..e8304392074b 100644
--- a/arch/arm/mach-footbridge/netwinder-pci.c
+++ b/arch/arm/mach-footbridge/netwinder-pci.c
@@ -18,7 +18,7 @@
* We now use the slot ID instead of the device identifiers to select
* which interrupt is routed where.
*/
-static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
switch (slot) {
case 0: /* host bridge */
diff --git a/arch/arm/mach-footbridge/personal-pci.c b/arch/arm/mach-footbridge/personal-pci.c
index 4391e433a4b2..9d19aa98a663 100644
--- a/arch/arm/mach-footbridge/personal-pci.c
+++ b/arch/arm/mach-footbridge/personal-pci.c
@@ -14,13 +14,12 @@
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
-static int irqmap_personal_server[] __initdata = {
+static int irqmap_personal_server[] = {
IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0,
IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI
};
-static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot,
- u8 pin)
+static int personal_server_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
unsigned char line;
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index e9cfe8e86f33..02bf3eab4196 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -89,8 +89,10 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
endif
+ifeq ($(CONFIG_ARM_CPU_SUSPEND),y)
AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
+endif
obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
obj-$(CONFIG_SOC_IMX1) += mach-imx1.o
diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
index 868781fd460c..14c630c899c5 100644
--- a/arch/arm/mach-imx/pm-imx5.c
+++ b/arch/arm/mach-imx/pm-imx5.c
@@ -301,14 +301,14 @@ static int __init imx_suspend_alloc_ocram(
if (!ocram_pool) {
pr_warn("%s: ocram pool unavailable!\n", __func__);
ret = -ENODEV;
- goto put_node;
+ goto put_device;
}
ocram_base = gen_pool_alloc(ocram_pool, size);
if (!ocram_base) {
pr_warn("%s: unable to alloc ocram!\n", __func__);
ret = -ENOMEM;
- goto put_node;
+ goto put_device;
}
phys = gen_pool_virt_to_phys(ocram_pool, ocram_base);
@@ -318,6 +318,8 @@ static int __init imx_suspend_alloc_ocram(
if (virt_out)
*virt_out = virt;
+put_device:
+ put_device(&pdev->dev);
put_node:
of_node_put(node);
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index 529f4b5bbd3a..4bfefbec971a 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -497,14 +497,14 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
if (!ocram_pool) {
pr_warn("%s: ocram pool unavailable!\n", __func__);
ret = -ENODEV;
- goto put_node;
+ goto put_device;
}
ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE);
if (!ocram_base) {
pr_warn("%s: unable to alloc ocram!\n", __func__);
ret = -ENOMEM;
- goto put_node;
+ goto put_device;
}
ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
@@ -527,7 +527,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat);
if (ret) {
pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret);
- goto put_node;
+ goto put_device;
}
ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat);
@@ -574,7 +574,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
&imx6_suspend,
MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info));
- goto put_node;
+ goto put_device;
pl310_cache_map_failed:
iounmap(pm_info->gpc_base.vbase);
@@ -584,6 +584,8 @@ iomuxc_map_failed:
iounmap(pm_info->src_base.vbase);
src_map_failed:
iounmap(pm_info->mmdc_base.vbase);
+put_device:
+ put_device(&pdev->dev);
put_node:
of_node_put(node);
diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
index 7d84b617af48..99d2e296082c 100644
--- a/arch/arm/mach-imx/suspend-imx6.S
+++ b/arch/arm/mach-imx/suspend-imx6.S
@@ -73,6 +73,7 @@
#define MX6Q_CCM_CCR 0x0
.align 3
+ .arm
.macro sync_l2_cache
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index cefe44f6889b..ba124f8704fa 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -3,6 +3,8 @@ menuconfig ARCH_INTEGRATOR
depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V6
select ARM_AMBA
select COMMON_CLK_VERSATILE
+ select CMA
+ select DMA_CMA
select HAVE_TCM
select ICST
select MFD_SYSCON
@@ -34,14 +36,13 @@ config INTEGRATOR_IMPD1
select ARM_VIC
select GPIO_PL061
select GPIOLIB
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
help
The IM-PD1 is an add-on logic module for the Integrator which
allows ARM(R) Ltd PrimeCells to be developed and evaluated.
The IM-PD1 can be found on the Integrator/PP2 platform.
- To compile this driver as a module, choose M here: the
- module will be called impd1.
-
config INTEGRATOR_CM7TDMI
bool "Integrator/CM7TDMI core module"
depends on ARCH_INTEGRATOR_AP
diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
index 84613abf35a3..79ff5b953431 100644
--- a/arch/arm/mach-keystone/keystone.c
+++ b/arch/arm/mach-keystone/keystone.c
@@ -65,7 +65,7 @@ static void __init keystone_init(void)
static long long __init keystone_pv_fixup(void)
{
long long offset;
- phys_addr_t mem_start, mem_end;
+ u64 mem_start, mem_end;
mem_start = memblock_start_of_DRAM();
mem_end = memblock_end_of_DRAM();
@@ -78,7 +78,7 @@ static long long __init keystone_pv_fixup(void)
if (mem_start < KEYSTONE_HIGH_PHYS_START ||
mem_end > KEYSTONE_HIGH_PHYS_END) {
pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
- (u64)mem_start, (u64)mem_end);
+ mem_start, mem_end);
return 0;
}
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 41c7b905980a..23e8146b9b32 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -239,10 +239,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
break;
case BUS_NOTIFY_BIND_DRIVER:
od = to_omap_device(pdev);
- if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
- pm_runtime_status_suspended(dev)) {
+ if (od) {
od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
- pm_runtime_set_active(dev);
+ if (od->_state == OMAP_DEVICE_STATE_ENABLED &&
+ pm_runtime_status_suspended(dev)) {
+ pm_runtime_set_active(dev);
+ }
}
break;
case BUS_NOTIFY_ADD_DEVICE:
diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c
index e348bcfe389d..cb8b02a1abe2 100644
--- a/arch/arm/mach-shmobile/pm-rmobile.c
+++ b/arch/arm/mach-shmobile/pm-rmobile.c
@@ -330,6 +330,7 @@ static int __init rmobile_init_pm_domains(void)
pmd = of_get_child_by_name(np, "pm-domains");
if (!pmd) {
+ iounmap(base);
pr_warn("%pOF lacks pm-domains node\n", np);
continue;
}
diff --git a/arch/arm/mach-socfpga/pm.c b/arch/arm/mach-socfpga/pm.c
index d4866788702c..b782294ee30b 100644
--- a/arch/arm/mach-socfpga/pm.c
+++ b/arch/arm/mach-socfpga/pm.c
@@ -60,14 +60,14 @@ static int socfpga_setup_ocram_self_refresh(void)
if (!ocram_pool) {
pr_warn("%s: ocram pool unavailable!\n", __func__);
ret = -ENODEV;
- goto put_node;
+ goto put_device;
}
ocram_base = gen_pool_alloc(ocram_pool, socfpga_sdram_self_refresh_sz);
if (!ocram_base) {
pr_warn("%s: unable to alloc ocram!\n", __func__);
ret = -ENOMEM;
- goto put_node;
+ goto put_device;
}
ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
@@ -78,7 +78,7 @@ static int socfpga_setup_ocram_self_refresh(void)
if (!suspend_ocram_base) {
pr_warn("%s: __arm_ioremap_exec failed!\n", __func__);
ret = -ENOMEM;
- goto put_node;
+ goto put_device;
}
/* Copy the code that puts DDR in self refresh to ocram */
@@ -92,6 +92,8 @@ static int socfpga_setup_ocram_self_refresh(void)
if (!socfpga_sdram_self_refresh_in_ocram)
ret = -EFAULT;
+put_device:
+ put_device(&pdev->dev);
put_node:
of_node_put(np);
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index de4b0e932f22..aa08b8cb0152 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -66,6 +66,7 @@ static const char * const sun8i_board_dt_compat[] = {
"allwinner,sun8i-h2-plus",
"allwinner,sun8i-h3",
"allwinner,sun8i-r40",
+ "allwinner,sun8i-v3",
"allwinner,sun8i-v3s",
NULL,
};
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index f9587be48235..37a2aaeb44cc 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -112,8 +112,8 @@ static const char * const tegra_dt_board_compat[] = {
};
DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)")
- .l2c_aux_val = 0x3c400001,
- .l2c_aux_mask = 0xc20fc3fe,
+ .l2c_aux_val = 0x3c400000,
+ .l2c_aux_mask = 0xc20fc3ff,
.smp = smp_ops(tegra_smp_ops),
.map_io = tegra_map_common_io,
.init_early = tegra_init_early,
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 808efbb89b88..02f613def40d 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1261,20 +1261,28 @@ static void __init l2c310_of_parse(const struct device_node *np,
ret = of_property_read_u32(np, "prefetch-data", &val);
if (ret == 0) {
- if (val)
+ if (val) {
prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
- else
+ *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
+ } else {
prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
+ *aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
+ }
+ *aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF prefetch-data property value is missing\n");
}
ret = of_property_read_u32(np, "prefetch-instr", &val);
if (ret == 0) {
- if (val)
+ if (val) {
prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
- else
+ *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
+ } else {
prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
+ *aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
+ }
+ *aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF prefetch-instr property value is missing\n");
}
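
The prefetch hunks above appear to mirror the devicetree prefetch-data/prefetch-instr settings into *aux_val and *aux_mask as well as into the prefetch-control value. The aux pair follows the usual L2C convention: the value eventually programmed is (hardware_value & mask) | val, so clearing a bit in the mask hands control of that bit to the kernel. A small illustration of that convention, with an illustrative bit name:

#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_PREFETCH_BIT	BIT(28)		/* illustrative bit only */

static void example_own_bit(u32 *aux_val, u32 *aux_mask, bool enable)
{
	if (enable)
		*aux_val |= EXAMPLE_PREFETCH_BIT;
	else
		*aux_val &= ~EXAMPLE_PREFETCH_BIT;
	*aux_mask &= ~EXAMPLE_PREFETCH_BIT;	/* value now comes from aux_val */
}

/* Effective register value, as applied elsewhere in the driver:
 *	new = (current_value & *aux_mask) | *aux_val;
 */
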
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 5461d589a1e2..60ac7c5999a9 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -5,6 +5,7 @@
* VMA_VM_FLAGS
* VM_EXEC
*/
+#include <linux/const.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
@@ -30,7 +31,7 @@
* act_mm - get current->active_mm
*/
.macro act_mm, rd
- bic \rd, sp, #8128
+ bic \rd, sp, #(THREAD_SIZE - 1) & ~63
bic \rd, \rd, #63
ldr \rd, [\rd, #TI_TASK]
.if (TSK_ACTIVE_MM > IMM12_MASK)
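
The act_mm change above replaces a hard-coded 8128 (only correct for 8 KiB stacks) with a mask derived from THREAD_SIZE, split across two bic instructions because an ARM data-processing immediate cannot always encode THREAD_SIZE - 1 in one go. Roughly the same computation in C, assuming the classic on-stack thread_info layout this file targets:

#include <linux/sched.h>
#include <linux/thread_info.h>

static inline struct task_struct *example_sp_to_task(unsigned long sp)
{
	/* Equivalent of: bic rd, sp, #(THREAD_SIZE - 1) & ~63; bic rd, rd, #63 */
	struct thread_info *ti = (struct thread_info *)(sp & ~(THREAD_SIZE - 1));

	return ti->task;	/* the assembly then loads active_mm from here */
}
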
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 25b3ee85066e..328ced7bfaf2 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -930,7 +930,11 @@ static inline void emit_a32_rsh_i64(const s8 dst[],
rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do LSR operation */
- if (val < 32) {
+ if (val == 0) {
+ /* An immediate value of 0 encodes a shift amount of 32
+ * for LSR. To shift by 0, don't do anything.
+ */
+ } else if (val < 32) {
emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
@@ -956,7 +960,11 @@ static inline void emit_a32_arsh_i64(const s8 dst[],
rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do ARSH operation */
- if (val < 32) {
+ if (val == 0) {
+ /* An immediate value of 0 encodes a shift amount of 32
+ * for ASR. To shift by 0, don't do anything.
+ */
+ } else if (val < 32) {
emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
@@ -993,21 +1001,35 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
arm_bpf_put_reg32(dst_hi, rd[0], ctx);
}
+static bool is_ldst_imm(s16 off, const u8 size)
+{
+ s16 off_max = 0;
+
+ switch (size) {
+ case BPF_B:
+ case BPF_W:
+ off_max = 0xfff;
+ break;
+ case BPF_H:
+ off_max = 0xff;
+ break;
+ case BPF_DW:
+ /* Need to make sure off+4 does not overflow. */
+ off_max = 0xfff - 4;
+ break;
+ }
+ return -off_max <= off && off <= off_max;
+}
+
/* *(size *)(dst + off) = src */
static inline void emit_str_r(const s8 dst, const s8 src[],
- s32 off, struct jit_ctx *ctx, const u8 sz){
+ s16 off, struct jit_ctx *ctx, const u8 sz){
const s8 *tmp = bpf2a32[TMP_REG_1];
- s32 off_max;
s8 rd;
rd = arm_bpf_get_reg32(dst, tmp[1], ctx);
- if (sz == BPF_H)
- off_max = 0xff;
- else
- off_max = 0xfff;
-
- if (off < 0 || off > off_max) {
+ if (!is_ldst_imm(off, sz)) {
emit_a32_mov_i(tmp[0], off, ctx);
emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
rd = tmp[0];
@@ -1036,18 +1058,12 @@ static inline void emit_str_r(const s8 dst, const s8 src[],
/* dst = *(size*)(src + off) */
static inline void emit_ldx_r(const s8 dst[], const s8 src,
- s32 off, struct jit_ctx *ctx, const u8 sz){
+ s16 off, struct jit_ctx *ctx, const u8 sz){
const s8 *tmp = bpf2a32[TMP_REG_1];
const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
s8 rm = src;
- s32 off_max;
-
- if (sz == BPF_H)
- off_max = 0xff;
- else
- off_max = 0xfff;
- if (off < 0 || off > off_max) {
+ if (!is_ldst_imm(off, sz)) {
emit_a32_mov_i(tmp[0], off, ctx);
emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
rm = tmp[0];
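
Two encoding limits drive the JIT changes above: the ARM LSR/ASR immediate field cannot express a shift of 0 (an encoded 0 means 32), so a zero shift is now emitted as no instruction at all, and single load/store instructions only encode 12-bit offsets (8-bit for halfwords), which the new is_ldst_imm() checks before falling back to computing the address in a register. A host-side illustration of the shift-encoding quirk:

/* Maps the 5-bit shift-immediate field to the shift amount actually
 * performed for LSR/ASR; an encoded 0 means "shift by 32", which is
 * why emit_a32_rsh_i64()/emit_a32_arsh_i64() special-case val == 0.
 */
static unsigned int arm_lsr_asr_amount(unsigned int imm5)
{
	return imm5 ? imm5 : 32;
}
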
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 377ff9cda667..c83baa51289f 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -240,6 +240,7 @@ config SAMSUNG_PM_DEBUG
bool "Samsung PM Suspend debug"
depends on PM && DEBUG_KERNEL
depends on DEBUG_EXYNOS_UART || DEBUG_S3C24XX_UART || DEBUG_S3C2410_UART
+ depends on DEBUG_LL && MMU
help
Say Y here if you want verbose debugging from the PM Suspend and
Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index 0dc23fc227ed..cf08cb726767 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -98,21 +98,21 @@ asm (
"optprobe_template_end:\n");
#define TMPL_VAL_IDX \
- ((unsigned long *)&optprobe_template_val - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry)
#define TMPL_CALL_IDX \
- ((unsigned long *)&optprobe_template_call - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry)
#define TMPL_END_IDX \
- ((unsigned long *)&optprobe_template_end - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry)
#define TMPL_ADD_SP \
- ((unsigned long *)&optprobe_template_add_sp - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry)
#define TMPL_SUB_SP \
- ((unsigned long *)&optprobe_template_sub_sp - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_BEGIN \
- ((unsigned long *)&optprobe_template_restore_begin - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_ORIGN_INSN \
- ((unsigned long *)&optprobe_template_restore_orig_insn - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_END \
- ((unsigned long *)&optprobe_template_restore_end - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry)
/*
* ARM can always optimize an instruction when using ARM ISA, except
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
}
/* Copy arch-dep-instance from template. */
- memcpy(code, (unsigned long *)&optprobe_template_entry,
+ memcpy(code, (unsigned long *)optprobe_template_entry,
TMPL_END_IDX * sizeof(kprobe_opcode_t));
/* Adjust buffer according to instruction. */
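
The dropped `&` operators above appear to go with redeclaring the optprobe_template_* markers as arrays elsewhere in this series, so the bare symbol already denotes the start of the template code and is taken as a plain address (avoiding, for example, function-pointer quirks such as a set Thumb bit). The index macros then reduce to ordinary pointer subtraction, shown standalone with illustrative names:

/* Word-index arithmetic used by the TMPL_*_IDX macros: subtracting two
 * 'unsigned long *' pointers yields an index in words, not bytes.
 * Symbol names are illustrative only.
 */
extern unsigned long example_tmpl_entry[];
extern unsigned long example_tmpl_val[];

static inline long example_tmpl_val_idx(void)
{
	return (unsigned long *)example_tmpl_val -
	       (unsigned long *)example_tmpl_entry;
}
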
diff --git a/arch/arm/probes/uprobes/core.c b/arch/arm/probes/uprobes/core.c
index bf992264060e..b1b7b51c938c 100644
--- a/arch/arm/probes/uprobes/core.c
+++ b/arch/arm/probes/uprobes/core.c
@@ -207,7 +207,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
static struct undef_hook uprobes_arm_break_hook = {
.instr_mask = 0x0fffffff,
.instr_val = (UPROBE_SWBP_ARM_INSN & 0x0fffffff),
- .cpsr_mask = MODE_MASK,
+ .cpsr_mask = (PSR_T_BIT | MODE_MASK),
.cpsr_val = USR_MODE,
.fn = uprobe_trap_handler,
};
@@ -215,7 +215,7 @@ static struct undef_hook uprobes_arm_break_hook = {
static struct undef_hook uprobes_arm_ss_hook = {
.instr_mask = 0x0fffffff,
.instr_val = (UPROBE_SS_ARM_INSN & 0x0fffffff),
- .cpsr_mask = MODE_MASK,
+ .cpsr_mask = (PSR_T_BIT | MODE_MASK),
.cpsr_val = USR_MODE,
.fn = uprobe_trap_handler,
};
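
Adding PSR_T_BIT to cpsr_mask above tightens the undef-hook match so these ARM-encoding breakpoints only fire when the CPU is in ARM state (Thumb bit clear) and in user mode. The matching rule, spelled out in C as a sketch:

#include <linux/types.h>
#include <asm/ptrace.h>

static bool example_hook_matches(unsigned long cpsr)
{
	const unsigned long mask = PSR_T_BIT | MODE_MASK;
	const unsigned long val  = USR_MODE;	/* user mode, T bit clear */

	return (cpsr & mask) == val;	/* how the undef_hook fields are compared */
}
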
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index f4efff9d3afb..1f5ec9741e6d 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -10,12 +10,13 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
ccflags-y := -fPIC -fno-common -fno-builtin -fno-stack-protector
ccflags-y += -DDISABLE_BRANCH_PROFILING
-VDSO_LDFLAGS := -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
-VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
-VDSO_LDFLAGS += -nostdlib -shared
-VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
-VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
+ldflags-$(CONFIG_CPU_ENDIAN_BE8) := --be8
+ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
+ -z max-page-size=4096 -z common-page-size=4096 \
+ -nostdlib -shared $(ldflags-y) \
+ $(call ld-option, --hash-style=sysv) \
+ $(call ld-option, --build-id) \
+ -T
obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds
@@ -37,8 +38,8 @@ KCOV_INSTRUMENT := n
$(obj)/vdso.o : $(obj)/vdso.so
# Link rule for the .so file
-$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
- $(call if_changed,vdsold)
+$(obj)/vdso.so.raw: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,ld)
$(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/vdsomunge FORCE
$(call if_changed,vdsomunge)
@@ -48,11 +49,6 @@ $(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
-# Actual build commands
-quiet_cmd_vdsold = VDSO $@
- cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \
- -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
-
quiet_cmd_vdsomunge = MUNGE $@
cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 07060e5b5864..dd946c77e801 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -404,8 +404,6 @@ static int __init xen_guest_init(void)
return -ENOMEM;
}
gnttab_init();
- if (!xen_initial_domain())
- xenbus_probe(NULL);
/*
* Making sure board specific code will not set up ops for
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 0641ba54ab62..8a8a388549e7 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -91,10 +91,39 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
int i;
for (i = 0; i < count; i++) {
+ struct gnttab_unmap_grant_ref unmap;
+ int rc;
+
if (map_ops[i].status)
continue;
- set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
- map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT);
+ if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
+ map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT)))
+ continue;
+
+ /*
+ * Signal an error for this slot. This in turn requires
+ * immediate unmapping.
+ */
+ map_ops[i].status = GNTST_general_error;
+ unmap.host_addr = map_ops[i].host_addr,
+ unmap.handle = map_ops[i].handle;
+ map_ops[i].handle = ~0;
+ if (map_ops[i].flags & GNTMAP_device_map)
+ unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
+ else
+ unmap.dev_bus_addr = 0;
+
+ /*
+ * Pre-populate the status field, to be recognizable in
+ * the log message below.
+ */
+ unmap.status = 1;
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
+ &unmap, 1);
+ if (rc || unmap.status != GNTST_okay)
+ pr_err_once("gnttab unmap failed: rc=%d st=%d\n",
+ rc, unmap.status);
}
return 0;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 51fe21f5d078..1daefa57e274 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -473,7 +473,7 @@ config ARM64_ERRATUM_1024718
help
This option adds work around for Arm Cortex-A55 Erratum 1024718.
- Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
+ Affected Cortex-A55 cores (all revisions) could cause incorrect
update of the hardware dirty bit when the DBM/AP bits are updated
without a break-before-make. The work around is to disable the usage
of hardware DBM locally on the affected cores. CPUs not affected by
@@ -499,6 +499,22 @@ config ARM64_ERRATUM_1463225
If unsure, say Y.
+config ARM64_ERRATUM_1542419
+ bool "Neoverse-N1: workaround mis-ordering of instruction fetches"
+ default y
+ help
+ This option adds a workaround for ARM Neoverse-N1 erratum
+ 1542419.
+
+ Affected Neoverse-N1 cores could execute a stale instruction when
+ modified by another CPU. The workaround depends on a firmware
+ counterpart.
+
+ Workaround the issue by hiding the DIC feature from EL0. This
+ forces user-space to perform cache maintenance.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
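
The help text above says the workaround hides the DIC feature from EL0 so user space has to do its own cache maintenance after writing instructions. For a user-space JIT that usually just means calling the compiler builtin below before jumping into freshly written code; this is a hedged illustration of the requirement, not kernel code:

#include <stddef.h>
#include <stdint.h>

typedef void (*generated_fn)(void);

static void example_run_generated(void *buf, size_t len)
{
	/* ... write instructions into buf ... */

	/* Explicit I-cache/D-cache synchronisation; needed once the
	 * kernel stops advertising DIC to EL0.
	 */
	__builtin___clear_cache((char *)buf, (char *)buf + len);

	((generated_fn)(uintptr_t)buf)();
}
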
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 393d2b524284..91c7ffad8541 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -46,6 +46,7 @@ config ARCH_BCM_IPROC
config ARCH_BERLIN
bool "Marvell Berlin SoC Family"
select DW_APB_ICTL
+ select DW_APB_TIMER_OF
select GPIOLIB
select PINCTRL
help
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 9a5e28141211..dc54a883513a 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King
-LDFLAGS_vmlinux :=--no-undefined -X
+LDFLAGS_vmlinux :=--no-undefined -X -z norelro
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
GZFLAGS :=-9
@@ -18,7 +18,7 @@ ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
# for relative relocs, since this leads to better Image compression
# with the relocation offsets always being zero.
-LDFLAGS_vmlinux += -shared -Bsymbolic -z notext -z norelro \
+LDFLAGS_vmlinux += -shared -Bsymbolic -z notext \
$(call ld-option, --no-apply-dynamic-relocs)
endif
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index 094cfed13df9..13ce24e922ee 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -97,7 +97,7 @@
&emac {
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ext_rgmii_phy>;
phy-supply = <&reg_dc1sw>;
status = "okay";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
index d5b6e8159a33..5d0905f0f1c1 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
@@ -52,7 +52,7 @@
&emac {
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-txid";
phy-handle = <&ext_rgmii_phy>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
index 897e60cbe38d..ecb3e10c85e0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
@@ -67,8 +67,6 @@
};
&ehci0 {
- phys = <&usbphy 0>;
- phy-names = "usb";
status = "okay";
};
@@ -107,6 +105,7 @@
pinctrl-0 = <&mmc2_pins>;
vmmc-supply = <&reg_dcdc1>;
vqmmc-supply = <&reg_eldo1>;
+ max-frequency = <200000000>;
bus-width = <8>;
non-removable;
cap-mmc-hw-reset;
@@ -115,8 +114,6 @@
};
&ohci0 {
- phys = <&usbphy 0>;
- phy-names = "usb";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
index 6723b8695e0b..ca39084fddc0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
@@ -51,7 +51,6 @@
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
vmmc-supply = <&reg_dcdc1>;
- non-removable;
disable-wp;
bus-width = <4>;
cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 7abc4ea30541..30cc2e83a288 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -264,7 +264,7 @@
resets = <&ccu RST_BUS_MMC2>;
reset-names = "ahb";
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- max-frequency = <200000000>;
+ max-frequency = <150000000>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -312,6 +312,8 @@
<&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_BUS_OHCI0>,
<&ccu RST_BUS_EHCI0>;
+ phys = <&usbphy 0>;
+ phy-names = "usb";
status = "disabled";
};
@@ -322,6 +324,8 @@
clocks = <&ccu CLK_BUS_OHCI0>,
<&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_BUS_OHCI0>;
+ phys = <&usbphy 0>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
index 3e0d5a9c096d..5fbfa76daae2 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
@@ -157,7 +157,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
index b75ca4d7d001..7a30211d59ef 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
@@ -164,7 +164,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index bd4391269611..6e4e45907738 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -70,8 +70,7 @@
};
pmu {
- compatible = "arm,cortex-a53-pmu",
- "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
index faa017d4cd56..636bab51de38 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
@@ -151,6 +151,7 @@
};
&qspi {
+ status = "okay";
flash@0 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index 8f0bb3c44bd6..5d7724b3a612 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -266,6 +266,11 @@
};
};
+&hwrng {
+ clocks = <&clkc CLKID_RNG0>;
+ clock-names = "core";
+};
+
&i2c_A {
clocks = <&clkc CLKID_I2C>;
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
index bdf7c6c5983c..30fa9302a4dc 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
@@ -399,7 +399,7 @@
#size-cells = <1>;
compatible = "winbond,w25q16", "jedec,spi-nor";
reg = <0>;
- spi-max-frequency = <3000000>;
+ spi-max-frequency = <104000000>;
};
};
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index ea854f689fda..6bfb7bbd264a 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -745,7 +745,7 @@
};
qspi: spi@66470200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi";
+ compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi";
reg = <0x66470200 0x184>,
<0x66470000 0x124>,
<0x67017408 0x004>,
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
index a1e3194b7483..d64f97d97c35 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
@@ -378,7 +378,7 @@
s2mps13-pmic@66 {
compatible = "samsung,s2mps13-pmic";
interrupt-parent = <&gpa0>;
- interrupts = <7 IRQ_TYPE_NONE>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
reg = <0x66>;
samsung,s2mps11-wrstbi-ground;
diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
index 00dd89b92b42..2ba62118ae90 100644
--- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
@@ -85,7 +85,7 @@
s2mps15_pmic@66 {
compatible = "samsung,s2mps15-pmic";
reg = <0x66>;
- interrupts = <2 IRQ_TYPE_NONE>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-parent = <&gpa0>;
pinctrl-names = "default";
pinctrl-0 = <&pmic_irq>;
@@ -152,6 +152,7 @@
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1150000>;
regulator-enable-ramp-delay = <125>;
+ regulator-always-on;
};
ldo8_reg: LDO8 {
diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
index 31b1a606cb66..5c5e57026c27 100644
--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
@@ -62,8 +62,10 @@
};
psci {
- compatible = "arm,psci-0.2";
+ compatible = "arm,psci";
method = "smc";
+ cpu_off = <0x84000002>;
+ cpu_on = <0xC4000003>;
};
soc: soc {
@@ -494,13 +496,6 @@
pmu_system_controller: system-controller@105c0000 {
compatible = "samsung,exynos7-pmu", "syscon";
reg = <0x105c0000 0x5000>;
-
- reboot: syscon-reboot {
- compatible = "syscon-reboot";
- regmap = <&pmu_system_controller>;
- offset = <0x0400>;
- mask = <0x1>;
- };
};
rtc: rtc@10590000 {
@@ -638,3 +633,4 @@
};
#include "exynos7-pinctrl.dtsi"
+#include "arm/exynos-syscon-restart.dtsi"
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index 5da732f82fa0..712264e4e26e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -177,6 +177,7 @@
ranges = <0x0 0x00 0x1700000 0x100000>;
reg = <0x00 0x1700000 0x0 0x100000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
sec_jr0: jr@10000 {
compatible = "fsl,sec-v5.4-job-ring",
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index b9c0f2de8f12..3ec60e33f299 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -237,6 +237,7 @@
ranges = <0x0 0x00 0x1700000 0x100000>;
reg = <0x00 0x1700000 0x0 0x100000>;
interrupts = <0 75 0x4>;
+ dma-coherent;
sec_jr0: jr@10000 {
compatible = "fsl,sec-v5.4-job-ring",
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index de6af453a6e1..3452971a8e1c 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -243,6 +243,7 @@
ranges = <0x0 0x00 0x1700000 0x100000>;
reg = <0x00 0x1700000 0x0 0x100000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
sec_jr0: jr@10000 {
compatible = "fsl,sec-v5.4-job-ring",
@@ -303,7 +304,7 @@
dcfg: dcfg@1ee0000 {
compatible = "fsl,ls1046a-dcfg", "syscon";
- reg = <0x0 0x1ee0000 0x0 0x10000>;
+ reg = <0x0 0x1ee0000 0x0 0x1000>;
big-endian;
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
index c98bcbc8dfba..53848e0e5e0c 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
@@ -530,6 +530,17 @@
status = "ok";
compatible = "adi,adv7533";
reg = <0x39>;
+ adi,dsi-lanes = <4>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+ port@1 {
+ reg = <1>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index e80a792827ed..60568392d21e 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -515,7 +515,7 @@
reg = <0x39>;
interrupt-parent = <&gpio1>;
interrupts = <1 2>;
- pd-gpio = <&gpio0 4 0>;
+ pd-gpios = <&gpio0 4 0>;
adi,dsi-lanes = <4>;
#sound-dai-cells = <0>;
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
index 3ab25ad402b9..1a3e6e3b04eb 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
@@ -19,6 +19,16 @@
model = "Globalscale Marvell ESPRESSOBin Board";
compatible = "globalscale,espressobin", "marvell,armada3720", "marvell,armada3710";
+ aliases {
+ ethernet0 = &eth0;
+ /* for dsa slave device */
+ ethernet1 = &switch0port1;
+ ethernet2 = &switch0port2;
+ ethernet3 = &switch0port3;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
@@ -130,25 +140,25 @@
#address-cells = <1>;
#size-cells = <0>;
- port@0 {
+ switch0port0: port@0 {
reg = <0>;
label = "cpu";
ethernet = <&eth0>;
};
- port@1 {
+ switch0port1: port@1 {
reg = <1>;
label = "wan";
phy-handle = <&switch0phy0>;
};
- port@2 {
+ switch0port2: port@2 {
reg = <2>;
label = "lan0";
phy-handle = <&switch0phy1>;
};
- port@3 {
+ switch0port3: port@3 {
reg = <3>;
label = "lan1";
phy-handle = <&switch0phy2>;
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index d9531e242eb4..3a611250f598 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -143,7 +143,8 @@
};
nb_periph_clk: nb-periph-clk@13000 {
- compatible = "marvell,armada-3700-periph-clock-nb";
+ compatible = "marvell,armada-3700-periph-clock-nb",
+ "syscon";
reg = <0x13000 0x100>;
clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
<&tbg 3>, <&xtalclk>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index de2c47bdbe64..2bcee994898a 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -641,6 +641,8 @@
clocks = <&pericfg CLK_PERI_MSDC30_1_PD>,
<&topckgen CLK_TOP_AXI_SEL>;
clock-names = "source", "hclk";
+ resets = <&pericfg MT7622_PERI_MSDC1_SW_RST>;
+ reset-names = "hrst";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index abd2f15a544b..12837523dac8 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -223,21 +223,21 @@
cpu_on = <0x84000003>;
};
- clk26m: oscillator@0 {
+ clk26m: oscillator0 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <26000000>;
clock-output-names = "clk26m";
};
- clk32k: oscillator@1 {
+ clk32k: oscillator1 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <32000>;
clock-output-names = "clk32k";
};
- cpum_ck: oscillator@2 {
+ cpum_ck: oscillator2 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <0>;
@@ -253,19 +253,19 @@
sustainable-power = <1500>; /* milliwatts */
trips {
- threshold: trip-point@0 {
+ threshold: trip-point0 {
temperature = <68000>;
hysteresis = <2000>;
type = "passive";
};
- target: trip-point@1 {
+ target: trip-point1 {
temperature = <85000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit: cpu_crit@0 {
+ cpu_crit: cpu_crit0 {
temperature = <115000>;
hysteresis = <2000>;
type = "critical";
@@ -273,12 +273,12 @@
};
cooling-maps {
- map@0 {
+ map0 {
trip = <&target>;
cooling-device = <&cpu0 0 0>;
contribution = <3072>;
};
- map@1 {
+ map1 {
trip = <&target>;
cooling-device = <&cpu2 0 0>;
contribution = <1024>;
@@ -291,7 +291,7 @@
#address-cells = <2>;
#size-cells = <2>;
ranges;
- vpu_dma_reserved: vpu_dma_mem_region {
+ vpu_dma_reserved: vpu_dma_mem_region@b7000000 {
compatible = "shared-dma-pool";
reg = <0 0xb7000000 0 0x500000>;
alignment = <0x1000>;
@@ -343,7 +343,7 @@
reg = <0 0x10005000 0 0x1000>;
};
- pio: pinctrl@10005000 {
+ pio: pinctrl@1000b000 {
compatible = "mediatek,mt8173-pinctrl";
reg = <0 0x1000b000 0 0x1000>;
mediatek,pctl-regmap = <&syscfg_pctl_a>;
@@ -541,7 +541,7 @@
status = "disabled";
};
- gic: interrupt-controller@10220000 {
+ gic: interrupt-controller@10221000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
interrupt-parent = <&gic>;
@@ -1111,7 +1111,7 @@
<&mmsys CLK_MM_DSI1_DIGITAL>,
<&mipi_tx1>;
clock-names = "engine", "digital", "hs";
- phy = <&mipi_tx1>;
+ phys = <&mipi_tx1>;
phy-names = "dphy";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
index 57d3f00464ce..7352954e12be 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
@@ -32,7 +32,7 @@
phy-reset-gpios = <&gpio TEGRA194_MAIN_GPIO(G, 5) GPIO_ACTIVE_LOW>;
phy-handle = <&phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
mdio {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 6597c0894137..4eecf3b02468 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -826,6 +826,7 @@
<&tegra_car 128>, /* hda2hdmi */
<&tegra_car 111>; /* hda2codec_2x */
reset-names = "hda", "hda2hdmi", "hda2codec_2x";
+ power-domains = <&pd_sor>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
index 390a2fa28514..6754817658fa 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
@@ -516,7 +516,7 @@
pins = "gpio63", "gpio64", "gpio65", "gpio66",
"gpio67", "gpio68";
drive-strength = <8>;
- bias-pull-none;
+ bias-disable;
};
};
cdc_pdm_lines_sus: pdm_lines_off {
@@ -529,7 +529,7 @@
pins = "gpio63", "gpio64", "gpio65", "gpio66",
"gpio67", "gpio68";
drive-strength = <2>;
- bias-disable;
+ bias-pull-down;
};
};
};
@@ -545,7 +545,7 @@
pins = "gpio113", "gpio114", "gpio115",
"gpio116";
drive-strength = <8>;
- bias-pull-none;
+ bias-disable;
};
};
@@ -573,7 +573,7 @@
pinconf {
pins = "gpio110";
drive-strength = <8>;
- bias-pull-none;
+ bias-disable;
};
};
@@ -599,7 +599,7 @@
pinconf {
pins = "gpio116";
drive-strength = <8>;
- bias-pull-none;
+ bias-disable;
};
};
ext_mclk_tlmm_lines_sus: mclk_lines_off {
@@ -627,7 +627,7 @@
pins = "gpio112", "gpio117", "gpio118",
"gpio119";
drive-strength = <8>;
- bias-pull-none;
+ bias-disable;
};
};
ext_sec_tlmm_lines_sus: tlmm_lines_off {
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 8011e564a234..ba42c6239922 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -64,7 +64,7 @@
no-map;
};
- reserved@8668000 {
+ reserved@86680000 {
reg = <0x0 0x86680000 0x0 0x80000>;
no-map;
};
@@ -77,7 +77,7 @@
qcom,client-id = <1>;
};
- rfsa@867e00000 {
+ rfsa@867e0000 {
reg = <0x0 0x867e0000 0x0 0x20000>;
no-map;
};
@@ -877,7 +877,7 @@
reg-names = "mdp_phys";
interrupt-parent = <&mdss>;
- interrupts = <0 0>;
+ interrupts = <0>;
clocks = <&gcc GCC_MDSS_AHB_CLK>,
<&gcc GCC_MDSS_AXI_CLK>,
@@ -909,7 +909,7 @@
reg-names = "dsi_ctrl";
interrupt-parent = <&mdss>;
- interrupts = <4 0>;
+ interrupts = <4>;
assigned-clocks = <&gcc BYTE0_CLK_SRC>,
<&gcc PCLK0_CLK_SRC>;
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
index 196b1c0ceb9b..b968afa8da17 100644
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
@@ -99,7 +99,7 @@
wcd_codec: codec@f000 {
compatible = "qcom,pm8916-wcd-analog-codec";
- reg = <0xf000 0x200>;
+ reg = <0xf000>;
reg-names = "pmic-codec-core";
clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
clock-names = "mclk";
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index b8c9a56562f2..da5453307d94 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -454,6 +454,7 @@
ipmmu_vip0: mmu@e7b00000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xe7b00000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 4>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
#iommu-cells = <1>;
};
@@ -461,6 +462,7 @@
ipmmu_vip1: mmu@e7960000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xe7960000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 11>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
#iommu-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
index 0ead552d7eae..600adc25eaef 100644
--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
@@ -430,6 +430,7 @@
bus-width = <8>;
mmc-hs200-1_8v;
non-removable;
+ full-pwr-cycle-in-suspend;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
index 8302d86d35c4..d89f3451ace5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
@@ -86,13 +86,13 @@
assigned-clock-rate = <50000000>;
assigned-clocks = <&cru SCLK_MAC2PHY>;
assigned-clock-parents = <&cru SCLK_MAC2PHY_SRC>;
-
+ status = "okay";
};
&i2c1 {
status = "okay";
- rk805: rk805@18 {
+ rk805: pmic@18 {
compatible = "rockchip,rk805";
reg = <0x18>;
interrupt-parent = <&gpio2>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
index 91061d9cf78b..5b1ece4a68d6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
@@ -255,6 +255,7 @@
};
&usb20_otg {
+ dr_mode = "host";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index e9147e35b739..03f2bc7d30d8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -112,7 +112,7 @@
&i2c1 {
status = "okay";
- rk805: rk805@18 {
+ rk805: pmic@18 {
compatible = "rockchip,rk805";
reg = <0x18>;
interrupt-parent = <&gpio2>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 92186edefeb9..6be7c67584ba 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -1085,8 +1085,8 @@
uart0 {
uart0_xfer: uart0-xfer {
- rockchip,pins = <1 RK_PB1 1 &pcfg_pull_up>,
- <1 RK_PB0 1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB1 1 &pcfg_pull_none>,
+ <1 RK_PB0 1 &pcfg_pull_up>;
};
uart0_cts: uart0-cts {
@@ -1104,8 +1104,8 @@
uart1 {
uart1_xfer: uart1-xfer {
- rockchip,pins = <3 RK_PA4 4 &pcfg_pull_up>,
- <3 RK_PA6 4 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA4 4 &pcfg_pull_none>,
+ <3 RK_PA6 4 &pcfg_pull_up>;
};
uart1_cts: uart1-cts {
@@ -1123,15 +1123,15 @@
uart2-0 {
uart2m0_xfer: uart2m0-xfer {
- rockchip,pins = <1 RK_PA0 2 &pcfg_pull_up>,
- <1 RK_PA1 2 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PA0 2 &pcfg_pull_none>,
+ <1 RK_PA1 2 &pcfg_pull_up>;
};
};
uart2-1 {
uart2m1_xfer: uart2m1-xfer {
- rockchip,pins = <2 RK_PA0 1 &pcfg_pull_up>,
- <2 RK_PA1 1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>,
+ <2 RK_PA1 1 &pcfg_pull_up>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
index 1315972412df..23098c13ad83 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
@@ -159,7 +159,7 @@
pinctrl-0 = <&rgmii_pins>;
snps,reset-active-low;
snps,reset-delays-us = <0 10000 50000>;
- snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_HIGH>;
+ snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_LOW>;
tx_delay = <0x10>;
rx_delay = <0x10>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 0130b9f98c9d..b155f657292b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -101,7 +101,7 @@
vcc5v0_host: vcc5v0-host-regulator {
compatible = "regulator-fixed";
- gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
+ gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
enable-active-low;
pinctrl-names = "default";
pinctrl-0 = <&vcc5v0_host_en>;
@@ -156,7 +156,7 @@
phy-mode = "rgmii";
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>;
- snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>;
+ snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
snps,reset-active-low;
snps,reset-delays-us = <0 10000 50000>;
tx_delay = <0x10>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index cea44a7c7cf9..b1c1a88a1c20 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -29,6 +29,9 @@
i2c6 = &i2c6;
i2c7 = &i2c7;
i2c8 = &i2c8;
+ mmc0 = &sdio0;
+ mmc1 = &sdmmc;
+ mmc2 = &sdhci;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -195,6 +198,7 @@
reg = <0x0 0xf8000000 0x0 0x2000000>,
<0x0 0xfd000000 0x0 0x1000000>;
reg-names = "axi-base", "apb-base";
+ device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
@@ -213,7 +217,6 @@
<0 0 0 2 &pcie0_intc 1>,
<0 0 0 3 &pcie0_intc 2>,
<0 0 0 4 &pcie0_intc 3>;
- linux,pci-domain = <0>;
max-link-speed = <1>;
msi-map = <0x0 &its 0x0 0x1000>;
phys = <&pcie_phy 0>, <&pcie_phy 1>,
@@ -376,7 +379,7 @@
reset-names = "usb3-otg";
status = "disabled";
- usbdrd_dwc3_0: dwc3 {
+ usbdrd_dwc3_0: usb@fe800000 {
compatible = "snps,dwc3";
reg = <0x0 0xfe800000 0x0 0x100000>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH 0>;
@@ -409,7 +412,7 @@
reset-names = "usb3-otg";
status = "disabled";
- usbdrd_dwc3_1: dwc3 {
+ usbdrd_dwc3_1: usb@fe900000 {
compatible = "snps,dwc3";
reg = <0x0 0xfe900000 0x0 0x100000>;
interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>;
@@ -1817,10 +1820,10 @@
gpu: gpu@ff9a0000 {
compatible = "rockchip,rk3399-mali", "arm,mali-t860";
reg = <0x0 0xff9a0000 0x0 0x10000>;
- interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "gpu", "job", "mmu";
+ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "job", "mmu", "gpu";
clocks = <&cru ACLK_GPU>;
power-domains = <&power RK3399_PD_GPU>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index caf112629caa..62429c412b33 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -610,7 +610,7 @@
clocks = <&sys_clk 6>;
reset-names = "ether";
resets = <&sys_rst 6>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
local-mac-address = [00 00 00 00 00 00];
socionext,syscon-phy-mode = <&soc_glue 0>;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
index 2a4cf427f5d3..8fe9a57b9562 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
@@ -416,7 +416,7 @@
clocks = <&sys_clk 6>;
reset-names = "ether";
resets = <&sys_rst 6>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
local-mac-address = [00 00 00 00 00 00];
socionext,syscon-phy-mode = <&soc_glue 0>;
@@ -437,7 +437,7 @@
clocks = <&sys_clk 7>;
reset-names = "ether";
resets = <&sys_rst 7>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
local-mac-address = [00 00 00 00 00 00];
socionext,syscon-phy-mode = <&soc_glue 1>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index a516c0e01429..8a885ae647b7 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -411,7 +411,7 @@
};
i2c0: i2c@ff020000 {
- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
+ compatible = "cdns,i2c-r1p14";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 17 4>;
@@ -421,7 +421,7 @@
};
i2c1: i2c@ff030000 {
- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
+ compatible = "cdns,i2c-r1p14";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 18 4>;
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 483a7130cf0e..496c243de4ac 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -232,17 +232,19 @@ AES_ENTRY(aes_ctr_encrypt)
bmi .Lctr1x
cmn w6, #4 /* 32 bit overflow? */
bcs .Lctr1x
- ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
- dup v7.4s, w6
+ add w7, w6, #1
mov v0.16b, v4.16b
- add v7.4s, v7.4s, v8.4s
+ add w8, w6, #2
mov v1.16b, v4.16b
- rev32 v8.16b, v7.16b
+ add w9, w6, #3
mov v2.16b, v4.16b
+ rev w7, w7
mov v3.16b, v4.16b
- mov v1.s[3], v8.s[0]
- mov v2.s[3], v8.s[1]
- mov v3.s[3], v8.s[2]
+ rev w8, w8
+ mov v1.s[3], w7
+ rev w9, w9
+ mov v2.s[3], w8
+ mov v3.s[3], w9
ld1 {v5.16b-v7.16b}, [x20], #48 /* get 3 input blocks */
bl aes_encrypt_block4x
eor v0.16b, v5.16b, v0.16b
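
The CTR hunk above stops loading an addend vector from a literal pool and instead builds the three extra counter words with scalar adds plus rev (byte reversal). In C terms, for blocks +1 to +3 it computes roughly the following; this is an illustration only, not the cipher code:

#include <stdint.h>

/* ctr is the low 32-bit counter word as a native integer (w6 in the
 * assembly); the counter stored in the block is big-endian, hence the
 * byte swap (the "rev" instruction).
 */
static void example_next_ctr_words(uint32_t ctr, uint32_t out_be[3])
{
	out_be[0] = __builtin_bswap32(ctr + 1);	/* lane 3 of v1 */
	out_be[1] = __builtin_bswap32(ctr + 2);	/* lane 3 of v2 */
	out_be[2] = __builtin_bswap32(ctr + 3);	/* lane 3 of v3 */
}
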
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index d8c521c757e8..3f51743c01d8 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -21,6 +21,7 @@
MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sha1");
struct sha1_ce_state {
struct sha1_state sst;
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index c47d1a28ff6b..4022c51a377b 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -21,6 +21,8 @@
MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
struct sha256_ce_state {
struct sha256_state sst;
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index a336feac0f59..df20ab645487 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -22,6 +22,10 @@
MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sha3-224");
+MODULE_ALIAS_CRYPTO("sha3-256");
+MODULE_ALIAS_CRYPTO("sha3-384");
+MODULE_ALIAS_CRYPTO("sha3-512");
asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks,
int md_len);
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index f2c5f28c622a..2871e68ae3df 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -22,6 +22,8 @@
MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
int blocks);
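
The MODULE_ALIAS_CRYPTO() lines added to the four glue drivers above let the crypto API autoload these modules when an algorithm is requested by name rather than by module name. A hedged sketch of the request path they serve:

#include <crypto/hash.h>

static int example_need_sha256(void)
{
	struct crypto_shash *tfm;

	/* A request by algorithm name; the module loader matches it
	 * against MODULE_ALIAS_CRYPTO("sha256") and can pull in the
	 * ARMv8 Crypto Extensions implementation automatically.
	 */
	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_free_shash(tfm);
	return 0;
}
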
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 1a7ba3de7079..4fbbcdda70d7 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -73,13 +73,13 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature) \
".popsection\n" \
- ".pushsection .altinstr_replacement, \"a\"\n" \
+ ".subsection 1\n" \
"663:\n\t" \
newinstr "\n" \
"664:\n\t" \
- ".popsection\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
- ".org . - (662b-661b) + (664b-663b)\n" \
+ ".org . - (662b-661b) + (664b-663b)\n\t" \
+ ".previous\n" \
".endif\n"
#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
@@ -117,11 +117,11 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
662: .pushsection .altinstructions, "a"
altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
.popsection
- .pushsection .altinstr_replacement, "ax"
+ .subsection 1
663: \insn2
-664: .popsection
- .org . - (664b-663b) + (662b-661b)
+664: .org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
+ .previous
.endif
.endm
@@ -160,7 +160,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
.pushsection .altinstructions, "a"
altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
.popsection
- .pushsection .altinstr_replacement, "ax"
+ .subsection 1
.align 2 /* So GAS knows label 661 is suitably aligned */
661:
.endm
@@ -179,9 +179,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
.macro alternative_else
662:
.if .Lasm_alt_mode==0
- .pushsection .altinstr_replacement, "ax"
+ .subsection 1
.else
- .popsection
+ .previous
.endif
663:
.endm
@@ -191,11 +191,11 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
*/
.macro alternative_endif
664:
- .if .Lasm_alt_mode==0
- .popsection
- .endif
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
+ .if .Lasm_alt_mode==0
+ .previous
+ .endif
.endm
/*
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index f5a2d09afb38..1cc42441bc67 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -37,7 +37,7 @@
* (the optimize attribute silently ignores these options).
*/
-#define ATOMIC_OP(op, asm_op) \
+#define ATOMIC_OP(op, asm_op, constraint) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
{ \
@@ -51,11 +51,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
" stxr %w1, %w0, %2\n" \
" cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i)); \
+ : #constraint "r" (i)); \
} \
__LL_SC_EXPORT(atomic_##op);
-#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
+#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
{ \
@@ -70,14 +70,14 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
" cbnz %w1, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : #constraint "r" (i) \
: cl); \
\
return result; \
} \
__LL_SC_EXPORT(atomic_##op##_return##name);
-#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
+#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
{ \
@@ -92,7 +92,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
" cbnz %w2, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : #constraint "r" (i) \
: cl); \
\
return result; \
@@ -110,8 +110,8 @@ __LL_SC_EXPORT(atomic_fetch_##op##name);
ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
+ATOMIC_OPS(add, add, I)
+ATOMIC_OPS(sub, sub, J)
#undef ATOMIC_OPS
#define ATOMIC_OPS(...) \
@@ -121,17 +121,17 @@ ATOMIC_OPS(sub, sub)
ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
-ATOMIC_OPS(and, and)
-ATOMIC_OPS(andnot, bic)
-ATOMIC_OPS(or, orr)
-ATOMIC_OPS(xor, eor)
+ATOMIC_OPS(and, and, )
+ATOMIC_OPS(andnot, bic, )
+ATOMIC_OPS(or, orr, )
+ATOMIC_OPS(xor, eor, )
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define ATOMIC64_OP(op, asm_op) \
+#define ATOMIC64_OP(op, asm_op, constraint) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
{ \
@@ -145,11 +145,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
" stxr %w1, %0, %2\n" \
" cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i)); \
+ : #constraint "r" (i)); \
} \
__LL_SC_EXPORT(atomic64_##op);
-#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
+#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
{ \
@@ -164,14 +164,14 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
" cbnz %w1, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : #constraint "r" (i) \
: cl); \
\
return result; \
} \
__LL_SC_EXPORT(atomic64_##op##_return##name);
-#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
+#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
{ \
@@ -186,7 +186,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
" cbnz %w2, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : #constraint "r" (i) \
: cl); \
\
return result; \
@@ -204,8 +204,8 @@ __LL_SC_EXPORT(atomic64_fetch_##op##name);
ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
+ATOMIC64_OPS(add, add, I)
+ATOMIC64_OPS(sub, sub, J)
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...) \
@@ -215,10 +215,10 @@ ATOMIC64_OPS(sub, sub)
ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
-ATOMIC64_OPS(and, and)
-ATOMIC64_OPS(andnot, bic)
-ATOMIC64_OPS(or, orr)
-ATOMIC64_OPS(xor, eor)
+ATOMIC64_OPS(and, and, L)
+ATOMIC64_OPS(andnot, bic, )
+ATOMIC64_OPS(or, orr, L)
+ATOMIC64_OPS(xor, eor, L)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
@@ -248,48 +248,54 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
}
__LL_SC_EXPORT(atomic64_dec_if_positive);
-#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) \
-__LL_SC_INLINE unsigned long \
-__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
- unsigned long old, \
- unsigned long new)) \
+#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
+__LL_SC_INLINE u##sz \
+__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
+ unsigned long old, \
+ u##sz new)) \
{ \
- unsigned long tmp, oldval; \
+ unsigned long tmp; \
+ u##sz oldval; \
\
asm volatile( \
" prfm pstl1strm, %[v]\n" \
- "1: ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \
+ "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
" eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
" cbnz %" #w "[tmp], 2f\n" \
- " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
+ " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
" cbnz %w[tmp], 1b\n" \
" " #mb "\n" \
"2:" \
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
- [v] "+Q" (*(unsigned long *)ptr) \
- : [old] "Lr" (old), [new] "r" (new) \
+ [v] "+Q" (*(u##sz *)ptr) \
+ : [old] #constraint "r" (old), [new] "r" (new) \
: cl); \
\
return oldval; \
} \
-__LL_SC_EXPORT(__cmpxchg_case_##name);
+__LL_SC_EXPORT(__cmpxchg_case_##name##sz);
-__CMPXCHG_CASE(w, b, 1, , , , )
-__CMPXCHG_CASE(w, h, 2, , , , )
-__CMPXCHG_CASE(w, , 4, , , , )
-__CMPXCHG_CASE( , , 8, , , , )
-__CMPXCHG_CASE(w, b, acq_1, , a, , "memory")
-__CMPXCHG_CASE(w, h, acq_2, , a, , "memory")
-__CMPXCHG_CASE(w, , acq_4, , a, , "memory")
-__CMPXCHG_CASE( , , acq_8, , a, , "memory")
-__CMPXCHG_CASE(w, b, rel_1, , , l, "memory")
-__CMPXCHG_CASE(w, h, rel_2, , , l, "memory")
-__CMPXCHG_CASE(w, , rel_4, , , l, "memory")
-__CMPXCHG_CASE( , , rel_8, , , l, "memory")
-__CMPXCHG_CASE(w, b, mb_1, dmb ish, , l, "memory")
-__CMPXCHG_CASE(w, h, mb_2, dmb ish, , l, "memory")
-__CMPXCHG_CASE(w, , mb_4, dmb ish, , l, "memory")
-__CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory")
+/*
+ * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
+ * handle the 'K' constraint for the value 4294967295 - thus we use no
+ * constraint for 32 bit operations.
+ */
+__CMPXCHG_CASE(w, b, , 8, , , , , )
+__CMPXCHG_CASE(w, h, , 16, , , , , )
+__CMPXCHG_CASE(w, , , 32, , , , , )
+__CMPXCHG_CASE( , , , 64, , , , , L)
+__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", )
+__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", )
+__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", )
+__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L)
+__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", )
+__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", )
+__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", )
+__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L)
+__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", )
+__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", )
+__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", )
+__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)
#undef __CMPXCHG_CASE
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index f9b0b09153e0..80cadc789f1a 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -32,7 +32,9 @@ static inline void atomic_##op(int i, atomic_t *v) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
" " #asm_op " %w[i], %[v]\n") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
@@ -52,7 +54,9 @@ static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(fetch_##op##name), \
/* LSE atomics */ \
@@ -84,7 +88,9 @@ static inline int atomic_add_return##name(int i, atomic_t *v) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(add_return##name) \
__nops(1), \
@@ -110,7 +116,9 @@ static inline void atomic_and(int i, atomic_t *v)
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;
- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC(and)
__nops(1),
@@ -128,7 +136,9 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(fetch_and##name) \
__nops(1), \
@@ -154,7 +164,9 @@ static inline void atomic_sub(int i, atomic_t *v)
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;
- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC(sub)
__nops(1),
@@ -172,7 +184,9 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(sub_return##name) \
__nops(2), \
@@ -200,7 +214,9 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(fetch_sub##name) \
__nops(1), \
@@ -229,7 +245,9 @@ static inline void atomic64_##op(long i, atomic64_t *v) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
" " #asm_op " %[i], %[v]\n") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
@@ -249,7 +267,9 @@ static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(fetch_##op##name), \
/* LSE atomics */ \
@@ -281,7 +301,9 @@ static inline long atomic64_add_return##name(long i, atomic64_t *v) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(add_return##name) \
__nops(1), \
@@ -307,7 +329,9 @@ static inline void atomic64_and(long i, atomic64_t *v)
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;
- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC64(and)
__nops(1),
@@ -325,7 +349,9 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(fetch_and##name) \
__nops(1), \
@@ -351,7 +377,9 @@ static inline void atomic64_sub(long i, atomic64_t *v)
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;
- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC64(sub)
__nops(1),
@@ -369,7 +397,9 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(sub_return##name) \
__nops(2), \
@@ -397,7 +427,9 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(fetch_sub##name) \
__nops(1), \
@@ -422,7 +454,9 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
{
register long x0 asm ("x0") = (long)v;
- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC64(dec_if_positive)
__nops(6),
@@ -446,22 +480,24 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
#define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op)
-#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \
-static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
- unsigned long old, \
- unsigned long new) \
+#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
+static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
+ unsigned long old, \
+ u##sz new) \
{ \
register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
register unsigned long x1 asm ("x1") = old; \
- register unsigned long x2 asm ("x2") = new; \
+ register u##sz x2 asm ("x2") = new; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
- __LL_SC_CMPXCHG(name) \
+ __LL_SC_CMPXCHG(name##sz) \
__nops(2), \
/* LSE atomics */ \
" mov " #w "30, %" #w "[old]\n" \
- " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \
+ " cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \
" mov %" #w "[ret], " #w "30") \
: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
: [old] "r" (x1), [new] "r" (x2) \
@@ -470,22 +506,22 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
return x0; \
}
-__CMPXCHG_CASE(w, b, 1, )
-__CMPXCHG_CASE(w, h, 2, )
-__CMPXCHG_CASE(w, , 4, )
-__CMPXCHG_CASE(x, , 8, )
-__CMPXCHG_CASE(w, b, acq_1, a, "memory")
-__CMPXCHG_CASE(w, h, acq_2, a, "memory")
-__CMPXCHG_CASE(w, , acq_4, a, "memory")
-__CMPXCHG_CASE(x, , acq_8, a, "memory")
-__CMPXCHG_CASE(w, b, rel_1, l, "memory")
-__CMPXCHG_CASE(w, h, rel_2, l, "memory")
-__CMPXCHG_CASE(w, , rel_4, l, "memory")
-__CMPXCHG_CASE(x, , rel_8, l, "memory")
-__CMPXCHG_CASE(w, b, mb_1, al, "memory")
-__CMPXCHG_CASE(w, h, mb_2, al, "memory")
-__CMPXCHG_CASE(w, , mb_4, al, "memory")
-__CMPXCHG_CASE(x, , mb_8, al, "memory")
+__CMPXCHG_CASE(w, b, , 8, )
+__CMPXCHG_CASE(w, h, , 16, )
+__CMPXCHG_CASE(w, , , 32, )
+__CMPXCHG_CASE(x, , , 64, )
+__CMPXCHG_CASE(w, b, acq_, 8, a, "memory")
+__CMPXCHG_CASE(w, h, acq_, 16, a, "memory")
+__CMPXCHG_CASE(w, , acq_, 32, a, "memory")
+__CMPXCHG_CASE(x, , acq_, 64, a, "memory")
+__CMPXCHG_CASE(w, b, rel_, 8, l, "memory")
+__CMPXCHG_CASE(w, h, rel_, 16, l, "memory")
+__CMPXCHG_CASE(w, , rel_, 32, l, "memory")
+__CMPXCHG_CASE(x, , rel_, 64, l, "memory")
+__CMPXCHG_CASE(w, b, mb_, 8, al, "memory")
+__CMPXCHG_CASE(w, h, mb_, 16, al, "memory")
+__CMPXCHG_CASE(w, , mb_, 32, al, "memory")
+__CMPXCHG_CASE(x, , mb_, 64, al, "memory")
#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE
@@ -507,7 +543,9 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
register unsigned long x3 asm ("x3") = new2; \
register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_CMPXCHG_DBL(name) \
__nops(3), \
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 5ee5bca8c24b..baa684782358 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -22,6 +22,7 @@
#define CTR_L1IP_MASK 3
#define CTR_DMINLINE_SHIFT 16
#define CTR_IMINLINE_SHIFT 0
+#define CTR_IMINLINE_MASK 0xf
#define CTR_ERG_SHIFT 20
#define CTR_CWG_SHIFT 24
#define CTR_CWG_MASK 15
@@ -29,7 +30,7 @@
#define CTR_DIC_SHIFT 29
#define CTR_CACHE_MINLINE_MASK \
- (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
+ (0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT)
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 19844211a4e6..a449a1c602d3 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -90,7 +90,7 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
* IPI all online CPUs so that they undergo a context synchronization
* event and are forced to refetch the new instructions.
*/
-#ifdef CONFIG_KGDB
+
/*
* KGDB performs cache maintenance with interrupts disabled, so we
* will deadlock trying to IPI the secondary CPUs. In theory, we can
@@ -100,9 +100,9 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
* the patching operation, so we don't need extra IPIs here anyway.
* In which case, add a KGDB-specific bodge and return early.
*/
- if (kgdb_connected && irqs_disabled())
+ if (in_dbg_master())
return;
-#endif
+
kick_all_cpus_sync();
}
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
index 0b6f5a7d4027..fd11e0d70e44 100644
--- a/arch/arm64/include/asm/checksum.h
+++ b/arch/arm64/include/asm/checksum.h
@@ -30,16 +30,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
__uint128_t tmp;
u64 sum;
+ int n = ihl; /* we want it signed */
tmp = *(const __uint128_t *)iph;
iph += 16;
- ihl -= 4;
+ n -= 4;
tmp += ((tmp >> 64) | (tmp << 64));
sum = tmp >> 64;
do {
sum += *(const u32 *)iph;
iph += 4;
- } while (--ihl);
+ } while (--n > 0);
sum += ((sum >> 32) | (sum << 32));
return csum_fold((__force u32)(sum >> 32));
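The ip_fast_csum() hunk above only switches the loop counter to a signed type. A minimal user-space sketch, not taken from the patch, of why that matters: header lengths below 5 words are invalid on the wire but can reach the helper via raw sockets, and with an unsigned counter the first decrement wraps around instead of terminating. The passes_unsigned()/passes_signed() names are invented for the demo, which models only loop termination, not the checksum itself.

/*
 * Illustrative only: models the do/while termination in ip_fast_csum()
 * for an undersized ihl. With an unsigned counter the first decrement
 * wraps to ~4 billion; with a signed counter and "--n > 0" the loop
 * exits after a single pass.
 */
#include <stdio.h>

static int passes_unsigned(unsigned int ihl)
{
	unsigned int n = ihl - 4;
	int passes = 0;

	do {
		if (++passes > 16)
			return -1;	/* would spin ~2^32 times in reality */
	} while (--n);

	return passes;
}

static int passes_signed(unsigned int ihl)
{
	int n = (int)ihl - 4;	/* signed, as in the fixed helper */
	int passes = 0;

	do {
		passes++;
	} while (--n > 0);

	return passes;
}

int main(void)
{
	printf("unsigned, ihl=4: %d\n", passes_unsigned(4));	/* -1: runaway */
	printf("signed,   ihl=4: %d\n", passes_signed(4));	/* 1 */
	printf("signed,   ihl=6: %d\n", passes_signed(6));	/* 2 */
	return 0;
}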
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index d8b01c7c9cd3..94ccb3bfbd61 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -30,46 +30,46 @@
* barrier case is generated as release+dmb for the former and
* acquire+release for the latter.
*/
-#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl) \
-static inline unsigned long __xchg_case_##name(unsigned long x, \
- volatile void *ptr) \
-{ \
- unsigned long ret, tmp; \
- \
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- " prfm pstl1strm, %2\n" \
- "1: ld" #acq "xr" #sz "\t%" #w "0, %2\n" \
- " st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n" \
- " cbnz %w1, 1b\n" \
- " " #mb, \
- /* LSE atomics */ \
- " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
- __nops(3) \
- " " #nop_lse) \
- : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
- : "r" (x) \
- : cl); \
- \
- return ret; \
+#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl) \
+static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr) \
+{ \
+ u##sz ret; \
+ unsigned long tmp; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ " prfm pstl1strm, %2\n" \
+ "1: ld" #acq "xr" #sfx "\t%" #w "0, %2\n" \
+ " st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n" \
+ " cbnz %w1, 1b\n" \
+ " " #mb, \
+ /* LSE atomics */ \
+ " swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n" \
+ __nops(3) \
+ " " #nop_lse) \
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr) \
+ : "r" (x) \
+ : cl); \
+ \
+ return ret; \
}
-__XCHG_CASE(w, b, 1, , , , , , )
-__XCHG_CASE(w, h, 2, , , , , , )
-__XCHG_CASE(w, , 4, , , , , , )
-__XCHG_CASE( , , 8, , , , , , )
-__XCHG_CASE(w, b, acq_1, , , a, a, , "memory")
-__XCHG_CASE(w, h, acq_2, , , a, a, , "memory")
-__XCHG_CASE(w, , acq_4, , , a, a, , "memory")
-__XCHG_CASE( , , acq_8, , , a, a, , "memory")
-__XCHG_CASE(w, b, rel_1, , , , , l, "memory")
-__XCHG_CASE(w, h, rel_2, , , , , l, "memory")
-__XCHG_CASE(w, , rel_4, , , , , l, "memory")
-__XCHG_CASE( , , rel_8, , , , , l, "memory")
-__XCHG_CASE(w, b, mb_1, dmb ish, nop, , a, l, "memory")
-__XCHG_CASE(w, h, mb_2, dmb ish, nop, , a, l, "memory")
-__XCHG_CASE(w, , mb_4, dmb ish, nop, , a, l, "memory")
-__XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory")
+__XCHG_CASE(w, b, , 8, , , , , , )
+__XCHG_CASE(w, h, , 16, , , , , , )
+__XCHG_CASE(w, , , 32, , , , , , )
+__XCHG_CASE( , , , 64, , , , , , )
+__XCHG_CASE(w, b, acq_, 8, , , a, a, , "memory")
+__XCHG_CASE(w, h, acq_, 16, , , a, a, , "memory")
+__XCHG_CASE(w, , acq_, 32, , , a, a, , "memory")
+__XCHG_CASE( , , acq_, 64, , , a, a, , "memory")
+__XCHG_CASE(w, b, rel_, 8, , , , , l, "memory")
+__XCHG_CASE(w, h, rel_, 16, , , , , l, "memory")
+__XCHG_CASE(w, , rel_, 32, , , , , l, "memory")
+__XCHG_CASE( , , rel_, 64, , , , , l, "memory")
+__XCHG_CASE(w, b, mb_, 8, dmb ish, nop, , a, l, "memory")
+__XCHG_CASE(w, h, mb_, 16, dmb ish, nop, , a, l, "memory")
+__XCHG_CASE(w, , mb_, 32, dmb ish, nop, , a, l, "memory")
+__XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory")
#undef __XCHG_CASE
@@ -80,13 +80,13 @@ static __always_inline unsigned long __xchg##sfx(unsigned long x, \
{ \
switch (size) { \
case 1: \
- return __xchg_case##sfx##_1(x, ptr); \
+ return __xchg_case##sfx##_8(x, ptr); \
case 2: \
- return __xchg_case##sfx##_2(x, ptr); \
+ return __xchg_case##sfx##_16(x, ptr); \
case 4: \
- return __xchg_case##sfx##_4(x, ptr); \
+ return __xchg_case##sfx##_32(x, ptr); \
case 8: \
- return __xchg_case##sfx##_8(x, ptr); \
+ return __xchg_case##sfx##_64(x, ptr); \
default: \
BUILD_BUG(); \
} \
@@ -123,13 +123,13 @@ static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
{ \
switch (size) { \
case 1: \
- return __cmpxchg_case##sfx##_1(ptr, (u8)old, new); \
+ return __cmpxchg_case##sfx##_8(ptr, (u8)old, new); \
case 2: \
- return __cmpxchg_case##sfx##_2(ptr, (u16)old, new); \
+ return __cmpxchg_case##sfx##_16(ptr, (u16)old, new); \
case 4: \
- return __cmpxchg_case##sfx##_4(ptr, old, new); \
+ return __cmpxchg_case##sfx##_32(ptr, old, new); \
case 8: \
- return __cmpxchg_case##sfx##_8(ptr, old, new); \
+ return __cmpxchg_case##sfx##_64(ptr, old, new); \
default: \
BUILD_BUG(); \
} \
@@ -197,16 +197,16 @@ __CMPXCHG_GEN(_mb)
__ret; \
})
-#define __CMPWAIT_CASE(w, sz, name) \
-static inline void __cmpwait_case_##name(volatile void *ptr, \
- unsigned long val) \
+#define __CMPWAIT_CASE(w, sfx, sz) \
+static inline void __cmpwait_case_##sz(volatile void *ptr, \
+ unsigned long val) \
{ \
unsigned long tmp; \
\
asm volatile( \
" sevl\n" \
" wfe\n" \
- " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
+ " ldxr" #sfx "\t%" #w "[tmp], %[v]\n" \
" eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
" cbnz %" #w "[tmp], 1f\n" \
" wfe\n" \
@@ -215,10 +215,10 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \
: [val] "r" (val)); \
}
-__CMPWAIT_CASE(w, b, 1);
-__CMPWAIT_CASE(w, h, 2);
-__CMPWAIT_CASE(w, , 4);
-__CMPWAIT_CASE( , , 8);
+__CMPWAIT_CASE(w, b, 8);
+__CMPWAIT_CASE(w, h, 16);
+__CMPWAIT_CASE(w, , 32);
+__CMPWAIT_CASE( , , 64);
#undef __CMPWAIT_CASE
@@ -229,13 +229,13 @@ static __always_inline void __cmpwait##sfx(volatile void *ptr, \
{ \
switch (size) { \
case 1: \
- return __cmpwait_case##sfx##_1(ptr, (u8)val); \
+ return __cmpwait_case##sfx##_8(ptr, (u8)val); \
case 2: \
- return __cmpwait_case##sfx##_2(ptr, (u16)val); \
+ return __cmpwait_case##sfx##_16(ptr, (u16)val); \
case 4: \
- return __cmpwait_case##sfx##_4(ptr, val); \
+ return __cmpwait_case##sfx##_32(ptr, val); \
case 8: \
- return __cmpwait_case##sfx##_8(ptr, val); \
+ return __cmpwait_case##sfx##_64(ptr, val); \
default: \
BUILD_BUG(); \
} \
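The cmpxchg.h changes above rename the case helpers after their operand width in bits and dispatch on sizeof(). Below is a stand-alone sketch of that same dispatch shape, assuming the GCC/Clang __atomic builtins instead of the kernel's LL/SC or LSE implementations; the cmpxchg_case_8/16/32/64 names are illustrative, not the kernel's.

/*
 * Illustrative only: same sizeof() dispatch shape as __cmpxchg##sfx,
 * built on the GCC/Clang __atomic builtins. Returns the previous value,
 * like the kernel's __cmpxchg_case_*() helpers.
 */
#include <stdint.h>
#include <stdio.h>

#define DEFINE_CMPXCHG_CASE(sz)						\
static uint##sz##_t cmpxchg_case_##sz(volatile void *ptr,		\
				      uint##sz##_t old,			\
				      uint##sz##_t new)			\
{									\
	__atomic_compare_exchange_n((volatile uint##sz##_t *)ptr,	\
				    &old, new, 0,			\
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);\
	return old;							\
}

DEFINE_CMPXCHG_CASE(8)
DEFINE_CMPXCHG_CASE(16)
DEFINE_CMPXCHG_CASE(32)
DEFINE_CMPXCHG_CASE(64)

#define cmpxchg(ptr, old, new)						\
	(sizeof(*(ptr)) == 1 ? cmpxchg_case_8((ptr), (old), (new)) :	\
	 sizeof(*(ptr)) == 2 ? cmpxchg_case_16((ptr), (old), (new)) :	\
	 sizeof(*(ptr)) == 4 ? cmpxchg_case_32((ptr), (old), (new)) :	\
			       cmpxchg_case_64((ptr), (old), (new)))

int main(void)
{
	uint32_t v = 1;
	uint32_t prev = cmpxchg(&v, 1u, 2u);

	printf("prev=%u v=%u\n", prev, v);	/* prev=1 v=2 */
	return 0;
}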
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index c3de0bbf0e9a..df8fe8ecc37e 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -53,7 +53,8 @@
#define ARM64_HAS_STAGE2_FWB 32
#define ARM64_WORKAROUND_1463225 33
#define ARM64_SSBS 34
+#define ARM64_WORKAROUND_1542419 35
-#define ARM64_NCAPS 35
+#define ARM64_NCAPS 36
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index fa770c070fdd..3cd936b1c79c 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -80,6 +80,7 @@
#define ARM_CPU_PART_CORTEX_A35 0xD04
#define ARM_CPU_PART_CORTEX_A55 0xD05
#define ARM_CPU_PART_CORTEX_A76 0xD0B
+#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define APM_CPU_PART_POTENZA 0x000
@@ -107,6 +108,7 @@
#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
+#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index a44cf5225429..41b065f1be88 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -119,6 +119,8 @@ void disable_debug_monitors(enum dbg_active_el el);
void user_rewind_single_step(struct task_struct *task);
void user_fastforward_single_step(struct task_struct *task);
+void user_regs_reset_single_step(struct user_pt_regs *regs,
+ struct task_struct *task);
void kernel_enable_single_step(struct pt_regs *regs);
void kernel_disable_single_step(void);
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 8b284cbf8162..6d43f7901da2 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -83,11 +83,12 @@
* IMO: Override CPSR.I and enable signaling with VI
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
+ * PTW: Take a stage2 fault if a stage1 walk steps in device memory
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
- HCR_FMO | HCR_IMO)
+ HCR_FMO | HCR_IMO | HCR_PTW)
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
@@ -197,6 +198,7 @@
#define CPTR_EL2_DEFAULT CPTR_EL2_RES1
/* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TTRF (1 << 19)
#define MDCR_EL2_TPMS (1 << 14)
#define MDCR_EL2_E2PB_MASK (UL(0x3))
#define MDCR_EL2_E2PB_SHIFT (UL(12))
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 102b5a5c47b6..e3c0dba5bdde 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -87,6 +87,34 @@ extern u32 __init_stage2_translation(void);
*__hyp_this_cpu_ptr(sym); \
})
+#define __KVM_EXTABLE(from, to) \
+ " .pushsection __kvm_ex_table, \"a\"\n" \
+ " .align 3\n" \
+ " .long (" #from " - .), (" #to " - .)\n" \
+ " .popsection\n"
+
+
+#define __kvm_at(at_op, addr) \
+( { \
+ int __kvm_at_err = 0; \
+ u64 spsr, elr; \
+ asm volatile( \
+ " mrs %1, spsr_el2\n" \
+ " mrs %2, elr_el2\n" \
+ "1: at "at_op", %3\n" \
+ " isb\n" \
+ " b 9f\n" \
+ "2: msr spsr_el2, %1\n" \
+ " msr elr_el2, %2\n" \
+ " mov %w0, %4\n" \
+ "9:\n" \
+ __KVM_EXTABLE(1b, 2b) \
+ : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \
+ : "r" (addr), "i" (-EFAULT)); \
+ __kvm_at_err; \
+} )
+
+
#else /* __ASSEMBLY__ */
.macro hyp_adr_this_cpu reg, sym, tmp
@@ -111,6 +139,21 @@ extern u32 __init_stage2_translation(void);
kern_hyp_va \vcpu
.endm
+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format _asm_extable, but output to a different section so that
+ * it can be mapped to EL2. The KVM version is not sorted. The caller must
+ * ensure:
+ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
+ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
+ */
+.macro _kvm_extable, from, to
+ .pushsection __kvm_ex_table, "a"
+ .align 3
+ .long (\from - .), (\to - .)
+ .popsection
+.endm
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 778cb4f868d9..669c960dd069 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -303,7 +303,7 @@ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}
-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}
@@ -311,7 +311,7 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
- kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+ kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
}
static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
@@ -340,6 +340,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}
+static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
+}
+
static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 367b2e0b6d76..07472c138ced 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -165,6 +165,7 @@ enum vcpu_sysreg {
#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
+#define c2_TTBCR2 (c2_TTBCR + 1) /* Translation Table Base Control R. 2 */
#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
@@ -192,6 +193,7 @@ enum vcpu_sysreg {
#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
+#define cp14_DBGVCR (DBGVCR32_EL2 * 2)
#define NR_COPRO_REGS (NR_SYS_REGS * 2)
@@ -335,8 +337,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
* CP14 and CP15 live in the same array, as they are backed by the
* same system registers.
*/
-#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
-#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
+#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+
+#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
+#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
struct kvm_vm_stat {
ulong remote_tlb_flush;
@@ -368,7 +372,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end, bool blockable);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -451,6 +455,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
void kvm_arm_init_debug(void);
+void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
@@ -535,4 +540,6 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);
+#define kvm_arm_vcpu_loaded(vcpu) ((vcpu)->arch.sysregs_loaded_on_cpu)
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 384c34397619..5f52d6d670e9 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -144,6 +144,9 @@ void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 8262325e2fc6..13536c4da2c2 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -4,6 +4,8 @@
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+#define __LSE_PREAMBLE ".arch_extension lse\n"
+
#include <linux/compiler_types.h>
#include <linux/export.h>
#include <linux/stringify.h>
@@ -20,8 +22,6 @@
#else /* __ASSEMBLER__ */
-__asm__(".arch_extension lse");
-
/* Move the ll/sc atomics out-of-line */
#define __LL_SC_INLINE notrace
#define __LL_SC_PREFIX(x) __ll_sc_##x
@@ -33,7 +33,7 @@ __asm__(".arch_extension lse");
/* In-line patching at runtime */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
- ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+ ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
#endif /* __ASSEMBLER__ */
#else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
index 626ad01e83bf..dd870390d639 100644
--- a/arch/arm64/include/asm/numa.h
+++ b/arch/arm64/include/asm/numa.h
@@ -25,6 +25,9 @@ const struct cpumask *cpumask_of_node(int node);
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
+ if (node == NUMA_NO_NODE)
+ return cpu_all_mask;
+
return node_to_cpumask_map[node];
}
#endif
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 5be015e2133a..af547be1779b 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -65,7 +65,7 @@
#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
-#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+#define PAGE_HYP_DEVICE __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN)
#define PAGE_S2_MEMATTR(attr) \
({ \
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 291fed0a9df8..f43519b71061 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -107,8 +107,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
-#define pte_valid_young(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
@@ -116,9 +114,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
* so that we don't erroneously return false for pages that have been
* remapped as PROT_NONE but are yet to be flushed from the TLB.
+ * Note that we can't make any assumptions based on the state of the access
+ * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
+ * TLB.
*/
#define pte_accessible(mm, pte) \
- (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
+ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
/*
* p??_access_permitted() is true for valid user mappings (subject to the
@@ -144,13 +145,6 @@ static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
return pte;
}
-static inline pte_t pte_wrprotect(pte_t pte)
-{
- pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
- pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
- return pte;
-}
-
static inline pte_t pte_mkwrite(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
@@ -176,6 +170,20 @@ static inline pte_t pte_mkdirty(pte_t pte)
return pte;
}
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ /*
+ * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
+ * clear), set the PTE_DIRTY bit.
+ */
+ if (pte_hw_dirty(pte))
+ pte = pte_mkdirty(pte);
+
+ pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
+ pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+ return pte;
+}
+
static inline pte_t pte_mkold(pte_t pte)
{
return clear_pte_bit(pte, __pgprot(PTE_AF));
@@ -668,12 +676,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
pte = READ_ONCE(*ptep);
do {
old_pte = pte;
- /*
- * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
- * clear), set the PTE_DIRTY bit.
- */
- if (pte_hw_dirty(pte))
- pte = pte_mkdirty(pte);
pte = pte_wrprotect(pte);
pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
pte_val(old_pte), pte_val(pte));
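The pgtable.h hunk above moves the hardware-dirty check into pte_wrprotect() so that every caller preserves dirtiness before the DBM/WRITE information is destroyed. Below is a small user-space model of that ordering with made-up bit positions; it is a sketch of the idea, not the kernel's PTE layout.

/*
 * Illustrative only: if the DBM/WRITE bit were cleared before the dirty
 * state is transferred to the software dirty bit, a hardware-dirtied
 * page would be reported clean. Bit positions are invented for the demo.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_WRITE	(1u << 0)	/* doubles as the hardware DBM bit here */
#define PTE_RDONLY	(1u << 1)
#define PTE_DIRTY	(1u << 2)	/* software dirty bit */

static bool pte_hw_dirty(uint32_t pte)
{
	return (pte & PTE_WRITE) && !(pte & PTE_RDONLY);
}

static uint32_t pte_wrprotect(uint32_t pte)
{
	/* Preserve dirtiness before the DBM/WRITE information goes away */
	if (pte_hw_dirty(pte))
		pte |= PTE_DIRTY;

	pte &= ~PTE_WRITE;
	pte |= PTE_RDONLY;
	return pte;
}

int main(void)
{
	uint32_t pte = PTE_WRITE;	/* writable and hardware-dirtied */

	pte = pte_wrprotect(pte);
	printf("dirty after wrprotect: %d\n", !!(pte & PTE_DIRTY));	/* 1 */
	return 0;
}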
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 58102652bf9e..615376030aba 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -45,6 +45,10 @@ static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
unsigned long error = regs->regs[0];
+
+ if (is_compat_thread(task_thread_info(task)))
+ error = sign_extend64(error, 31);
+
return IS_ERR_VALUE(error) ? error : 0;
}
@@ -58,7 +62,13 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
- regs->regs[0] = (long) error ? error : val;
+ if (error)
+ val = error;
+
+ if (is_compat_thread(task_thread_info(task)))
+ val = lower_32_bits(val);
+
+ regs->regs[0] = val;
}
#define SYSCALL_MAX_ARGS 6
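The syscall.h hunk above relies on sign_extend64() to recover a compat task's negative error code and lower_32_bits() to truncate the value a 32-bit task can observe. The sketch below uses user-space stand-ins for those two helpers with the usual kernel semantics assumed; the real definitions live in include/linux/bitops.h and include/linux/kernel.h.

/*
 * Illustrative stand-ins for the kernel helpers used above; not the
 * kernel's definitions.
 */
#include <stdint.h>
#include <stdio.h>

static inline int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

static inline uint32_t lower_32_bits(uint64_t n)
{
	return (uint32_t)n;
}

int main(void)
{
	/* Low 32 bits of x0 after a compat syscall failed with -EFAULT (-14) */
	uint64_t w0 = 0xfffffff2ULL;

	printf("raw      : %lld\n", (long long)w0);			/* 4294967282 */
	printf("extended : %lld\n", (long long)sign_extend64(w0, 31));	/* -14 */

	/* Truncation keeps only what a 32-bit task can observe */
	printf("truncated: %#x\n", lower_32_bits(0xfffffffffffffff2ULL)); /* 0xfffffff2 */
	return 0;
}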
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 3091ae5975a3..ed99d941c462 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -60,7 +60,9 @@
#ifndef CONFIG_BROKEN_GAS_INST
#ifdef __ASSEMBLY__
-#define __emit_inst(x) .inst (x)
+// The space separator is omitted so that __emit_inst(x) can be parsed as
+// either an assembler directive or an assembler macro argument.
+#define __emit_inst(x) .inst(x)
#else
#define __emit_inst(x) ".inst " __stringify((x)) "\n\t"
#endif
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index cb2c10a8f0a8..69ac5262cccd 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -101,6 +101,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_SVE (1 << TIF_SVE)
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
index b0d708ff7f4e..a2601c1ccf43 100644
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -64,7 +64,7 @@ static inline unsigned long find_zero(unsigned long mask)
*/
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
- unsigned long ret, offset;
+ unsigned long ret, tmp;
/* Load word from unaligned pointer addr */
asm(
@@ -72,9 +72,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
"2:\n"
" .pushsection .fixup,\"ax\"\n"
" .align 2\n"
- "3: and %1, %2, #0x7\n"
- " bic %2, %2, #0x7\n"
- " ldr %0, [%2]\n"
+ "3: bic %1, %2, #0x7\n"
+ " ldr %0, [%1]\n"
+ " and %1, %2, #0x7\n"
" lsl %1, %1, #0x3\n"
#ifndef __AARCH64EB__
" lsr %0, %0, %1\n"
@@ -84,7 +84,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
" b 2b\n"
" .popsection\n"
_ASM_EXTABLE(1b, 3b)
- : "=&r" (ret), "=&r" (offset)
+ : "=&r" (ret), "=&r" (tmp)
: "r" (addr), "Q" (*(unsigned long *)addr));
return ret;
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index b5d603992d40..0d345622bbba 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -44,20 +44,8 @@ struct alt_region {
*/
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
- unsigned long replptr;
-
- if (kernel_text_address(pc))
- return true;
-
- replptr = (unsigned long)ALT_REPL_PTR(alt);
- if (pc >= replptr && pc <= (replptr + alt->alt_len))
- return false;
-
- /*
- * Branching into *another* alternate sequence is doomed, and
- * we're not even trying to fix it up.
- */
- BUG();
+ unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
+ return !(pc >= replptr && pc <= (replptr + alt->alt_len));
}
#define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 39dc98dd78eb..181c29af5617 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -604,7 +604,7 @@ static struct undef_hook setend_hooks[] = {
},
{
/* Thumb mode */
- .instr_mask = 0x0000fff7,
+ .instr_mask = 0xfffffff7,
.instr_val = 0x0000b650,
.pstate_mask = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK),
.pstate_val = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR),
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 71888808ded7..d191ce8410db 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -619,6 +619,12 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
return (need_wa > 0);
}
+static void
+cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap)
+{
+ cap->matches(cap, SCOPE_LOCAL_CPU);
+}
+
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
@@ -643,6 +649,18 @@ needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
return false;
}
+static bool __maybe_unused
+has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ u32 midr = read_cpuid_id();
+ bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
+ const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return is_midr_in_range(midr, &range) && has_dic;
+}
+
#ifdef CONFIG_HARDEN_EL2_VECTORS
static const struct midr_range arm64_harden_el2_vectors[] = {
@@ -801,9 +819,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
},
#endif
{
+ .desc = "Branch predictor hardening",
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = check_branch_predictor,
+ .cpu_enable = cpu_enable_branch_predictor_hardening,
},
#ifdef CONFIG_HARDEN_EL2_VECTORS
{
@@ -835,6 +855,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.matches = needs_tx2_tvm_workaround,
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1542419
+ {
+ /* we depend on the firmware portion for correctness */
+ .desc = "ARM erratum 1542419 (kernel portion)",
+ .capability = ARM64_WORKAROUND_1542419,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_neoverse_n1_erratum_1542419,
+ .cpu_enable = cpu_enable_trap_ctr_access,
+ },
+#endif
{
}
};
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index ac3126aba036..122d5e843ab6 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -155,11 +155,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
- /* Linux doesn't care about the EL3 */
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
ARM64_FTR_END,
};
@@ -259,7 +258,6 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
* of support.
*/
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
ARM64_FTR_END,
};
@@ -301,7 +299,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = {
};
static const struct arm64_ftr_bits ftr_id_dfr0[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
+ /* [31:28] TraceFilt */
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
@@ -671,9 +669,6 @@ void update_cpu_features(int cpu,
taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
- /*
- * EL3 is not our concern.
- */
taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
@@ -1016,7 +1011,7 @@ static bool cpu_has_broken_dbm(void)
/* List of CPUs which have broken DBM support. */
static const struct midr_range cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_1024718
- MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
#endif
{},
};
diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
index f46d57c31443..76905a258550 100644
--- a/arch/arm64/kernel/crash_dump.c
+++ b/arch/arm64/kernel/crash_dump.c
@@ -67,5 +67,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+ *ppos += count;
+
return count;
}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 262925b98f42..501e835c6500 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -152,17 +152,20 @@ postcore_initcall(debug_monitors_init);
/*
* Single step API and exception handling.
*/
-static void set_regs_spsr_ss(struct pt_regs *regs)
+static void set_user_regs_spsr_ss(struct user_pt_regs *regs)
{
regs->pstate |= DBG_SPSR_SS;
}
-NOKPROBE_SYMBOL(set_regs_spsr_ss);
+NOKPROBE_SYMBOL(set_user_regs_spsr_ss);
-static void clear_regs_spsr_ss(struct pt_regs *regs)
+static void clear_user_regs_spsr_ss(struct user_pt_regs *regs)
{
regs->pstate &= ~DBG_SPSR_SS;
}
-NOKPROBE_SYMBOL(clear_regs_spsr_ss);
+NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);
+
+#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs)
+#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs)
/* EL1 Single Step Handler hooks */
static LIST_HEAD(step_hook);
@@ -389,17 +392,26 @@ void user_rewind_single_step(struct task_struct *task)
* If single step is active for this thread, then set SPSR.SS
* to 1 to avoid returning to the active-pending state.
*/
- if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
+ if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_rewind_single_step);
void user_fastforward_single_step(struct task_struct *task)
{
- if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
+ if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
clear_regs_spsr_ss(task_pt_regs(task));
}
+void user_regs_reset_single_step(struct user_pt_regs *regs,
+ struct task_struct *task)
+{
+ if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
+ set_user_regs_spsr_ss(regs);
+ else
+ clear_user_regs_spsr_ss(regs);
+}
+
/* Kernel API */
void kernel_enable_single_step(struct pt_regs *regs)
{
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 14fdbaa6ee3a..19c87b52c001 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -304,7 +304,7 @@ static unsigned int find_supported_vector_length(unsigned int vl)
return sve_vl_from_vq(bit_to_vq(bit));
}
-#ifdef CONFIG_SYSCTL
+#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
static int sve_proc_do_default_vl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
@@ -350,9 +350,9 @@ static int __init sve_sysctl_init(void)
return 0;
}
-#else /* ! CONFIG_SYSCTL */
+#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
static int __init sve_sysctl_init(void) { return 0; }
-#endif /* ! CONFIG_SYSCTL */
+#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index d22ab8d9edc9..c85ea70b9293 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -867,6 +867,7 @@ __primary_switch:
tlbi vmalle1 // Remove any stale TLB entries
dsb nsh
+ isb
msr sctlr_el1, x19 // re-enable the MMU
isb
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 7c0611f5d2ce..9f105fe58595 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -737,6 +737,27 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
return 0;
}
+static int watchpoint_report(struct perf_event *wp, unsigned long addr,
+ struct pt_regs *regs)
+{
+ int step = is_default_overflow_handler(wp);
+ struct arch_hw_breakpoint *info = counter_arch_bp(wp);
+
+ info->trigger = addr;
+
+ /*
+ * If we triggered a user watchpoint from a uaccess routine, then
+ * handle the stepping ourselves since userspace really can't help
+ * us with this.
+ */
+ if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0)
+ step = 1;
+ else
+ perf_bp_event(wp, regs);
+
+ return step;
+}
+
static int watchpoint_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -746,7 +767,6 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
u64 val;
struct perf_event *wp, **slots;
struct debug_info *debug_info;
- struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
slots = this_cpu_ptr(wp_on_reg);
@@ -784,25 +804,13 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
if (dist != 0)
continue;
- info = counter_arch_bp(wp);
- info->trigger = addr;
- perf_bp_event(wp, regs);
-
- /* Do we need to handle the stepping? */
- if (is_default_overflow_handler(wp))
- step = 1;
+ step = watchpoint_report(wp, addr, regs);
}
- if (min_dist > 0 && min_dist != -1) {
- /* No exact match found. */
- wp = slots[closest_match];
- info = counter_arch_bp(wp);
- info->trigger = addr;
- perf_bp_event(wp, regs);
- /* Do we need to handle the stepping? */
- if (is_default_overflow_handler(wp))
- step = 1;
- }
+ /* No exact match found? */
+ if (min_dist > 0 && min_dist != -1)
+ step = watchpoint_report(slots[closest_match], addr, regs);
+
rcu_read_unlock();
if (!step)
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 3e6229e30109..cd37edbdedcb 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -1490,16 +1490,10 @@ static u32 aarch64_encode_immediate(u64 imm,
u32 insn)
{
unsigned int immr, imms, n, ones, ror, esz, tmp;
- u64 mask = ~0UL;
-
- /* Can't encode full zeroes or full ones */
- if (!imm || !~imm)
- return AARCH64_BREAK_FAULT;
+ u64 mask;
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
- if (upper_32_bits(imm))
- return AARCH64_BREAK_FAULT;
esz = 32;
break;
case AARCH64_INSN_VARIANT_64BIT:
@@ -1511,6 +1505,12 @@ static u32 aarch64_encode_immediate(u64 imm,
return AARCH64_BREAK_FAULT;
}
+ mask = GENMASK(esz - 1, 0);
+
+ /* Can't encode full zeroes, full ones, or a value wider than the mask */
+ if (!imm || imm == mask || imm & ~mask)
+ return AARCH64_BREAK_FAULT;
+
/*
* Inverse of Replicate(). Try to spot a repeating pattern
* with a pow2 stride.
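The aarch64_encode_immediate() hunk above builds a mask for the element size and rejects values it can never encode before searching for a repeating pattern. A minimal sketch of just that early rejection follows, assuming a local GENMASK_ULL() stand-in; it does not reproduce the full logical-immediate encoder.

/*
 * Illustrative only: the early validity check, for esz of 32 or 64.
 * "Plausible" here means "not rejected up front", nothing more.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static bool logic_imm_candidate(uint64_t imm, unsigned int esz)
{
	uint64_t mask = GENMASK_ULL(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or a value wider than esz */
	if (!imm || imm == mask || (imm & ~mask))
		return false;

	return true;
}

int main(void)
{
	printf("%d\n", logic_imm_candidate(0x00ff00ff, 32));	/* 1: plausible */
	printf("%d\n", logic_imm_candidate(0, 32));		/* 0: all zeroes */
	printf("%d\n", logic_imm_candidate(0xffffffff, 32));	/* 0: all ones */
	printf("%d\n", logic_imm_candidate(1ULL << 40, 32));	/* 0: too wide */
	return 0;
}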
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 35f184a8fd85..8815b5457dd0 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -269,7 +269,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
if (user_mode(regs) || !kgdb_single_step)
return DBG_HOOK_ERROR;
- kgdb_handle_exception(1, SIGTRAP, 0, regs);
+ kgdb_handle_exception(0, SIGTRAP, 0, regs);
return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_step_brk_fn);
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 922add8adb74..5e26ef007863 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -192,6 +192,7 @@ void machine_kexec(struct kimage *kimage)
* the offline CPUs. Therefore, we must use the __* variant here.
*/
__flush_icache_range((uintptr_t)reboot_code_buffer,
+ (uintptr_t)reboot_code_buffer +
arm64_relocate_new_kernel_size);
/* Flush the kimage list and its buffers. */
diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds
index 22e36a21c113..09a0eef71d12 100644
--- a/arch/arm64/kernel/module.lds
+++ b/arch/arm64/kernel/module.lds
@@ -1,5 +1,5 @@
SECTIONS {
- .plt (NOLOAD) : { BYTE(0) }
- .init.plt (NOLOAD) : { BYTE(0) }
- .text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
+ .plt 0 (NOLOAD) : { BYTE(0) }
+ .init.plt 0 (NOLOAD) : { BYTE(0) }
+ .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
}
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 0bbac612146e..666b225aeb3a 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
return 0;
/*
- * Compat (i.e. 32 bit) mode:
- * - PC has been set in the pt_regs struct in kernel_entry,
- * - Handle SP and LR here.
+ * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
+ * we're stuck with it for ABI compatibility reasons.
+ *
+ * For a 32-bit consumer inspecting a 32-bit task, then it will look at
+ * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
+ * These correspond directly to a prefix of the registers saved in our
+ * 'struct pt_regs', with the exception of the PC, so we copy that down
+ * (x15 corresponds to SP_hyp in the architecture).
+ *
+ * So far, so good.
+ *
+ * The oddity arises when a 64-bit consumer looks at a 32-bit task and
+ * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
+ * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
+ * PC registers would normally live. The initial idea was to allow a
+ * 64-bit unwinder to unwind a 32-bit task and, although it's not clear
+ * how well that works in practice, somebody might be relying on it.
+ *
+ * At the time we make a sample, we don't know whether the consumer is
+ * 32-bit or 64-bit, so we have to cater for both possibilities.
*/
if (compat_user_mode(regs)) {
if ((u32)idx == PERF_REG_ARM64_SP)
return regs->compat_sp;
if ((u32)idx == PERF_REG_ARM64_LR)
return regs->compat_lr;
+ if (idx == 15)
+ return regs->pc;
}
if ((u32)idx == PERF_REG_ARM64_SP)
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
index 636ca0119c0e..6aeb11aa7e28 100644
--- a/arch/arm64/kernel/probes/uprobes.c
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -41,7 +41,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
/* TODO: Currently we do not support AARCH32 instruction probing */
if (mm->context.flags & MMCF_AARCH32)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
return -EINVAL;
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 3856d51c645b..3ebb2a56e5f7 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -69,7 +69,6 @@ static int cpu_psci_cpu_disable(unsigned int cpu)
static void cpu_psci_cpu_die(unsigned int cpu)
{
- int ret;
/*
* There are no known implementations of PSCI actually using the
* power state field, pass a sensible default for now.
@@ -77,9 +76,7 @@ static void cpu_psci_cpu_die(unsigned int cpu)
u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
PSCI_0_2_POWER_STATE_TYPE_SHIFT;
- ret = psci_ops.cpu_off(state);
-
- pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
+ psci_ops.cpu_off(state);
}
static int cpu_psci_cpu_kill(unsigned int cpu)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 38aab5b34cc4..9579968e7222 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1647,12 +1647,23 @@ static void tracehook_report_syscall(struct pt_regs *regs,
saved_reg = regs->regs[regno];
regs->regs[regno] = dir;
- if (dir == PTRACE_SYSCALL_EXIT)
+ if (dir == PTRACE_SYSCALL_ENTER) {
+ if (tracehook_report_syscall_entry(regs))
+ forget_syscall(regs);
+ regs->regs[regno] = saved_reg;
+ } else if (!test_thread_flag(TIF_SINGLESTEP)) {
tracehook_report_syscall_exit(regs, 0);
- else if (tracehook_report_syscall_entry(regs))
- forget_syscall(regs);
+ regs->regs[regno] = saved_reg;
+ } else {
+ regs->regs[regno] = saved_reg;
- regs->regs[regno] = saved_reg;
+ /*
+ * Signal a pseudo-step exception since we are stepping but
+ * tracer modifications to the registers may have rewound the
+ * state machine.
+ */
+ tracehook_report_syscall_exit(regs, 1);
+ }
}
int syscall_trace_enter(struct pt_regs *regs)
@@ -1675,12 +1686,14 @@ int syscall_trace_enter(struct pt_regs *regs)
void syscall_trace_exit(struct pt_regs *regs)
{
+ unsigned long flags = READ_ONCE(current_thread_info()->flags);
+
audit_syscall_exit(regs);
- if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ if (flags & _TIF_SYSCALL_TRACEPOINT)
trace_sys_exit(regs, regs_return_value(regs));
- if (test_thread_flag(TIF_SYSCALL_TRACE))
+ if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
rseq_syscall(regs);
@@ -1758,8 +1771,8 @@ static int valid_native_regs(struct user_pt_regs *regs)
*/
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
- if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
- regs->pstate &= ~DBG_SPSR_SS;
+ /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
+ user_regs_reset_single_step(regs, task);
if (is_compat_thread(task_thread_info(task)))
return valid_compat_regs(regs);
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 5dcc942906db..ca565853dea6 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -798,7 +798,6 @@ static void setup_restart_syscall(struct pt_regs *regs)
*/
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
- struct task_struct *tsk = current;
sigset_t *oldset = sigmask_to_save();
int usig = ksig->sig;
int ret;
@@ -822,14 +821,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
*/
ret |= !valid_user_regs(&regs->user_regs, current);
- /*
- * Fast forward the stepping logic so we step into the signal
- * handler.
- */
- if (!ret)
- user_fastforward_single_step(tsk);
-
- signal_setup_done(ret, ksig, 0);
+ /* Step into the signal handler if we are stepping */
+ signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
/*
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 010212d35700..3ef9d0a3ac1d 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -19,6 +19,7 @@
*/
#include <linux/compat.h>
+#include <linux/cpufeature.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
@@ -28,6 +29,7 @@
#include <asm/cacheflush.h>
#include <asm/system_misc.h>
+#include <asm/tlbflush.h>
#include <asm/unistd.h>
static long
@@ -41,6 +43,15 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
if (fatal_signal_pending(current))
return 0;
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+ /*
+ * The workaround requires an inner-shareable tlbi.
+ * We pick the reserved-ASID to minimise the impact.
+ */
+ __tlbi(aside1is, __TLBI_VADDR(0, 0));
+ dsb(ish);
+ }
+
ret = __flush_cache_user_range(start, start + chunk);
if (ret)
return ret;
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 871c739f060a..f2d2dbbbfca2 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -50,6 +50,9 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
ret = do_ni_syscall(regs, scno);
}
+ if (is_compat_task())
+ ret = lower_32_bits(ret);
+
regs->regs[0] = ret;
}
@@ -99,8 +102,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
regs->syscallno = scno;
cortex_a76_erratum_1463225_svc_handler();
+ user_exit_irqoff();
local_daif_restore(DAIF_PROCCTX);
- user_exit();
if (has_syscall_work(flags)) {
/* set default errno for user-issued syscall(-1) */
@@ -121,7 +124,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
local_daif_mask();
flags = current_thread_info()->flags;
- if (!has_syscall_work(flags)) {
+ if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
/*
* We're off to userspace, where interrupts are
* always enabled after we restore the flags from
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 6106c49f84bc..655a308af9e3 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -272,21 +272,23 @@ void store_cpu_topology(unsigned int cpuid)
if (mpidr & MPIDR_UP_BITMASK)
return;
- /* Create cpu topology mapping based on MPIDR. */
- if (mpidr & MPIDR_MT_BITMASK) {
- /* Multiprocessor system : Multi-threads per core */
- cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
- MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
- } else {
- /* Multiprocessor system : Single-thread per core */
- cpuid_topo->thread_id = -1;
- cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
- MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
- MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
- }
+ /*
+ * This would be the place to create cpu topology based on MPIDR.
+ *
+ * However, it cannot be trusted to depict the actual topology; some
+ * pieces of the architecture enforce an artificial cap on Aff0 values
+ * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
+ * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
+ * having absolutely no relationship to the actual underlying system
+ * topology, and cannot be reasonably used as core / package ID.
+ *
+ * If the MT bit is set, Aff0 *could* be used to define a thread ID, but
+ * we still wouldn't be able to obtain a sane core ID. This means we
+ * need to entirely ignore MPIDR for any topology deduction.
+ */
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = cpuid;
+ cpuid_topo->package_id = cpu_to_node(cpuid);
pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c8dc3a3640e7..965595fe6804 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -481,6 +481,15 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+ /* Hide DIC so that we can trap the unnecessary maintenance...*/
+ val &= ~BIT(CTR_DIC_SHIFT);
+
+ /* ... and fake IminLine to reduce the number of traps. */
+ val &= ~CTR_IMINLINE_MASK;
+ val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
+ }
+
pt_regs_write_reg(regs, rt, val);
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index 856fee6d3512..b6faf8b5d1fe 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -227,7 +227,7 @@ realtime:
seqcnt_check fail=realtime
get_ts_realtime res_sec=x10, res_nsec=x11, \
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
- clock_gettime_return, shift=1
+ clock_gettime_return shift=1
ALIGN
monotonic:
@@ -250,7 +250,7 @@ monotonic:
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9
- clock_gettime_return, shift=1
+ clock_gettime_return shift=1
ALIGN
monotonic_raw:
@@ -271,7 +271,7 @@ monotonic_raw:
clock_nsec=x15, nsec_to_sec=x9
add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
- clock_gettime_return, shift=1
+ clock_gettime_return shift=1
ALIGN
realtime_coarse:
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index beca249bc2f3..b3e6c4d5b75c 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -39,6 +39,13 @@ SECTIONS
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
+ /*
+ * Discard .note.gnu.property sections which are unused and have
+ * different alignment requirement from vDSO note sections.
+ */
+ /DISCARD/ : {
+ *(.note.GNU-stack .note.gnu.property)
+ }
.note : { *(.note.*) } :text :note
. = ALIGN(16);
@@ -59,7 +66,6 @@ SECTIONS
PROVIDE(end = .);
/DISCARD/ : {
- *(.note.GNU-stack)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
}
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 74e469f8a850..69e7c8d4a00f 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -24,6 +24,13 @@ ENTRY(_text)
jiffies = jiffies_64;
+
+#define HYPERVISOR_EXTABLE \
+ . = ALIGN(SZ_8); \
+ __start___kvm_ex_table = .; \
+ *(__kvm_ex_table) \
+ __stop___kvm_ex_table = .;
+
#define HYPERVISOR_TEXT \
/* \
* Align to 4 KB so that \
@@ -39,6 +46,7 @@ jiffies = jiffies_64;
__hyp_idmap_text_end = .; \
__hyp_text_start = .; \
*(.hyp.text) \
+ HYPERVISOR_EXTABLE \
__hyp_text_end = .;
#define IDMAP_TEXT \
@@ -154,9 +162,6 @@ SECTIONS
*(.altinstructions)
__alt_instructions_end = .;
}
- .altinstr_replacement : {
- *(.altinstr_replacement)
- }
. = ALIGN(PAGE_SIZE);
__inittext_end = .;
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 4e722d73a3c3..7fe195ef7c3f 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -80,6 +80,64 @@ void kvm_arm_init_debug(void)
}
/**
+ * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
+ *
+ * @vcpu: the vcpu pointer
+ *
+ * This ensures we will trap access to:
+ * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
+ * - Debug ROM Address (MDCR_EL2_TDRA)
+ * - OS related registers (MDCR_EL2_TDOSA)
+ * - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+ * - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
+ */
+static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
+{
+ /*
+ * This also clears MDCR_EL2_E2PB_MASK to disable guest access
+ * to the profiling buffer.
+ */
+ vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
+ vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
+ MDCR_EL2_TPMS |
+ MDCR_EL2_TTRF |
+ MDCR_EL2_TPMCR |
+ MDCR_EL2_TDRA |
+ MDCR_EL2_TDOSA);
+
+ /* Is the VM being debugged by userspace? */
+ if (vcpu->guest_debug)
+ /* Route all software debug exceptions to EL2 */
+ vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
+
+ /*
+ * Trap debug register access when one of the following is true:
+ * - Userspace is using the hardware to debug the guest
+ * (KVM_GUESTDBG_USE_HW is set).
+ * - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
+ */
+ if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
+ !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
+ vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
+
+ trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
+}
+
+/**
+ * kvm_arm_vcpu_init_debug - setup vcpu debug traps
+ *
+ * @vcpu: the vcpu pointer
+ *
+ * Set vcpu initial mdcr_el2 value.
+ */
+void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ kvm_arm_setup_mdcr_el2(vcpu);
+ preempt_enable();
+}
+
+/**
* kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
*/
@@ -94,12 +152,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
* @vcpu: the vcpu pointer
*
* This is called before each entry into the hypervisor to setup any
- * debug related registers. Currently this just ensures we will trap
- * access to:
- * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
- * - Debug ROM Address (MDCR_EL2_TDRA)
- * - OS related registers (MDCR_EL2_TDOSA)
- * - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+ * debug related registers.
*
* Additionally, KVM only traps guest accesses to the debug registers if
* the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@@ -111,27 +164,14 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
- bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
- /*
- * This also clears MDCR_EL2_E2PB_MASK to disable guest access
- * to the profiling buffer.
- */
- vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
- vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
- MDCR_EL2_TPMS |
- MDCR_EL2_TPMCR |
- MDCR_EL2_TDRA |
- MDCR_EL2_TDOSA);
+ kvm_arm_setup_mdcr_el2(vcpu);
/* Is Guest debugging in effect? */
if (vcpu->guest_debug) {
- /* Route all software debug exceptions to EL2 */
- vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
-
/* Save guest debug state */
save_guest_debug_regs(vcpu);
@@ -185,7 +225,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
- trap_debug = true;
trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
&vcpu->arch.debug_ptr->dbg_bcr[0],
@@ -200,10 +239,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
BUG_ON(!vcpu->guest_debug &&
vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
- /* Trap debug register access */
- if (trap_debug)
- vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
-
/* If KDE or MDE are set, perform a full save/restore cycle. */
if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
@@ -212,7 +247,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
- trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index a6c9fbaeaefc..870e594f95ed 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -179,6 +179,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
}
memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+
+ if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
+ int i;
+
+ for (i = 0; i < 16; i++)
+ *vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
+ }
out:
return err;
}
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index ea9225160786..6ef670f189bc 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -144,11 +144,15 @@ ENTRY(__kvm_handle_stub_hvc)
1: cmp x0, #HVC_RESET_VECTORS
b.ne 1f
-reset:
+
/*
- * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
- * case we coming via HVC_SOFT_RESTART.
+ * Set the HVC_RESET_VECTORS return code before entering the common
+ * path so that we do not clobber x0-x2 in case we are coming via
+ * HVC_SOFT_RESTART.
*/
+ mov x0, xzr
+reset:
+ /* Reset kvm back to the hyp stub. */
mrs x5, sctlr_el2
ldr x6, =SCTLR_ELx_FLAGS
bic x5, x5, x6 // Clear SCTL_M and etc
@@ -159,7 +163,6 @@ reset:
/* Install stub vectors */
adr_l x5, __hyp_stub_vectors
msr vbar_el2, x5
- mov x0, xzr
eret
1: /* Bad stub call */
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 50009766e5e5..3c5414633bb7 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -149,6 +149,21 @@ static void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
}
+void __hyp_text __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Non-VHE: Disable and flush SPE data generation
+ * VHE: The vcpu can run, but it can't hide.
+ */
+ __debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1);
+
+}
+
+void __hyp_text __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
+{
+ __debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1);
+}
+
void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
@@ -156,13 +171,6 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
struct kvm_guest_debug_arch *host_dbg;
struct kvm_guest_debug_arch *guest_dbg;
- /*
- * Non-VHE: Disable and flush SPE data generation
- * VHE: The vcpu can run, but it can't hide.
- */
- if (!has_vhe())
- __debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1);
-
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
@@ -182,8 +190,6 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
struct kvm_guest_debug_arch *host_dbg;
struct kvm_guest_debug_arch *guest_dbg;
- if (!has_vhe())
- __debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1);
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index fad1e164fe48..fc83e932afbe 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -17,6 +17,7 @@
#include <linux/linkage.h>
+#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
@@ -62,6 +63,20 @@ ENTRY(__guest_enter)
// Store the host regs
save_callee_saved_regs x1
+ // Now the host state is stored. If we have a pending RAS SError it must
+ // affect the host. If any asynchronous exception is pending we defer
+ // the guest entry. The DSB isn't necessary before v8.2 as any SError
+ // would be fatal.
+alternative_if ARM64_HAS_RAS_EXTN
+ dsb nshst
+ isb
+alternative_else_nop_endif
+ mrs x1, isr_el1
+ cbz x1, 1f
+ mov x0, #ARM_EXCEPTION_IRQ
+ ret
+
+1:
add x18, x0, #VCPU_CONTEXT
// Restore guest regs x0-x17
@@ -148,18 +163,22 @@ alternative_endif
// This is our single instruction exception window. A pending
// SError is guaranteed to occur at the earliest when we unmask
// it, and at the latest just after the ISB.
- .global abort_guest_exit_start
abort_guest_exit_start:
isb
- .global abort_guest_exit_end
abort_guest_exit_end:
+ msr daifset, #4 // Mask aborts
+ ret
+
+ _kvm_extable abort_guest_exit_start, 9997f
+ _kvm_extable abort_guest_exit_end, 9997f
+9997:
+ msr daifset, #4 // Mask aborts
+ mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
- // If the exception took place, restore the EL1 exception
- // context so that we can report some information.
- // Merge the exception code with the SError pending bit.
- tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+ // restore the EL1 exception context so that we can report some
+ // information. Merge the exception code with the SError pending bit.
msr elr_el2, x2
msr esr_el2, x3
msr spsr_el2, x4
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 24b4fbafe3e4..ea063312bca1 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -26,6 +26,30 @@
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>
+.macro save_caller_saved_regs_vect
+ /* x0 and x1 were saved in the vector entry */
+ stp x2, x3, [sp, #-16]!
+ stp x4, x5, [sp, #-16]!
+ stp x6, x7, [sp, #-16]!
+ stp x8, x9, [sp, #-16]!
+ stp x10, x11, [sp, #-16]!
+ stp x12, x13, [sp, #-16]!
+ stp x14, x15, [sp, #-16]!
+ stp x16, x17, [sp, #-16]!
+.endm
+
+.macro restore_caller_saved_regs_vect
+ ldp x16, x17, [sp], #16
+ ldp x14, x15, [sp], #16
+ ldp x12, x13, [sp], #16
+ ldp x10, x11, [sp], #16
+ ldp x8, x9, [sp], #16
+ ldp x6, x7, [sp], #16
+ ldp x4, x5, [sp], #16
+ ldp x2, x3, [sp], #16
+ ldp x0, x1, [sp], #16
+.endm
+
.text
.pushsection .hyp.text, "ax"
@@ -162,28 +186,24 @@ el1_error:
mov x0, #ARM_EXCEPTION_EL1_SERROR
b __guest_exit
+el2_sync:
+ save_caller_saved_regs_vect
+ stp x29, x30, [sp, #-16]!
+ bl kvm_unexpected_el2_exception
+ ldp x29, x30, [sp], #16
+ restore_caller_saved_regs_vect
+
+ eret
+
el2_error:
- ldp x0, x1, [sp], #16
+ save_caller_saved_regs_vect
+ stp x29, x30, [sp, #-16]!
+
+ bl kvm_unexpected_el2_exception
+
+ ldp x29, x30, [sp], #16
+ restore_caller_saved_regs_vect
- /*
- * Only two possibilities:
- * 1) Either we come from the exit path, having just unmasked
- * PSTATE.A: change the return code to an EL2 fault, and
- * carry on, as we're already in a sane state to handle it.
- * 2) Or we come from anywhere else, and that's a bug: we panic.
- *
- * For (1), x0 contains the original return code and x1 doesn't
- * contain anything meaningful at that stage. We can reuse them
- * as temp registers.
- * For (2), who cares?
- */
- mrs x0, elr_el2
- adr x1, abort_guest_exit_start
- cmp x0, x1
- adr x1, abort_guest_exit_end
- ccmp x0, x1, #4, ne
- b.ne __hyp_panic
- mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
eret
ENTRY(__hyp_do_panic)
@@ -212,7 +232,6 @@ ENDPROC(\label)
invalid_vector el2t_irq_invalid
invalid_vector el2t_fiq_invalid
invalid_vector el2t_error_invalid
- invalid_vector el2h_sync_invalid
invalid_vector el2h_irq_invalid
invalid_vector el2h_fiq_invalid
invalid_vector el1_fiq_invalid
@@ -240,7 +259,7 @@ ENTRY(__kvm_hyp_vector)
invalid_vect el2t_fiq_invalid // FIQ EL2t
invalid_vect el2t_error_invalid // Error EL2t
- invalid_vect el2h_sync_invalid // Synchronous EL2h
+ valid_vect el2_sync // Synchronous EL2h
invalid_vect el2h_irq_invalid // IRQ EL2h
invalid_vect el2h_fiq_invalid // FIQ EL2h
valid_vect el2_error // Error EL2h
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index f3978931aaf4..1d16ce0b7e0d 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -23,6 +23,7 @@
#include <kvm/arm_psci.h>
#include <asm/cpufeature.h>
+#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
@@ -34,6 +35,9 @@
#include <asm/processor.h>
#include <asm/thread_info.h>
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
+
/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
@@ -264,10 +268,10 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
* saved the guest context yet, and we may return early...
*/
par = read_sysreg(par_el1);
- asm volatile("at s1e1r, %0" : : "r" (far));
- isb();
-
- tmp = read_sysreg(par_el1);
+ if (!__kvm_at("s1e1r", far))
+ tmp = read_sysreg(par_el1);
+ else
+ tmp = 1; /* back to the guest */
write_sysreg(par, par_el1);
if (unlikely(tmp & 1))
@@ -426,7 +430,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
kvm_vcpu_dabt_isvalid(vcpu) &&
!kvm_vcpu_dabt_isextabt(vcpu) &&
- !kvm_vcpu_dabt_iss1tw(vcpu);
+ !kvm_vcpu_abt_iss1tw(vcpu);
if (valid) {
int ret = __vgic_v2_perform_cpuif_access(vcpu);
@@ -556,6 +560,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
guest_ctxt = &vcpu->arch.ctxt;
__sysreg_save_state_nvhe(host_ctxt);
+ __debug_save_host_buffers_nvhe(vcpu);
__activate_traps(vcpu);
__activate_vm(kern_hyp_va(vcpu->kvm));
@@ -595,11 +600,12 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
__fpsimd_save_fpexc32(vcpu);
+ __debug_switch_to_host(vcpu);
/*
* This must come after restoring the host sysregs, since a non-VHE
* system may enable SPE here and make use of the TTBRs.
*/
- __debug_switch_to_host(vcpu);
+ __debug_restore_host_buffers_nvhe(vcpu);
return exit_code;
}
@@ -626,7 +632,7 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
* making sure it is a kernel address and not a PC-relative
* reference.
*/
- asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));
+ asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));
__hyp_do_panic(str_va,
spsr, elr,
@@ -663,3 +669,30 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
unreachable();
}
+
+asmlinkage void __hyp_text kvm_unexpected_el2_exception(void)
+{
+ unsigned long addr, fixup;
+ struct kvm_cpu_context *host_ctxt;
+ struct exception_table_entry *entry, *end;
+ unsigned long elr_el2 = read_sysreg(elr_el2);
+
+ entry = hyp_symbol_addr(__start___kvm_ex_table);
+ end = hyp_symbol_addr(__stop___kvm_ex_table);
+ host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state);
+
+ while (entry < end) {
+ addr = (unsigned long)&entry->insn + entry->insn;
+ fixup = (unsigned long)&entry->fixup + entry->fixup;
+
+ if (addr != elr_el2) {
+ entry++;
+ continue;
+ }
+
+ write_sysreg(fixup, elr_el2);
+ return;
+ }
+
+ hyp_panic(host_ctxt);
+}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 0c073f3ca122..fe97b2ad82b9 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -619,6 +619,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 pmcr, val;
+ /* No PMU available, PMCR_EL0 may UNDEF... */
+ if (!kvm_arm_support_pmu_v3())
+ return;
+
pmcr = read_sysreg(pmcr_el0);
/*
* Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
@@ -1555,9 +1559,9 @@ static const struct sys_reg_desc cp14_regs[] = {
{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
DBG_BCR_BVR_WCR_WVR(1),
/* DBGDCCINT */
- { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT },
/* DBGDSCRext */
- { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext },
DBG_BCR_BVR_WCR_WVR(2),
/* DBGDTR[RT]Xint */
{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
@@ -1572,7 +1576,7 @@ static const struct sys_reg_desc cp14_regs[] = {
{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
DBG_BCR_BVR_WCR_WVR(6),
/* DBGVCR */
- { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
+ { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR },
DBG_BCR_BVR_WCR_WVR(7),
DBG_BCR_BVR_WCR_WVR(8),
DBG_BCR_BVR_WCR_WVR(9),
@@ -1661,6 +1665,7 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
+ { Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, c2_TTBCR2 },
{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
index 67613937711f..dfedd4ab1a76 100644
--- a/arch/arm64/lib/memcpy.S
+++ b/arch/arm64/lib/memcpy.S
@@ -68,9 +68,8 @@
stp \ptr, \regB, [\regC], \val
.endm
- .weak memcpy
ENTRY(__memcpy)
-ENTRY(memcpy)
+WEAK(memcpy)
#include "copy_template.S"
ret
ENDPIPROC(memcpy)
diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
index a5a4459013b1..e3de8f05c21a 100644
--- a/arch/arm64/lib/memmove.S
+++ b/arch/arm64/lib/memmove.S
@@ -57,9 +57,8 @@ C_h .req x12
D_l .req x13
D_h .req x14
- .weak memmove
ENTRY(__memmove)
-ENTRY(memmove)
+WEAK(memmove)
cmp dstin, src
b.lo __memcpy
add tmp1, src, count
diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S
index f2670a9f218c..316263c47c00 100644
--- a/arch/arm64/lib/memset.S
+++ b/arch/arm64/lib/memset.S
@@ -54,9 +54,8 @@ dst .req x8
tmp3w .req w9
tmp3 .req x9
- .weak memset
ENTRY(__memset)
-ENTRY(memset)
+WEAK(memset)
mov dst, dstin /* Preserve return value. */
and A_lw, val, #255
orr A_lw, A_lw, A_lw, lsl #8
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index f58ea503ad01..1d7656761316 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -218,6 +218,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
ptep = (pte_t *)pudp;
} else if (sz == (PAGE_SIZE * CONT_PTES)) {
pmdp = pmd_alloc(mm, pudp, addr);
+ if (!pmdp)
+ return NULL;
WARN_ON(addr & (sz - 1));
/*
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 54529b4ed513..15eaf1e09d0c 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -58,7 +58,11 @@ EXPORT_SYMBOL(node_to_cpumask_map);
*/
const struct cpumask *cpumask_of_node(int node)
{
- if (WARN_ON(node >= nr_node_ids))
+
+ if (node == NUMA_NO_NODE)
+ return cpu_all_mask;
+
+ if (WARN_ON(node < 0 || node >= nr_node_ids))
return cpu_none_mask;
if (WARN_ON(node_to_cpumask_map[node] == NULL))
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
index 85e60509f0a8..d4b53af657c8 100644
--- a/arch/h8300/kernel/asm-offsets.c
+++ b/arch/h8300/kernel/asm-offsets.c
@@ -63,6 +63,9 @@ int main(void)
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE, thread_info, preempt_count);
+#ifdef CONFIG_PREEMPTION
+ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+#endif
return 0;
}
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index e17262ad125e..bd429d6a3163 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -186,16 +186,10 @@ static inline void writel(u32 data, volatile void __iomem *addr)
#define mmiowb()
-/*
- * Need an mtype somewhere in here, for cache type deals?
- * This is probably too long for an inline.
- */
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size);
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
+#define ioremap_nocache ioremap
+#define ioremap_uc(X, Y) ioremap((X), (Y))
-static inline void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
-{
- return ioremap_nocache(phys_addr, size);
-}
static inline void iounmap(volatile void __iomem *addr)
{
diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c
index aa248f595431..d13d4a06ee38 100644
--- a/arch/hexagon/kernel/hexagon_ksyms.c
+++ b/arch/hexagon/kernel/hexagon_ksyms.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(__vmgetie);
EXPORT_SYMBOL(__vmsetie);
EXPORT_SYMBOL(__vmyield);
EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c
index d27d67224046..370ade265e58 100644
--- a/arch/hexagon/mm/ioremap.c
+++ b/arch/hexagon/mm/ioremap.c
@@ -22,7 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
unsigned long last_addr, addr;
unsigned long offset = phys_addr & ~PAGE_MASK;
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig
index b504c8e2fd52..7fd4176ea0f6 100644
--- a/arch/ia64/configs/zx1_defconfig
+++ b/arch/ia64/configs/zx1_defconfig
@@ -36,7 +36,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_CHR_DEV_OSST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
index f319144260ce..9fbf32e6e881 100644
--- a/arch/ia64/include/asm/module.h
+++ b/arch/ia64/include/asm/module.h
@@ -14,16 +14,20 @@
struct elf64_shdr; /* forward declaration */
struct mod_arch_specific {
+ /* Used only at module load time. */
struct elf64_shdr *core_plt; /* core PLT section */
struct elf64_shdr *init_plt; /* init PLT section */
struct elf64_shdr *got; /* global offset table */
struct elf64_shdr *opd; /* official procedure descriptors */
struct elf64_shdr *unwind; /* unwind-table section */
unsigned long gp; /* global-pointer for module */
+ unsigned int next_got_entry; /* index of next available got entry */
+ /* Used at module run and cleanup time. */
void *core_unw_table; /* core unwind-table cookie returned by unwinder */
void *init_unw_table; /* init unwind-table cookie returned by unwinder */
- unsigned int next_got_entry; /* index of next available got entry */
+ void *opd_addr; /* symbolize uses .opd to get to actual function */
+ unsigned long opd_size;
};
#define MODULE_PROC_FAMILY "ia64"
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
index 7ff574d56429..f31e07fc936d 100644
--- a/arch/ia64/include/asm/ptrace.h
+++ b/arch/ia64/include/asm/ptrace.h
@@ -54,8 +54,7 @@
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
- /* FIXME: should this be bspstore + nr_dirty regs? */
- return regs->ar_bspstore;
+ return regs->r12;
}
static inline int is_syscall_success(struct pt_regs *regs)
@@ -79,11 +78,6 @@ static inline long regs_return_value(struct pt_regs *regs)
unsigned long __ip = instruction_pointer(regs); \
(__ip & ~3UL) + ((__ip & 3UL) << 2); \
})
-/*
- * Why not default? Because user_stack_pointer() on ia64 gives register
- * stack backing store instead...
- */
-#define current_user_stack_pointer() (current_pt_regs()->r12)
/* given a pointer to a task_struct, return the user's pt_regs */
# define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
index 1d0b875fec44..ec909eec0b4c 100644
--- a/arch/ia64/include/asm/syscall.h
+++ b/arch/ia64/include/asm/syscall.h
@@ -35,7 +35,7 @@ static inline void syscall_rollback(struct task_struct *task,
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
- return regs->r10 == -1 ? regs->r8:0;
+ return regs->r10 == -1 ? -regs->r8:0;
}
static inline long syscall_get_return_value(struct task_struct *task,
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index d0c0ccdd656a..03ee3ff3cefa 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -42,7 +42,7 @@ obj-y += esi_stub.o # must be in kernel proper
endif
obj-$(CONFIG_INTEL_IOMMU) += pci-dma.o
-obj-$(CONFIG_BINFMT_ELF) += elfcore.o
+obj-$(CONFIG_ELF_CORE) += elfcore.o
# fp_emulate() expects f2-f5,f16-f31 to contain the user-level state.
CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 8b5b8e6bc9d9..dd5bfed52031 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -59,7 +59,7 @@ show_##name(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
u32 cpu=dev->id; \
- return sprintf(buf, "%lx\n", name[cpu]); \
+ return sprintf(buf, "%llx\n", name[cpu]); \
}
#define store(name) \
@@ -86,9 +86,9 @@ store_call_start(struct device *dev, struct device_attribute *attr,
#ifdef ERR_INJ_DEBUG
printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
- printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
- printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
- printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
+ printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
+ printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
+ printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n",
err_data_buffer[cpu].data1,
err_data_buffer[cpu].data2,
err_data_buffer[cpu].data3);
@@ -117,8 +117,8 @@ store_call_start(struct device *dev, struct device_attribute *attr,
#ifdef ERR_INJ_DEBUG
printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
- printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
- printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
+ printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]);
+ printk(KERN_DEBUG "resources=%llx\n", resources[cpu]);
#endif
return size;
}
@@ -131,7 +131,7 @@ show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
char *buf)
{
unsigned int cpu=dev->id;
- return sprintf(buf, "%lx\n", phys_addr[cpu]);
+ return sprintf(buf, "%llx\n", phys_addr[cpu]);
}
static ssize_t
@@ -145,7 +145,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
if (ret<=0) {
#ifdef ERR_INJ_DEBUG
- printk("Virtual address %lx is not existing.\n",virt_addr);
+ printk("Virtual address %llx is not existing.\n", virt_addr);
#endif
return -EINVAL;
}
@@ -163,7 +163,7 @@ show_err_data_buffer(struct device *dev,
{
unsigned int cpu=dev->id;
- return sprintf(buf, "%lx, %lx, %lx\n",
+ return sprintf(buf, "%llx, %llx, %llx\n",
err_data_buffer[cpu].data1,
err_data_buffer[cpu].data2,
err_data_buffer[cpu].data3);
@@ -178,13 +178,13 @@ store_err_data_buffer(struct device *dev,
int ret;
#ifdef ERR_INJ_DEBUG
- printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
+ printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n",
err_data_buffer[cpu].data1,
err_data_buffer[cpu].data2,
err_data_buffer[cpu].data3,
cpu);
#endif
- ret=sscanf(buf, "%lx, %lx, %lx",
+ ret = sscanf(buf, "%llx, %llx, %llx",
&err_data_buffer[cpu].data1,
&err_data_buffer[cpu].data2,
&err_data_buffer[cpu].data3);
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index aa41bd5cf9b7..8207b897b49d 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -409,83 +409,9 @@ static void kretprobe_trampoline(void)
{
}
-/*
- * At this point the target function has been tricked into
- * returning into our trampoline. Lookup the associated instance
- * and then:
- * - call the handler function
- * - cleanup by marking the instance as unused
- * - long jump back to the original return address
- */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address =
- ((struct fnptr *)kretprobe_trampoline)->ip;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because an multiple functions in the call path
- * have a return probe installed on them, and/or more than one return
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- regs->cr_iip = orig_ret_address;
-
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
+ regs->cr_iip = __kretprobe_trampoline_handler(regs, kretprobe_trampoline, NULL);
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
@@ -498,6 +424,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->b0;
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6115464d5f03..d7400b2844f1 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1860,7 +1860,7 @@ ia64_mca_cpu_init(void *cpu_data)
data = mca_bootmem();
first_time = 0;
} else
- data = (void *)__get_free_pages(GFP_KERNEL,
+ data = (void *)__get_free_pages(GFP_ATOMIC,
get_order(sz));
if (!data)
panic("Could not allocate MCA memory for cpu %d\n",
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 1a42ba885188..ee693c8cec49 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -905,9 +905,31 @@ register_unwind_table (struct module *mod)
int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
+ struct mod_arch_specific *mas = &mod->arch;
+
DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
- if (mod->arch.unwind)
+ if (mas->unwind)
register_unwind_table(mod);
+
+ /*
+ * ".opd" was already relocated to the final destination. Store
+ * its address for use in the symbolizer.
+ */
+ mas->opd_addr = (void *)mas->opd->sh_addr;
+ mas->opd_size = mas->opd->sh_size;
+
+ /*
+ * Module relocation was already done at this point. Section
+ * headers are about to be deleted. Wipe out load-time context.
+ */
+ mas->core_plt = NULL;
+ mas->init_plt = NULL;
+ mas->got = NULL;
+ mas->opd = NULL;
+ mas->unwind = NULL;
+ mas->gp = 0;
+ mas->next_got_entry = 0;
+
return 0;
}
@@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod)
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
{
- Elf64_Shdr *opd = mod->arch.opd;
+ struct mod_arch_specific *mas = &mod->arch;
- if (ptr < (void *)opd->sh_addr ||
- ptr >= (void *)(opd->sh_addr + opd->sh_size))
+ if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
return ptr;
return dereference_function_descriptor(ptr);
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 427cd565fd61..799400287cda 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -2147,27 +2147,39 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
struct syscall_get_set_args *args = data;
struct pt_regs *pt = args->regs;
- unsigned long *krbs, cfm, ndirty;
+ unsigned long *krbs, cfm, ndirty, nlocals, nouts;
int i, count;
if (unw_unwind_to_user(info) < 0)
return;
+ /*
+ * We get here via a few paths:
+ * - break instruction: cfm is shared with caller.
+ * syscall args are in out= regs, locals are non-empty.
+ * - epc instruction: cfm is set by br.call
+ * locals don't exist.
+ *
+ * For both cases, arguments are reachable in cfm.sof - cfm.sol.
+ * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
+ */
cfm = pt->cr_ifs;
+ nlocals = (cfm >> 7) & 0x7f; /* aka sol */
+ nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */
krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
count = 0;
if (in_syscall(pt))
- count = min_t(int, args->n, cfm & 0x7f);
+ count = min_t(int, args->n, nouts);
+ /* Iterate over outs. */
for (i = 0; i < count; i++) {
+ int j = ndirty + nlocals + i + args->i;
if (args->rw)
- *ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
- args->args[i];
+ *ia64_rse_skip_regs(krbs, j) = args->args[i];
else
- args->args[i] = *ia64_rse_skip_regs(krbs,
- ndirty + i + args->i);
+ args->args[i] = *ia64_rse_skip_regs(krbs, j);
}
if (!args->rw) {
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 1928d5719e41..db3104c9fac5 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -96,7 +96,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
* acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
* called yet. Note that node 0 will also count all non-existent cpus.
*/
-static int __meminit early_nr_cpus_node(int node)
+static int early_nr_cpus_node(int node)
{
int cpu, n = 0;
@@ -111,7 +111,7 @@ static int __meminit early_nr_cpus_node(int node)
* compute_pernodesize - compute size of pernode data
* @node: the node id.
*/
-static unsigned long __meminit compute_pernodesize(int node)
+static unsigned long compute_pernodesize(int node)
{
unsigned long pernodesize = 0, cpus;
@@ -371,7 +371,7 @@ static void __init reserve_pernode_space(void)
}
}
-static void __meminit scatter_node_data(void)
+static void scatter_node_data(void)
{
pg_data_t **dst;
int node;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 79e5cc70f1fd..561e2573bd34 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -499,7 +499,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
if (map_start < map_end)
memmap_init_zone((unsigned long)(map_end - map_start),
args->nid, args->zone, page_to_pfn(map_start),
- MEMMAP_EARLY, NULL);
+ MEMINIT_EARLY, NULL);
return 0;
}
@@ -508,8 +508,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn)
{
if (!vmem_map) {
- memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
- NULL);
+ memmap_init_zone(size, nid, zone, start_pfn,
+ MEMINIT_EARLY, NULL);
} else {
struct page *start;
struct memmap_init_callback_data args;
diff --git a/arch/ia64/scripts/unwcheck.py b/arch/ia64/scripts/unwcheck.py
index c55276e31b6b..bfd1b671e35f 100644
--- a/arch/ia64/scripts/unwcheck.py
+++ b/arch/ia64/scripts/unwcheck.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
#
# Usage: unwcheck.py FILE
diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c
index 62b0eb6cf69a..84eab0f5e00a 100644
--- a/arch/m68k/coldfire/pci.c
+++ b/arch/m68k/coldfire/pci.c
@@ -216,8 +216,10 @@ static int __init mcf_pci_init(void)
/* Keep a virtual mapping to IO/config space active */
iospace = (unsigned long) ioremap(PCI_IO_PA, PCI_IO_SIZE);
- if (iospace == 0)
+ if (iospace == 0) {
+ pci_free_host_bridge(bridge);
return -ENODEV;
+ }
pr_info("Coldfire: PCI IO/config window mapped to 0x%x\n",
(u32) iospace);
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 93a3c3c0238c..bd55d9ba7049 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -341,7 +341,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index e3d0efd6397d..83789fd6780d 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -328,7 +328,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 75ac0c76e884..db0c0fa6075b 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -336,7 +336,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index c6e492700188..6a64904a5a4c 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -326,7 +326,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index b00d1c477432..dcfa9a3d1a0b 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -328,7 +328,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 85cac3770d89..916dbe2d61c3 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -335,7 +335,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index b3a5d1e99d27..2f5c02089574 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -358,7 +358,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 0ca22608453f..40240c32418e 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -325,7 +325,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 8e3d10d12d9c..34b7038f7de7 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -326,7 +326,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index ff7e653ec7fa..6375a7c86471 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -333,7 +333,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 612cf46f6d0c..0c38799e9a67 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -323,7 +323,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index a6a7bb6dc3fd..8180ef6b6983 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -323,7 +323,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
index 9138a624c5c8..692f90e7fecc 100644
--- a/arch/m68k/include/asm/m53xxacr.h
+++ b/arch/m68k/include/asm/m53xxacr.h
@@ -89,9 +89,9 @@
* coherency though in all cases. And for copyback caches we will need
* to push cached data as well.
*/
-#define CACHE_INIT CACR_CINVA
-#define CACHE_INVALIDATE CACR_CINVA
-#define CACHE_INVALIDATED CACR_CINVA
+#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC)
+#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA)
+#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA)
#define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \
(0x000f0000) + \
diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h
index de1470c4d829..1149251ea58d 100644
--- a/arch/m68k/include/asm/mac_via.h
+++ b/arch/m68k/include/asm/mac_via.h
@@ -257,6 +257,7 @@ extern int rbv_present,via_alt_mapping;
struct irq_desc;
+extern void via_l2_flush(int writeback);
extern void via_register_interrupts(void);
extern void via_irq_enable(int);
extern void via_irq_disable(int);
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index cfd5475bfc31..4625acc1e7c4 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -140,7 +140,8 @@ void __init setup_arch(char **cmdline_p)
pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
__bss_stop, memory_start, memory_start, memory_end);
- memblock_add(memory_start, memory_end - memory_start);
+ memblock_add(_rambase, memory_end - _rambase);
+ memblock_reserve(_rambase, memory_start - _rambase);
/* Keep a copy of command line */
*cmdline_p = &command_line[0];
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index cd9317d53276..a4f91bea6c88 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -61,7 +61,6 @@ extern void iop_preinit(void);
extern void iop_init(void);
extern void via_init(void);
extern void via_init_clock(irq_handler_t func);
-extern void via_flush_cache(void);
extern void oss_init(void);
extern void psc_init(void);
extern void baboon_init(void);
@@ -132,21 +131,6 @@ int __init mac_parse_bootinfo(const struct bi_record *record)
return unknown;
}
-/*
- * Flip into 24bit mode for an instant - flushes the L2 cache card. We
- * have to disable interrupts for this. Our IRQ handlers will crap
- * themselves if they take an IRQ in 24bit mode!
- */
-
-static void mac_cache_card_flush(int writeback)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- via_flush_cache();
- local_irq_restore(flags);
-}
-
void __init config_mac(void)
{
if (!MACH_IS_MAC)
@@ -178,9 +162,8 @@ void __init config_mac(void)
* not.
*/
- if (macintosh_config->ident == MAC_MODEL_IICI
- || macintosh_config->ident == MAC_MODEL_IIFX)
- mach_l2_flush = mac_cache_card_flush;
+ if (macintosh_config->ident == MAC_MODEL_IICI)
+ mach_l2_flush = via_l2_flush;
}
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index 9bfa17015768..c432bfafe63e 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -183,7 +183,7 @@ static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8
static __inline__ void iop_stop(volatile struct mac_iop *iop)
{
- iop->status_ctrl &= ~IOP_RUN;
+ iop->status_ctrl = IOP_AUTOINC;
}
static __inline__ void iop_start(volatile struct mac_iop *iop)
@@ -191,14 +191,9 @@ static __inline__ void iop_start(volatile struct mac_iop *iop)
iop->status_ctrl = IOP_RUN | IOP_AUTOINC;
}
-static __inline__ void iop_bypass(volatile struct mac_iop *iop)
-{
- iop->status_ctrl |= IOP_BYPASS;
-}
-
static __inline__ void iop_interrupt(volatile struct mac_iop *iop)
{
- iop->status_ctrl |= IOP_IRQ;
+ iop->status_ctrl = IOP_IRQ | IOP_RUN | IOP_AUTOINC;
}
static int iop_alive(volatile struct mac_iop *iop)
@@ -244,7 +239,6 @@ void __init iop_preinit(void)
} else {
iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_QUADRA;
}
- iop_base[IOP_NUM_SCC]->status_ctrl = 0x87;
iop_scc_present = 1;
} else {
iop_base[IOP_NUM_SCC] = NULL;
@@ -256,7 +250,7 @@ void __init iop_preinit(void)
} else {
iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA;
}
- iop_base[IOP_NUM_ISM]->status_ctrl = 0;
+ iop_stop(iop_base[IOP_NUM_ISM]);
iop_ism_present = 1;
} else {
iop_base[IOP_NUM_ISM] = NULL;
@@ -416,7 +410,8 @@ static void iop_handle_send(uint iop_num, uint chan)
msg->status = IOP_MSGSTATUS_UNUSED;
msg = msg->next;
iop_send_queue[iop_num][chan] = msg;
- if (msg) iop_do_send(msg);
+ if (msg && iop_readb(iop, IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE)
+ iop_do_send(msg);
}
/*
@@ -490,16 +485,12 @@ int iop_send_message(uint iop_num, uint chan, void *privdata,
if (!(q = iop_send_queue[iop_num][chan])) {
iop_send_queue[iop_num][chan] = msg;
+ iop_do_send(msg);
} else {
while (q->next) q = q->next;
q->next = msg;
}
- if (iop_readb(iop_base[iop_num],
- IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) {
- iop_do_send(msg);
- }
-
return 0;
}
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index 038d5a1c4d48..8307da441a10 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -289,10 +289,14 @@ void via_debug_dump(void)
* the system into 24-bit mode for an instant.
*/
-void via_flush_cache(void)
+void via_l2_flush(int writeback)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
via2[gBufB] &= ~VIA2B_vMode32;
via2[gBufB] |= VIA2B_vMode32;
+ local_irq_restore(flags);
}
/*
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index f5453d944ff5..c5e26222979a 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -160,7 +160,7 @@ void __init cf_bootmem_alloc(void)
m68k_memory[0].addr = _rambase;
m68k_memory[0].size = _ramend - _rambase;
- memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+ memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
/* compute total pages in system */
num_pages = PFN_DOWN(_ramend - _rambase);
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 96810d91da2b..4a25ce6a1823 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -273,6 +273,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll)
{
int tmp = Q40_RTC_CTRL;
+ pll->pll_ctrl = 0;
pll->pll_value = tmp & Q40_RTC_PLL_MASK;
if (tmp & Q40_RTC_PLL_SIGN)
pll->pll_value = -pll->pll_value;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a830a9701e50..cc8c8d22afaf 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -852,6 +852,7 @@ config SNI_RM
select I8253
select I8259
select ISA
+ select MIPS_L1_CACHE_SHIFT_6
select SWAP_IO_SPACE if CPU_BIG_ENDIAN
select SYS_HAS_CPU_R4X00
select SYS_HAS_CPU_R5000
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index ad0a92f95af1..63e2ad43bd6a 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -290,12 +290,23 @@ ifdef CONFIG_64BIT
endif
endif
+# When linking a 32-bit executable the LLVM linker cannot cope with a
+# 32-bit load address that has been sign-extended to 64 bits. Simply
+# remove the upper 32 bits then, as it is safe to do so with other
+# linkers.
+ifdef CONFIG_64BIT
+ load-ld = $(load-y)
+else
+ load-ld = $(subst 0xffffffff,0x,$(load-y))
+endif
+
KBUILD_AFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(cflags-y)
-KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
+KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) -DLINKER_LOAD_ADDRESS=$(load-ld)
KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
+ LINKER_LOAD_ADDRESS=$(load-ld) \
VMLINUX_ENTRY_ADDRESS=$(entry-y) \
PLATFORM="$(platform-y)" \
ITS_INPUTS="$(its-y)"
diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c
index 5f05b8714385..b968cff5baa7 100644
--- a/arch/mips/alchemy/board-xxs1500.c
+++ b/arch/mips/alchemy/board-xxs1500.c
@@ -31,6 +31,7 @@
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
#include <prom.h>
const char *get_system_type(void)
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
index d129475fd40d..4254ba13c5c5 100644
--- a/arch/mips/alchemy/common/clock.c
+++ b/arch/mips/alchemy/common/clock.c
@@ -152,6 +152,7 @@ static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
{
struct clk_init_data id;
struct clk_hw *h;
+ struct clk *clk;
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
@@ -164,7 +165,13 @@ static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
id.ops = &alchemy_clkops_cpu;
h->init = &id;
- return clk_register(NULL, h);
+ clk = clk_register(NULL, h);
+ if (IS_ERR(clk)) {
+ pr_err("failed to register clock\n");
+ kfree(h);
+ }
+
+ return clk;
}
/* AUXPLLs ************************************************************/
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index 29471038d817..c6b99845fb37 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -27,6 +27,7 @@ config BCM47XX_BCMA
select BCMA
select BCMA_HOST_SOC
select BCMA_DRIVER_MIPS
+ select BCMA_DRIVER_PCI if PCI
select BCMA_DRIVER_PCI_HOSTMODE if PCI
select BCMA_DRIVER_GPIO
default y
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index d859f079b771..378cbfb31ee7 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -90,7 +90,7 @@ ifneq ($(zload-y),)
VMLINUZ_LOAD_ADDRESS := $(zload-y)
else
VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
- $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
+ $(obj)/vmlinux.bin $(LINKER_LOAD_ADDRESS))
endif
UIMAGE_LOADADDR = $(VMLINUZ_LOAD_ADDRESS)
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index 81df9047e110..40218be0b7ce 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -17,6 +17,7 @@
#include <linux/libfdt.h>
#include <asm/addrspace.h>
+#include <asm/unaligned.h>
/*
* These two variables specify the free mem region
@@ -117,7 +118,7 @@ void decompress_kernel(unsigned long boot_heap_start)
dtb_size = fdt_totalsize((void *)&__appended_dtb);
/* last four bytes is always image size in little endian */
- image_size = le32_to_cpup((void *)&__image_end - 4);
+ image_size = get_unaligned_le32((void *)&__image_end - 4);
/* copy dtb to where the booted kernel will expect it */
memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 8272d8c648ca..43e4fc1b373c 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -2199,6 +2199,9 @@ static int octeon_irq_cib_map(struct irq_domain *d,
}
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
cd->host_data = host_data;
cd->bit = hw;
diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
index bfdfaf32d2c4..75189ff2f3c7 100644
--- a/arch/mips/cavium-octeon/octeon-usb.c
+++ b/arch/mips/cavium-octeon/octeon-usb.c
@@ -517,6 +517,7 @@ static int __init dwc3_octeon_device_init(void)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
+ put_device(&pdev->dev);
dev_err(&pdev->dev, "No memory resources\n");
return -ENXIO;
}
@@ -528,8 +529,10 @@ static int __init dwc3_octeon_device_init(void)
* know the difference.
*/
base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
+ if (IS_ERR(base)) {
+ put_device(&pdev->dev);
return PTR_ERR(base);
+ }
mutex_lock(&dwc3_octeon_clocks_mutex);
dwc3_octeon_clocks_start(&pdev->dev, (u64)base);
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index 5e73fe755be6..bd6654299a9c 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -123,7 +123,6 @@ CONFIG_BLK_DEV_TC86C001=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_CHR_DEV_SCH=m
CONFIG_ATA=y
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 499f51498ecb..8f3e08d8607b 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -111,7 +111,6 @@ CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_LOWLEVEL is not set
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 91a9c13e2c82..9855addcd7d5 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -107,7 +107,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_CONSTANTS=y
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index ebff297328ae..a5f6be4b949b 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -54,7 +54,6 @@ CONFIG_RAID_ATTRS=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index 9ad1c94376c8..aa101c27ed25 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -206,7 +206,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_FC_ATTRS=y
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index 324dfee23dfb..c871e40b8878 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -250,7 +250,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_DRM=y
-CONFIG_DRM_RADEON=y
+CONFIG_DRM_RADEON=m
CONFIG_FB_RADEON=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=m
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 81058295d35f..4338c2189c0f 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -245,7 +245,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index 5c10cddc39d3..224718f45393 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -252,7 +252,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
index bb694f5065f1..03cf2abe2d65 100644
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -254,7 +254,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 99a19cf5f9ba..f64074aa06b6 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -250,7 +250,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 1a3e1fec4e86..194df200daad 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -218,7 +218,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_FC_ATTRS=y
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index a45af3de075d..d43e4ab20b23 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_34K:
case CPU_1004K:
case CPU_74K:
+ case CPU_1074K:
case CPU_M14KC:
case CPU_M14KEC:
case CPU_INTERAPTIV:
diff --git a/arch/mips/include/asm/div64.h b/arch/mips/include/asm/div64.h
index dc5ea5736440..ceece76fc971 100644
--- a/arch/mips/include/asm/div64.h
+++ b/arch/mips/include/asm/div64.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2000, 2004 Maciej W. Rozycki
+ * Copyright (C) 2000, 2004, 2021 Maciej W. Rozycki
* Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -9,25 +9,18 @@
#ifndef __ASM_DIV64_H
#define __ASM_DIV64_H
-#include <asm-generic/div64.h>
-
-#if BITS_PER_LONG == 64
+#include <asm/bitsperlong.h>
-#include <linux/types.h>
+#if BITS_PER_LONG == 32
/*
* No traps on overflows for any of these...
*/
-#define __div64_32(n, base) \
-({ \
+#define do_div64_32(res, high, low, base) ({ \
unsigned long __cf, __tmp, __tmp2, __i; \
unsigned long __quot32, __mod32; \
- unsigned long __high, __low; \
- unsigned long long __n; \
\
- __high = *__n >> 32; \
- __low = __n; \
__asm__( \
" .set push \n" \
" .set noat \n" \
@@ -51,18 +44,48 @@
" subu %0, %0, %z6 \n" \
" addiu %2, %2, 1 \n" \
"3: \n" \
- " bnez %4, 0b\n\t" \
- " srl %5, %1, 0x1f\n\t" \
+ " bnez %4, 0b \n" \
+ " srl %5, %1, 0x1f \n" \
" .set pop" \
: "=&r" (__mod32), "=&r" (__tmp), \
"=&r" (__quot32), "=&r" (__cf), \
"=&r" (__i), "=&r" (__tmp2) \
- : "Jr" (base), "0" (__high), "1" (__low)); \
+ : "Jr" (base), "0" (high), "1" (low)); \
\
- (__n) = __quot32; \
+ (res) = __quot32; \
__mod32; \
})
-#endif /* BITS_PER_LONG == 64 */
+#define __div64_32(n, base) ({ \
+ unsigned long __upper, __low, __high, __radix; \
+ unsigned long long __quot; \
+ unsigned long long __div; \
+ unsigned long __mod; \
+ \
+ __div = (*n); \
+ __radix = (base); \
+ \
+ __high = __div >> 32; \
+ __low = __div; \
+ \
+ if (__high < __radix) { \
+ __upper = __high; \
+ __high = 0; \
+ } else { \
+ __upper = __high % __radix; \
+ __high /= __radix; \
+ } \
+ \
+ __mod = do_div64_32(__low, __upper, __low, __radix); \
+ \
+ __quot = __high; \
+ __quot = __quot << 32 | __low; \
+ (*n) = __quot; \
+ __mod; \
+})
+
+#endif /* BITS_PER_LONG == 32 */
+
+#include <asm-generic/div64.h>
#endif /* __ASM_DIV64_H */
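
The reworked __div64_32() above splits the 64-by-32 division into two steps whose quotients each fit in 32 bits, which is what the inline-asm loop requires. The same arithmetic as a stand-alone sketch, with plain C standing in for the do_div64_32() asm step (the helper name below is made up for illustration):

#include <stdint.h>
#include <stdio.h>

static uint32_t div64_32_sketch(uint64_t *n, uint32_t base)
{
	uint32_t high = *n >> 32, low = (uint32_t)*n;
	uint32_t upper;
	uint64_t rest;

	if (high < base) {		/* top word of the quotient is zero */
		upper = high;
		high = 0;
	} else {			/* divide the top word first */
		upper = high % base;
		high = high / base;
	}

	/* (upper:low) / base fits in 32 bits because upper < base */
	rest = ((uint64_t)upper << 32) | low;
	low = (uint32_t)(rest / base);

	*n = ((uint64_t)high << 32) | low;	/* full 64-bit quotient */
	return (uint32_t)(rest % base);		/* remainder */
}

int main(void)
{
	uint64_t n = 0x0123456789abcdefULL;
	uint32_t r = div64_32_sketch(&n, 1000000007u);

	printf("quotient=%llu remainder=%u\n", (unsigned long long)n, r);
	return 0;
}
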
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index f567ace7a9e9..c254761cb8ad 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -274,8 +274,12 @@ enum emulation_result {
#define MIPS3_PG_SHIFT 6
#define MIPS3_PG_FRAME 0x3fffffc0
+#if defined(CONFIG_64BIT)
+#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13)
+#else
#define VPN2_MASK 0xffffe000
-#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID
+#endif
+#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data)
#define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
@@ -932,7 +936,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end, bool blockable);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
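
For reference, GENMASK(h, l) in the new VPN2_MASK definition builds an unsigned long with bits l through h set, so the mask now tracks the CPU's virtual-address width instead of a fixed 32-bit constant. A stand-alone sketch of the expansion, assuming a 64-bit long and cpu_vmbits == 40 purely as an example value:

#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(long))
#define GENMASK(h, l) \
	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	/* with cpu_vmbits == 40 this covers bits 39..13 */
	printf("VPN2_MASK = %#lx\n", GENMASK(40 - 1, 13));	/* 0xffffffe000 */
	return 0;
}
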
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 1bb9448777c5..f9a7c137be9f 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -749,7 +749,7 @@
/* MAAR bit definitions */
#define MIPS_MAAR_VH (_U64CAST_(1) << 63)
-#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
+#define MIPS_MAAR_ADDR GENMASK_ULL(55, 12)
#define MIPS_MAAR_ADDR_SHIFT 12
#define MIPS_MAAR_S (_ULCAST_(1) << 1)
#define MIPS_MAAR_VL (_ULCAST_(1) << 0)
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 6c257b52f57f..7fad007fe025 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -477,20 +477,20 @@ NESTED(nmi_handler, PT_SIZE, sp)
.endm
.macro __build_clear_fpe
+ CLI
+ TRACE_IRQS_OFF
.set push
/* gas fails to assemble cfc1 for some archs (octeon).*/ \
.set mips1
SET_HARDFLOAT
cfc1 a1, fcr31
.set pop
- CLI
- TRACE_IRQS_OFF
.endm
.macro __build_clear_msa_fpe
- _cfcmsa a1, MSA_CSR
CLI
TRACE_IRQS_OFF
+ _cfcmsa a1, MSA_CSR
.endm
.macro __build_clear_ade
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 7f3f136572de..50d3d74001cb 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -123,9 +123,9 @@ static char *cm2_causes[32] = {
"COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
"0x08", "0x09", "0x0a", "0x0b",
"0x0c", "0x0d", "0x0e", "0x0f",
- "0x10", "0x11", "0x12", "0x13",
- "0x14", "0x15", "0x16", "INTVN_WR_ERR",
- "INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
+ "0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13",
+ "0x14", "0x15", "0x16", "0x17",
+ "0x18", "0x19", "0x1a", "0x1b",
"0x1c", "0x1d", "0x1e", "0x1f"
};
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index cbf4cc0b0b6c..934caf204078 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -187,8 +187,14 @@ static int __init relocate_exception_table(long offset)
static inline __init unsigned long rotate_xor(unsigned long hash,
const void *area, size_t size)
{
- size_t i;
- unsigned long *ptr = (unsigned long *)area;
+ const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
+ size_t diff, i;
+
+ diff = (void *)ptr - area;
+ if (unlikely(size < diff + sizeof(hash)))
+ return hash;
+
+ size = ALIGN_DOWN(size - diff, sizeof(hash));
for (i = 0; i < size / sizeof(hash); i++) {
/* Rotate by odd number of bits and XOR. */
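
The new prologue above skips the unaligned lead-in bytes and trims the tail so the hashing loop only ever dereferences naturally aligned words. The same pointer arithmetic in a user-space sketch (PTR_ALIGN and ALIGN_DOWN are re-created here to mimic the kernel macros):

#include <stdint.h>
#include <stdio.h>

#define PTR_ALIGN(p, a)		((void *)(((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1)))
#define ALIGN_DOWN(x, a)	((x) & ~((uintptr_t)(a) - 1))

int main(void)
{
	unsigned long buf[8];
	/* pretend the seed area starts at an odd offset into the buffer */
	const void *area = (const char *)buf + 3;
	size_t size = sizeof(buf) - 3;
	const unsigned long *ptr = PTR_ALIGN(area, sizeof(unsigned long));
	size_t diff = (const char *)ptr - (const char *)area;

	if (size < diff + sizeof(unsigned long))
		return 0;		/* not even one aligned word to hash */

	size = ALIGN_DOWN(size - diff, sizeof(unsigned long));
	printf("skip %zu lead-in bytes, hash %zu aligned bytes\n", diff, size);
	return 0;
}
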
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index e87c98b8a72c..2c2480be3f36 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -933,7 +933,17 @@ static void __init arch_mem_init(char **cmdline_p)
BOOTMEM_DEFAULT);
#endif
device_tree_init();
+
+ /*
+ * To reduce the chance of a kernel panic when we fail to get IO TLB
+ * memory under CONFIG_SWIOTLB, allocate as little low memory as
+ * possible before plat_swiotlb_setup(), so make sparse_init() use
+ * top-down allocation.
+ */
+ memblock_set_bottom_up(false);
sparse_init();
+ memblock_set_bottom_up(true);
+
plat_swiotlb_setup();
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 5ec546b5eed1..d16e6654a655 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -240,6 +240,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle)
*/
static void bmips_init_secondary(void)
{
+ bmips_cpu_setup();
+
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index bfe02ded25d1..a0d994a4aaa9 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -22,12 +22,77 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/export.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/div64.h>
#include <asm/time.h>
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref);
+static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq);
+static unsigned long glb_lpj_ref;
+static unsigned long glb_lpj_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ int cpu;
+ struct cpufreq_freqs *freq = data;
+
+ /*
+ * Skip the lpj adjustment if the CPU-freq transition is safe for the
+ * delay loops. (Is this possible?)
+ */
+ if (freq->flags & CPUFREQ_CONST_LOOPS)
+ return NOTIFY_OK;
+
+ /* Save the initial lpj values for future scaling. */
+ if (!glb_lpj_ref) {
+ glb_lpj_ref = boot_cpu_data.udelay_val;
+ glb_lpj_ref_freq = freq->old;
+
+ for_each_online_cpu(cpu) {
+ per_cpu(pcp_lpj_ref, cpu) =
+ cpu_data[cpu].udelay_val;
+ per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
+ }
+ }
+
+ cpu = freq->cpu;
+ /*
+ * Adjust global lpj variable and per-CPU udelay_val number in
+ * accordance with the new CPU frequency.
+ */
+ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+ loops_per_jiffy = cpufreq_scale(glb_lpj_ref,
+ glb_lpj_ref_freq,
+ freq->new);
+
+ cpu_data[cpu].udelay_val = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
+ per_cpu(pcp_lpj_ref_freq, cpu), freq->new);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+ .notifier_call = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+ return cpufreq_register_notifier(&cpufreq_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif /* CONFIG_CPU_FREQ */
+
/*
* forward reference
*/
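
The notifier above rescales loops_per_jiffy and each CPU's udelay_val in proportion to the frequency change, i.e. ref_lpj * new_freq / ref_freq as the cpufreq_scale() call sites suggest. The proportion as a stand-alone sketch (the kHz figures are example values only):

#include <stdint.h>
#include <stdio.h>

static unsigned long scale_lpj(unsigned long ref_lpj,
			       unsigned long ref_khz, unsigned long new_khz)
{
	/* widen the product so ref_lpj * new_khz cannot overflow */
	return (unsigned long)(((uint64_t)ref_lpj * new_khz) / ref_khz);
}

int main(void)
{
	/* a CPU calibrated at 500 MHz, later clocked up to 1 GHz */
	printf("udelay_val: %lu -> %lu\n", 2500000UL,
	       scale_lpj(2500000UL, 500000UL, 1000000UL));
	return 0;
}
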
diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c
index cd3e1f82e1a5..08ad6371fbe0 100644
--- a/arch/mips/kernel/topology.c
+++ b/arch/mips/kernel/topology.c
@@ -20,7 +20,7 @@ static int __init topology_init(void)
for_each_present_cpu(i) {
struct cpu *c = &per_cpu(cpu_devices, i);
- c->hotpluggable = 1;
+ c->hotpluggable = !!i;
ret = register_cpu(c, i);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9dab0ed1b227..b9da2cefb564 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2096,6 +2096,7 @@ static void configure_status(void)
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
+ back_to_back_c0_hazard();
}
unsigned int hwrena;
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 36f2e860ba3e..968c5765020c 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -50,7 +50,7 @@ SECTIONS
/* . = 0xa800000000300000; */
. = 0xffffffff80300000;
#endif
- . = VMLINUX_LOAD_ADDRESS;
+ . = LINKER_LOAD_ADDRESS;
/* read-only */
_text = .; /* Text and read-only data */
.text : {
@@ -93,6 +93,7 @@ SECTIONS
INIT_TASK_DATA(THREAD_SIZE)
NOSAVE_DATA
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
DATA_DATA
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index e7f5ef6bed0f..79485790f7b5 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -131,6 +131,8 @@ void kvm_arch_check_processor_compat(void *rtn)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
+ case KVM_VM_MIPS_AUTO:
+ break;
#ifdef CONFIG_KVM_MIPS_VZ
case KVM_VM_MIPS_VZ:
#else
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index d8dcdb350405..098a7afd4d38 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -512,7 +512,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
return 1;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ bool blockable)
{
handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 37caeadb2964..0476d7e97a03 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -244,7 +244,7 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
/* if this is a EBU irq, we need to ack it or get a deadlock */
- if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
+ if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
LTQ_EBU_PCC_ISTAT);
}
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 05a539d3a597..60fe72170856 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1673,7 +1673,7 @@ static int probe_scache(void)
return 1;
}
-static void __init loongson2_sc_init(void)
+static void loongson2_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -1789,7 +1789,11 @@ static void setup_scache(void)
printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
scache_size >> 10,
way_string[c->scache.ways], c->scache.linesz);
+
+ if (current_cpu_type() == CPU_BMIPS5000)
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
+
#else
if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 0596505770db..11985399c469 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -424,6 +424,7 @@ int has_transparent_hugepage(void)
}
return mask == PM_HUGE_MASK;
}
+EXPORT_SYMBOL(has_transparent_hugepage);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 3944c49eee0c..620abc968624 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1479,6 +1479,7 @@ static void build_r4000_tlb_refill_handler(void)
static void setup_pw(void)
{
+ unsigned int pwctl;
unsigned long pgd_i, pgd_w;
#ifndef __PAGETABLE_PMD_FOLDED
unsigned long pmd_i, pmd_w;
@@ -1505,6 +1506,7 @@ static void setup_pw(void)
pte_i = ilog2(_PAGE_GLOBAL);
pte_w = 0;
+ pwctl = 1 << 30; /* Set PWDirExt */
#ifndef __PAGETABLE_PMD_FOLDED
write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
@@ -1515,8 +1517,9 @@ static void setup_pw(void)
#endif
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
- write_c0_pwctl(1 << 6 | psn);
+ pwctl |= (1 << 6 | psn);
#endif
+ write_c0_pwctl(pwctl);
write_c0_kpgd((long)swapper_pg_dir);
kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
}
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index 3c3b1e6abb53..e8b0751d5b76 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -169,8 +169,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
res = hose->mem_resource;
break;
}
- if (res != NULL)
- of_pci_range_to_resource(&range, node, res);
+ if (res != NULL) {
+ res->name = node->full_name;
+ res->flags = range.flags;
+ res->start = range.cpu_addr;
+ res->end = range.cpu_addr + range.size - 1;
+ res->parent = res->child = res->sibling = NULL;
+ }
}
}
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
index f6b77788124a..5ae11aeff4b7 100644
--- a/arch/mips/pci/pci-mt7620.c
+++ b/arch/mips/pci/pci-mt7620.c
@@ -33,6 +33,7 @@
#define RALINK_GPIOMODE 0x60
#define PPLL_CFG1 0x9c
+#define PPLL_LD BIT(23)
#define PPLL_DRV 0xa0
#define PDRV_SW_SET BIT(31)
@@ -242,8 +243,8 @@ static int mt7620_pci_hw_init(struct platform_device *pdev)
rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
mdelay(100);
- if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) {
- dev_err(&pdev->dev, "MT7620 PPLL unlock\n");
+ if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) {
+ dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n");
reset_control_assert(rstpcie0);
rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
return -1;
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
index 711cdccdf65b..f7926a964441 100644
--- a/arch/mips/pci/pci-rt2880.c
+++ b/arch/mips/pci/pci-rt2880.c
@@ -183,7 +183,6 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- u16 cmd;
int irq = -1;
if (dev->bus->number != 0)
@@ -191,8 +190,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
switch (PCI_SLOT(dev->devfn)) {
case 0x00:
- rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
- (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
break;
case 0x11:
irq = RT288X_CPU_IRQ_PCI;
@@ -204,16 +201,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
break;
}
- pci_write_config_byte((struct pci_dev *) dev,
- PCI_CACHE_LINE_SIZE, 0x14);
- pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF);
- pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd);
- cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
- PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK |
- PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY;
- pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd);
- pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE,
- dev->irq);
return irq;
}
@@ -252,6 +239,30 @@ static int rt288x_pci_probe(struct platform_device *pdev)
int pcibios_plat_dev_init(struct pci_dev *dev)
{
+ static bool slot0_init;
+
+ /*
+ * Nobody seems to initialize slot 0, but this platform requires it, so
+ * do it once when some other slot is being enabled. The PCI subsystem
+ * should configure other slots properly, so no need to do anything
+ * special for those.
+ */
+ if (!slot0_init && dev->bus->number == 0) {
+ u16 cmd;
+ u32 bar0;
+
+ slot0_init = true;
+
+ pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
+ 0x08000000);
+ pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
+ &bar0);
+
+ pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+ pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd);
+ }
+
return 0;
}
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 1ada8492733b..92b3d4849996 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
@@ -27,6 +28,7 @@
__iomem void *rt_sysc_membase;
__iomem void *rt_memc_membase;
+EXPORT_SYMBOL_GPL(rt_sysc_membase);
__iomem void *plat_of_remap_node(const char *node)
{
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index f9407e170476..c6af7047eb0d 100644
--- a/arch/mips/sni/a20r.c
+++ b/arch/mips/sni/a20r.c
@@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = {
},
};
-static u32 a20r_ack_hwint(void)
+/*
+ * Trigger chipset to update CPU's CAUSE IP field
+ */
+static u32 a20r_update_cause_ip(void)
{
u32 status = read_c0_status();
@@ -205,12 +208,14 @@ static void a20r_hwint(void)
int irq;
clear_c0_status(IE_IRQ0);
- status = a20r_ack_hwint();
+ status = a20r_update_cause_ip();
cause = read_c0_cause();
irq = ffs(((cause & status) >> 8) & 0xf8);
if (likely(irq > 0))
do_IRQ(SNI_A20R_IRQ_BASE + irq - 1);
+
+ a20r_update_cause_ip();
set_c0_status(IE_IRQ0);
}
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index c99fa1c1bd9c..a876b1657bf4 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -10,12 +10,9 @@ ccflags-vdso := \
$(filter -march=%,$(KBUILD_CFLAGS)) \
$(filter -m%-float,$(KBUILD_CFLAGS)) \
$(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
+ $(CLANG_FLAGS) \
-D__VDSO__
-ifeq ($(cc-name),clang)
-ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
-endif
-
cflags-vdso := $(ccflags-vdso) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
-O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
diff --git a/arch/mips/vdso/genvdso.c b/arch/mips/vdso/genvdso.c
index 530a36f465ce..afcc86726448 100644
--- a/arch/mips/vdso/genvdso.c
+++ b/arch/mips/vdso/genvdso.c
@@ -126,6 +126,7 @@ static void *map_vdso(const char *path, size_t *_size)
if (fstat(fd, &stat) != 0) {
fprintf(stderr, "%s: Failed to stat '%s': %s\n", program_name,
path, strerror(errno));
+ close(fd);
return NULL;
}
@@ -134,6 +135,7 @@ static void *map_vdso(const char *path, size_t *_size)
if (addr == MAP_FAILED) {
fprintf(stderr, "%s: Failed to map '%s': %s\n", program_name,
path, strerror(errno));
+ close(fd);
return NULL;
}
@@ -143,6 +145,7 @@ static void *map_vdso(const char *path, size_t *_size)
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
fprintf(stderr, "%s: '%s' is not an ELF file\n", program_name,
path);
+ close(fd);
return NULL;
}
@@ -154,6 +157,7 @@ static void *map_vdso(const char *path, size_t *_size)
default:
fprintf(stderr, "%s: '%s' has invalid ELF class\n",
program_name, path);
+ close(fd);
return NULL;
}
@@ -165,6 +169,7 @@ static void *map_vdso(const char *path, size_t *_size)
default:
fprintf(stderr, "%s: '%s' has invalid ELF data order\n",
program_name, path);
+ close(fd);
return NULL;
}
@@ -172,15 +177,18 @@ static void *map_vdso(const char *path, size_t *_size)
fprintf(stderr,
"%s: '%s' has invalid ELF machine (expected EM_MIPS)\n",
program_name, path);
+ close(fd);
return NULL;
} else if (swap_uint16(ehdr->e_type) != ET_DYN) {
fprintf(stderr,
"%s: '%s' has invalid ELF type (expected ET_DYN)\n",
program_name, path);
+ close(fd);
return NULL;
}
*_size = stat.st_size;
+ close(fd);
return addr;
}
@@ -284,10 +292,12 @@ int main(int argc, char **argv)
/* Calculate and write symbol offsets to <output file> */
if (!get_symbols(dbg_vdso_path, dbg_vdso)) {
unlink(out_path);
+ fclose(out_file);
return EXIT_FAILURE;
}
fprintf(out_file, "};\n");
+ fclose(out_file);
return EXIT_SUCCESS;
}
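
The hunks above plug the descriptor leaks by closing fd (and out_file) on every early return. An alternative shape keeps a single close path; a sketch under the assumption that the mapping stays valid once the descriptor is closed, which mmap() guarantees (map_file() is a made-up name, not the tool's API):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

static void *map_file(const char *path, size_t *size)
{
	struct stat st;
	void *addr = NULL;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return NULL;
	if (fstat(fd, &st) != 0)
		goto out;

	addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (addr == MAP_FAILED)
		addr = NULL;
	else
		*size = st.st_size;
out:
	close(fd);		/* single exit point: no leak on any path */
	return addr;
}

int main(int argc, char **argv)
{
	size_t size;
	void *p = map_file(argc > 1 ? argv[1] : "/etc/hostname", &size);

	if (p)
		printf("mapped %zu bytes\n", size);
	return p ? EXIT_SUCCESS : EXIT_FAILURE;
}

The patch keeps genvdso's existing early-return style, which is why each return simply gains its own close().
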
diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c
index e22b422f282c..9fdc84fc3985 100644
--- a/arch/mips/vdso/gettimeofday.c
+++ b/arch/mips/vdso/gettimeofday.c
@@ -18,6 +18,12 @@
#include <asm/unistd.h>
#include <asm/vdso.h>
+#if MIPS_ISA_REV < 6
+#define VDSO_SYSCALL_CLOBBERS "hi", "lo",
+#else
+#define VDSO_SYSCALL_CLOBBERS
+#endif
+
#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
static __always_inline long gettimeofday_fallback(struct timeval *_tv,
@@ -34,7 +40,9 @@ static __always_inline long gettimeofday_fallback(struct timeval *_tv,
: "=r" (ret), "=r" (error)
: "r" (tv), "r" (tz), "r" (nr)
: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
- "$14", "$15", "$24", "$25", "hi", "lo", "memory");
+ "$14", "$15", "$24", "$25",
+ VDSO_SYSCALL_CLOBBERS
+ "memory");
return error ? -ret : ret;
}
@@ -55,7 +63,9 @@ static __always_inline long clock_gettime_fallback(clockid_t _clkid,
: "=r" (ret), "=r" (error)
: "r" (clkid), "r" (ts), "r" (nr)
: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
- "$14", "$15", "$24", "$25", "hi", "lo", "memory");
+ "$14", "$15", "$24", "$25",
+ VDSO_SYSCALL_CLOBBERS
+ "memory");
return error ? -ret : ret;
}
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
index 254703653b6f..f34dc9bc6758 100644
--- a/arch/nds32/mm/cacheflush.c
+++ b/arch/nds32/mm/cacheflush.c
@@ -239,7 +239,7 @@ void flush_dcache_page(struct page *page)
{
struct address_space *mapping;
- mapping = page_mapping(page);
+ mapping = page_mapping_file(page);
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
else {
diff --git a/arch/openrisc/include/asm/barrier.h b/arch/openrisc/include/asm/barrier.h
new file mode 100644
index 000000000000..7538294721be
--- /dev/null
+++ b/arch/openrisc/include/asm/barrier.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#define mb() asm volatile ("l.msync" ::: "memory")
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASM_BARRIER_H */
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index bbf5c79cce7a..8b204cd1f531 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -58,8 +58,12 @@
/* Ensure that addr is below task's addr_limit */
#define __addr_ok(addr) ((unsigned long) addr < get_fs())
-#define access_ok(type, addr, size) \
- __range_ok((unsigned long)addr, (unsigned long)size)
+#define access_ok(type, addr, size) \
+({ \
+ unsigned long __ao_addr = (unsigned long)(addr); \
+ unsigned long __ao_size = (unsigned long)(size); \
+ __range_ok(__ao_addr, __ao_size); \
+})
/*
* These are the main single-value transfer routines. They automatically
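
Caching addr and size in local temporaries, as the reworked access_ok() above does, guarantees that arguments with side effects are evaluated exactly once before __range_ok() sees them. A user-space illustration of the hazard the statement-expression form avoids (GNU C extension, as used in the kernel; the 0x1000 limit below stands in for get_fs() and is purely illustrative):

#include <stdio.h>

#define RANGE_OK_TWICE(addr, size) \
	((unsigned long)(addr) < 0x1000UL && \
	 (unsigned long)(addr) + (unsigned long)(size) <= 0x1000UL)

#define RANGE_OK_ONCE(addr, size) ({				\
	unsigned long __a = (unsigned long)(addr);		\
	unsigned long __s = (unsigned long)(size);		\
	__a < 0x1000UL && __a + __s <= 0x1000UL;		\
})

int main(void)
{
	unsigned long p = 0x100, q = 0x100;

	(void)RANGE_OK_TWICE(p++, 16);	/* 'p++' runs twice */
	(void)RANGE_OK_ONCE(q++, 16);	/* 'q++' runs once */

	printf("p=%#lx q=%#lx\n", p, q);	/* p advanced by 2, q by 1 */
	return 0;
}
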
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index ee6159d2ed22..01b59d2ce174 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -1170,13 +1170,13 @@ ENTRY(__sys_clone)
l.movhi r29,hi(sys_clone)
l.ori r29,r29,lo(sys_clone)
l.j _fork_save_extra_regs_and_call
- l.addi r7,r1,0
+ l.nop
ENTRY(__sys_fork)
l.movhi r29,hi(sys_fork)
l.ori r29,r29,lo(sys_fork)
l.j _fork_save_extra_regs_and_call
- l.addi r3,r1,0
+ l.nop
ENTRY(sys_rt_sigreturn)
l.jal _sys_rt_sigreturn
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index 9d28ab14d139..f3a7375ac3cd 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -281,6 +281,8 @@ void calibrate_delay(void)
pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
+
+ of_node_put(cpu);
}
void __init setup_arch(char **cmdline_p)
diff --git a/arch/openrisc/kernel/stacktrace.c b/arch/openrisc/kernel/stacktrace.c
index 43f140a28bc7..54d38809e22c 100644
--- a/arch/openrisc/kernel/stacktrace.c
+++ b/arch/openrisc/kernel/stacktrace.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/processor.h>
@@ -68,12 +69,25 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
unsigned long *sp = NULL;
+ if (!try_get_task_stack(tsk))
+ return;
+
if (tsk == current)
sp = (unsigned long *) &sp;
- else
- sp = (unsigned long *) KSTK_ESP(tsk);
+ else {
+ unsigned long ksp;
+
+ /* Locate stack from kernel context */
+ ksp = task_thread_info(tsk)->ksp;
+ ksp += STACK_FRAME_OVERHEAD; /* redzone */
+ ksp += sizeof(struct pt_regs);
+
+ sp = (unsigned long *) ksp;
+ }
unwind_stack(trace, sp, save_stack_address_nosched);
+
+ put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
index b747bf1fc1b6..4272d9123f9e 100644
--- a/arch/openrisc/mm/cache.c
+++ b/arch/openrisc/mm/cache.c
@@ -20,7 +20,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-static void cache_loop(struct page *page, const unsigned int reg)
+static __always_inline void cache_loop(struct page *page, const unsigned int reg)
{
unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 118953d41763..6dd4171c9530 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -212,6 +212,8 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags);
}
+#define atomic64_set_release(v, i) atomic64_set((v), (i))
+
static __inline__ s64
atomic64_read(const atomic64_t *v)
{
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
index dbaaca84f27f..640d46edf32e 100644
--- a/arch/parisc/include/asm/barrier.h
+++ b/arch/parisc/include/asm/barrier.h
@@ -26,6 +26,67 @@
#define __smp_rmb() mb()
#define __smp_wmb() mb()
+#define __smp_store_release(p, v) \
+do { \
+ typeof(p) __p = (p); \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(*p)) (v) }; \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile("stb,ma %0,0(%1)" \
+ : : "r"(*(__u8 *)__u.__c), "r"(__p) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm volatile("sth,ma %0,0(%1)" \
+ : : "r"(*(__u16 *)__u.__c), "r"(__p) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm volatile("stw,ma %0,0(%1)" \
+ : : "r"(*(__u32 *)__u.__c), "r"(__p) \
+ : "memory"); \
+ break; \
+ case 8: \
+ if (IS_ENABLED(CONFIG_64BIT)) \
+ asm volatile("std,ma %0,0(%1)" \
+ : : "r"(*(__u64 *)__u.__c), "r"(__p) \
+ : "memory"); \
+ break; \
+ } \
+} while (0)
+
+#define __smp_load_acquire(p) \
+({ \
+ union { typeof(*p) __val; char __c[1]; } __u; \
+ typeof(p) __p = (p); \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile("ldb,ma 0(%1),%0" \
+ : "=r"(*(__u8 *)__u.__c) : "r"(__p) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm volatile("ldh,ma 0(%1),%0" \
+ : "=r"(*(__u16 *)__u.__c) : "r"(__p) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm volatile("ldw,ma 0(%1),%0" \
+ : "=r"(*(__u32 *)__u.__c) : "r"(__p) \
+ : "memory"); \
+ break; \
+ case 8: \
+ if (IS_ENABLED(CONFIG_64BIT)) \
+ asm volatile("ldd,ma 0(%1),%0" \
+ : "=r"(*(__u64 *)__u.__c) : "r"(__p) \
+ : "memory"); \
+ break; \
+ } \
+ __u.__val; \
+})
#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
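
The new __smp_store_release()/__smp_load_acquire() above give parisc native one-way barriers instead of falling back to a full mb() for every ordered access. Their intended pairing, shown as a user-space analogue with C11 atomics (ordering contract only, not parisc code):

#include <stdatomic.h>
#include <stdio.h>

static int payload;
static atomic_int ready;

static void producer(void)
{
	payload = 42;
	/* everything written above is visible before 'ready' reads as set */
	atomic_store_explicit(&ready, 1, memory_order_release);
}

static void consumer(void)
{
	/* observing 'ready' set guarantees 'payload' is visible */
	if (atomic_load_explicit(&ready, memory_order_acquire))
		printf("payload = %d\n", payload);
}

int main(void)
{
	producer();
	consumer();
	return 0;
}
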
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index ab5c215cf46c..a736dc59bbef 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
unsigned int new_);
extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
+extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
@@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr,
(unsigned int)old, (unsigned int)new_);
+ case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
}
__cmpxchg_called_with_bad_pointer();
return old;
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 0ca254085a66..c152c30c2d06 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -380,7 +380,11 @@ static inline int eirr_to_irq(unsigned long eirr)
/*
* IRQ STACK - used for irq handler
*/
+#ifdef CONFIG_64BIT
+#define IRQ_STACK_SIZE (4096 << 4) /* 64k irq stack size */
+#else
#define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */
+#endif
union irq_stack_union {
unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index 70ffbcf889b8..2e4d1f05a926 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign
_atomic_spin_unlock_irqrestore(ptr, flags);
return (unsigned long)prev;
}
+
+u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
+{
+ unsigned long flags;
+ u8 prev;
+
+ _atomic_spin_lock_irqsave(ptr, flags);
+ if ((prev = *ptr) == old)
+ *ptr = new;
+ _atomic_spin_unlock_irqrestore(ptr, flags);
+ return prev;
+}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index aae9b0d71c1e..10a52664e29f 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -607,7 +607,7 @@ void __init mem_init(void)
> BITS_PER_LONG);
high_memory = __va((max_pfn << PAGE_SHIFT));
- set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
+ set_max_mapnr(max_low_pfn);
free_all_bootmem();
#ifdef CONFIG_PA11
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6f475dc5829b..f0e09d5f0bed 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -139,7 +139,7 @@ config PPC
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select ARCH_HAS_SG_CHAIN
- select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION)
+ select ARCH_HAS_STRICT_KERNEL_RWX if (PPC32 && !HIBERNATION)
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE if PPC64
select ARCH_HAS_UBSAN_SANITIZE_ALL
@@ -152,6 +152,7 @@ config PPC
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
select BUILDTIME_EXTABLE_SORT
@@ -207,7 +208,7 @@ config PPC
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
- select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S)
+ select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC64 && PPC_BOOK3S && SMP
select HAVE_OPROFILE
select HAVE_OPTPROBES if PPC64
select HAVE_PERF_EVENTS
@@ -708,7 +709,7 @@ config PPC_64K_PAGES
config PPC_256K_PAGES
bool "256k page size"
- depends on 44x && !STDBINUTILS
+ depends on 44x && !STDBINUTILS && !PPC_47x
help
Make the page size 256k.
@@ -1009,6 +1010,19 @@ config FSL_RIO
source "drivers/rapidio/Kconfig"
+config PPC_RTAS_FILTER
+ bool "Enable filtering of RTAS syscalls"
+ default y
+ depends on PPC_RTAS
+ help
+ The RTAS syscall API has security issues that could be used to
+ compromise system integrity. This option enforces restrictions on the
+ RTAS calls and arguments passed by userspace programs to mitigate
+ these issues.
+
+ Say Y unless you know what you are doing and the filter is causing
+ problems for you.
+
endmenu
config NONSTATIC_KERNEL
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index fd63cd914a74..ffe0cf0f0bea 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -350,6 +350,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR
config FAIL_IOMMU
bool "Fault-injection capability for IOMMU"
depends on FAULT_INJECTION
+ depends on PCI || IBMVIO
help
Provide fault-injection capability for IOMMU. Each device can
be selectively enabled via the fail_iommu property.
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 8954108df457..f51e21ea5349 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -251,7 +251,6 @@ endif
cpu-as-$(CONFIG_4xx) += -Wa,-m405
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
-cpu-as-$(CONFIG_E200) += -Wa,-me200
cpu-as-$(CONFIG_E500) += -Wa,-me500
# When using '-many -mpower4' gas will first try and find a matching power4
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 7d5ddf53750c..7a83b5e136e0 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -122,7 +122,7 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
elf_util.c $(zlib-y) devtree.c stdlib.c \
oflib.c ofconsole.c cuboot.c
-src-wlib-$(CONFIG_PPC_MPC52XX) += mpc52xx-psc.c
+src-wlib-$(CONFIG_PPC_MPC52xx) += mpc52xx-psc.c
src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S opal.c
ifndef CONFIG_PPC64_BOOT_WRAPPER
src-wlib-y += crtsavres.S
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
index 48e3743faedf..83c78427c20b 100644
--- a/arch/powerpc/boot/serial.c
+++ b/arch/powerpc/boot/serial.c
@@ -127,7 +127,7 @@ int serial_console_init(void)
dt_is_compatible(devp, "fsl,cpm2-smc-uart"))
rc = cpm_console_init(devp, &serial_cd);
#endif
-#ifdef CONFIG_PPC_MPC52XX
+#ifdef CONFIG_PPC_MPC52xx
else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart"))
rc = mpc5200_psc_console_init(devp, &serial_cd);
#endif
diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config
index c03d0fb16665..9144e4cbc42e 100644
--- a/arch/powerpc/configs/85xx-hw.config
+++ b/arch/powerpc/configs/85xx-hw.config
@@ -2,7 +2,6 @@ CONFIG_AQUANTIA_PHY=y
CONFIG_AT803X_PHY=y
CONFIG_ATA=y
CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_BLK_DEV_SR=y
CONFIG_BROADCOM_PHY=y
CONFIG_C293_PCIE=y
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index 12f397d403c6..8ddab7c240d9 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -48,7 +48,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SYM53C8XX_2=y
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index a203b1cf67d3..796d34734b91 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -46,7 +46,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SYM53C8XX_2=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 67c39f4acede..0123e3b00901 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -63,7 +63,6 @@ CONFIG_CDROM_PKTCDVD=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 59e47ec85336..9df11592880d 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -42,7 +42,6 @@ CONFIG_BLK_DEV_RAM_SIZE=8192
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_IPR=y
CONFIG_ATA=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 6daa56f8895c..60839eeada8b 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -61,7 +61,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_CHR_DEV_OSST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=y
CONFIG_SCSI_CONSTANTS=y
@@ -111,7 +110,6 @@ CONFIG_FB_NVIDIA=y
CONFIG_FB_NVIDIA_I2C=y
CONFIG_FB_RADEON=y
# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 62948d198d7f..212a68b65d9f 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -121,7 +121,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 6ab34e60495f..5a533c9f6b3d 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -109,7 +109,6 @@ CONFIG_BLK_DEV_NVME=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SCAN_ASYNC=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 5033e630afea..d9fdf05599c9 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -93,7 +93,6 @@ CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 41d85cb3c9a2..a5e36faef391 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -61,7 +61,6 @@ CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 7ee736f20774..e30af76f4753 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -374,7 +374,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_ENCLOSURE=m
@@ -780,7 +779,6 @@ CONFIG_FB_TRIDENT=m
CONFIG_FB_SM501=m
CONFIG_FB_IBM_GXT4500=y
CONFIG_LCD_PLATFORM=m
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 0dd5cf7b566d..780552be29f9 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -97,7 +97,6 @@ CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index ffeaed63675b..82611dea62bd 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -77,7 +77,6 @@ CONFIG_EEPROM_AT24=y
# CONFIG_CXL is not set
CONFIG_BLK_DEV_SD=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SCAN_ASYNC=y
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index ff71566dadee..76db1c5000bd 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -221,15 +221,34 @@ static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
*/
static __inline__ int fls(unsigned int x)
{
- return 32 - __builtin_clz(x);
+ int lz;
+
+ if (__builtin_constant_p(x))
+ return x ? 32 - __builtin_clz(x) : 0;
+ asm("cntlzw %0,%1" : "=r" (lz) : "r" (x));
+ return 32 - lz;
}
#include <asm-generic/bitops/builtin-__fls.h>
+/*
+ * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
+ * instruction; for 32-bit we use the generic version, which does two
+ * 32-bit fls calls.
+ */
+#ifdef CONFIG_PPC64
static __inline__ int fls64(__u64 x)
{
- return 64 - __builtin_clzll(x);
+ int lz;
+
+ if (__builtin_constant_p(x))
+ return x ? 64 - __builtin_clzll(x) : 0;
+ asm("cntlzd %0,%1" : "=r" (lz) : "r" (x));
+ return 64 - lz;
}
+#else
+#include <asm-generic/bitops/fls64.h>
+#endif
#ifdef CONFIG_PPC64
unsigned int __arch_hweight8(unsigned int w);
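
The rewrite above exists because __builtin_clz(0) and __builtin_clzll(0) are undefined, while fls(0) and fls64(0) must return 0; the PowerPC cntlzw/cntlzd instructions are well defined for a zero input, and the __builtin_constant_p() branch keeps an explicit zero guard for compile-time folding. The required semantics in a quick user-space check (sketch):

#include <stdio.h>

static int fls_sketch(unsigned int x)
{
	/* guard the zero case: __builtin_clz(0) is undefined behaviour */
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	printf("fls(0)=%d fls(1)=%d fls(0x80000000)=%d\n",
	       fls_sketch(0), fls_sketch(1), fls_sketch(0x80000000u));
	/* expected: 0 1 32 */
	return 0;
}
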
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 751cf931bb3f..a4f4820826b7 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -434,9 +434,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
if (pte_val(*ptep) & _PAGE_HASHPTE)
flush_hash_entry(mm, ptep, addr);
__asm__ __volatile__("\
- stw%U0%X0 %2,%0\n\
+ stw%X0 %2,%0\n\
eieio\n\
- stw%U0%X0 %L2,%1"
+ stw%X1 %L2,%1"
: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
: "r" (pte) : "memory");
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 9a3798660cef..fc68c0fc08b5 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -145,6 +145,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
extern int hash__has_transparent_hugepage(void);
#endif
+static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
+{
+ BUG();
+ return pmd;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index f82ee8a3b561..790e4a946f6e 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -233,7 +233,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
*/
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
- return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
+ return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
(_PAGE_PTE | H_PAGE_THP_HUGE));
}
@@ -259,6 +259,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
new file mode 100644
index 000000000000..aa54ac2e5659
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
+#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
+
+DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
+
+/* Prototype for function defined in exceptions-64s.S */
+void do_uaccess_flush(void);
+
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size)
+{
+}
+
+static inline void prevent_user_access(void __user *to, const void __user *from,
+ unsigned long size)
+{
+ if (static_branch_unlikely(&uaccess_flush_key))
+ do_uaccess_flush();
+}
+
+#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 855dbae6d351..008eb63fa851 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -985,10 +985,25 @@ extern struct page *pgd_page(pgd_t pgd);
#define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS)
#define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS)
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
-#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1))
-#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1))
-#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1))
+static inline unsigned long pgd_index(unsigned long address)
+{
+ return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
+}
+
+static inline unsigned long pud_index(unsigned long address)
+{
+ return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+
+static inline unsigned long pmd_index(unsigned long address)
+{
+ return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+}
+
+static inline unsigned long pte_index(unsigned long address)
+{
+ return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+}
/*
* Find an entry in a page-table-directory. We combine the address region
@@ -1253,7 +1268,9 @@ extern void serialize_against_pte_lookup(struct mm_struct *mm);
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
- return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
+ if (radix_enabled())
+ return radix__pmd_mkdevmap(pmd);
+ return hash__pmd_mkdevmap(pmd);
}
static inline int pmd_devmap(pmd_t pmd)
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 7d1a3d1543fc..04b2bffbc5c9 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -204,8 +204,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
* from ptesync, it should probably go into update_mmu_cache, rather
* than set_pte_at (which is used to set ptes unrelated to faults).
*
- * Spurious faults to vmalloc region are not tolerated, so there is
- * a ptesync in flush_cache_vmap.
+ * Spurious faults on kernel memory are not tolerated, so there
+ * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
+ * the pte update sequence from ISA Book III 6.10 Translation Table
+ * Update Synchronization Requirements.
*/
}
@@ -255,6 +257,11 @@ extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
extern int radix__has_transparent_hugepage(void);
#endif
+static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
+}
+
extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys);
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 31733a95bbd0..4cd6e19ee90f 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -52,7 +52,7 @@ void __patch_exception(int exc, unsigned long addr);
#endif
#define OP_RT_RA_MASK 0xffff0000UL
-#define LIS_R2 0x3c020000UL
+#define LIS_R2 0x3c400000UL
#define ADDIS_R2_R12 0x3c4c0000UL
#define ADDI_R2_R2 0x38420000UL
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index 7897d16e0990..727d4b321937 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -7,7 +7,7 @@
#include <linux/bug.h>
#include <asm/cputable.h>
-static inline bool early_cpu_has_feature(unsigned long feature)
+static __always_inline bool early_cpu_has_feature(unsigned long feature)
{
return !!((CPU_FTRS_ALWAYS & feature) ||
(CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
@@ -46,7 +46,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
return static_branch_likely(&cpu_feature_keys[i]);
}
#else
-static inline bool cpu_has_feature(unsigned long feature)
+static __always_inline bool cpu_has_feature(unsigned long feature)
{
return early_cpu_has_feature(feature);
}
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 59b35b93eade..d90093a88e09 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -411,7 +411,6 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT)
-#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
#define CPU_FTRS_PPC970 (CPU_FTR_LWSYNC | \
@@ -509,8 +508,6 @@ enum {
CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 |
CPU_FTRS_CLASSIC32 |
-#else
- CPU_FTRS_GENERIC_32 |
#endif
#ifdef CONFIG_PPC_8xx
CPU_FTRS_8XX |
@@ -585,8 +582,6 @@ enum {
CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 &
CPU_FTRS_CLASSIC32 &
-#else
- CPU_FTRS_GENERIC_32 &
#endif
#ifdef CONFIG_PPC_8xx
CPU_FTRS_8XX &
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 151dff555f50..9d9e323f5e9b 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -66,8 +66,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
#define mfdcr(rn) \
({unsigned int rval; \
if (__builtin_constant_p(rn) && rn < 1024) \
- asm volatile("mfdcr %0," __stringify(rn) \
- : "=r" (rval)); \
+ asm volatile("mfdcr %0, %1" : "=r" (rval) \
+ : "n" (rn)); \
else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
rval = mfdcrx(rn); \
else \
@@ -77,8 +77,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
#define mtdcr(rn, v) \
do { \
if (__builtin_constant_p(rn) && rn < 1024) \
- asm volatile("mtdcr " __stringify(rn) ",%0" \
- : : "r" (v)); \
+ asm volatile("mtdcr %0, %1" \
+ : : "n" (rn), "r" (v)); \
else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
mtdcrx(rn, v); \
else \
diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
index 7c1d8e74b25d..8b196720e9a0 100644
--- a/arch/powerpc/include/asm/drmem.h
+++ b/arch/powerpc/include/asm/drmem.h
@@ -12,6 +12,8 @@
#ifndef _ASM_POWERPC_LMB_H
#define _ASM_POWERPC_LMB_H
+#include <linux/sched.h>
+
struct drmem_lmb {
u64 base_addr;
u32 drc_index;
@@ -22,18 +24,32 @@ struct drmem_lmb {
struct drmem_lmb_info {
struct drmem_lmb *lmbs;
int n_lmbs;
- u32 lmb_size;
+ u64 lmb_size;
};
extern struct drmem_lmb_info *drmem_info;
+static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
+ const struct drmem_lmb *start)
+{
+ /*
+ * DLPAR code paths can take several milliseconds per element
+ * when interacting with firmware. Ensure that we don't
+ * unfairly monopolize the CPU.
+ */
+ if (((++lmb - start) % 16) == 0)
+ cond_resched();
+
+ return lmb;
+}
+
#define for_each_drmem_lmb_in_range(lmb, start, end) \
- for ((lmb) = (start); (lmb) <= (end); (lmb)++)
+ for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))
#define for_each_drmem_lmb(lmb) \
for_each_drmem_lmb_in_range((lmb), \
&drmem_info->lmbs[0], \
- &drmem_info->lmbs[drmem_info->n_lmbs - 1])
+ &drmem_info->lmbs[drmem_info->n_lmbs])
/*
* The of_drconf_cell_v1 struct defines the layout of the LMB data
@@ -67,7 +83,7 @@ struct of_drconf_cell_v2 {
#define DRCONF_MEM_AI_INVALID 0x00000040
#define DRCONF_MEM_RESERVED 0x00000080
-static inline u32 drmem_lmb_size(void)
+static inline u64 drmem_lmb_size(void)
{
return drmem_info->lmb_size;
}
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index a86feddddad0..35fb5b11955a 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -90,11 +90,18 @@
nop; \
nop
+#define ENTRY_FLUSH_SLOT \
+ ENTRY_FLUSH_FIXUP_SECTION; \
+ nop; \
+ nop; \
+ nop;
+
/*
* r10 must be free to use, r13 must be paca
*/
#define INTERRUPT_TO_KERNEL \
- STF_ENTRY_BARRIER_SLOT
+ STF_ENTRY_BARRIER_SLOT; \
+ ENTRY_FLUSH_SLOT
/*
* Macros for annotating the expected destination of (h)rfid
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 40a6c9261a6b..5bf3f0779b93 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -205,6 +205,22 @@ label##3: \
FTR_ENTRY_OFFSET 955b-956b; \
.popsection;
+#define UACCESS_FLUSH_FIXUP_SECTION \
+959: \
+ .pushsection __uaccess_flush_fixup,"a"; \
+ .align 2; \
+960: \
+ FTR_ENTRY_OFFSET 959b-960b; \
+ .popsection;
+
+#define ENTRY_FLUSH_FIXUP_SECTION \
+957: \
+ .pushsection __entry_flush_fixup,"a"; \
+ .align 2; \
+958: \
+ FTR_ENTRY_OFFSET 957b-958b; \
+ .popsection;
+
#define RFI_FLUSH_FIXUP_SECTION \
951: \
.pushsection __rfi_flush_fixup,"a"; \
@@ -237,8 +253,11 @@ label##3: \
#include <linux/types.h>
extern long stf_barrier_fallback;
+extern long entry_flush_fallback;
extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
+extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 2a7b01f97a56..1eabc20dddd3 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -35,6 +35,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
{
int oldval = 0, ret;
+ allow_write_to_user(uaddr, sizeof(*uaddr));
pagefault_disable();
switch (op) {
@@ -61,6 +62,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
*oval = oldval;
+ prevent_write_to_user(uaddr, sizeof(*uaddr));
return ret;
}
@@ -74,6 +76,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
+ allow_write_to_user(uaddr, sizeof(*uaddr));
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
@@ -94,6 +97,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "cc", "memory");
*uval = prev;
+ prevent_write_to_user(uaddr, sizeof(*uaddr));
return ret;
}
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
new file mode 100644
index 000000000000..f0f8e36ad71f
--- /dev/null
+++ b/arch/powerpc/include/asm/kup.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KUP_H_
+#define _ASM_POWERPC_KUP_H_
+
+#ifndef __ASSEMBLY__
+
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/kup-radix.h>
+#else
+static inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size) { }
+static inline void prevent_user_access(void __user *to, const void __user *from,
+ unsigned long size) { }
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+static inline void allow_read_from_user(const void __user *from, unsigned long size)
+{
+ allow_user_access(NULL, from, size);
+}
+
+static inline void allow_write_to_user(void __user *to, unsigned long size)
+{
+ allow_user_access(to, NULL, size);
+}
+
+static inline void prevent_read_from_user(const void __user *from, unsigned long size)
+{
+ prevent_user_access(NULL, from, size);
+}
+
+static inline void prevent_write_to_user(void __user *to, unsigned long size)
+{
+ prevent_user_access(to, NULL, size);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_KUP_H_ */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index a790d5cf6ea3..684e8ae00d16 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -163,4 +163,7 @@
#define KVM_INST_FETCH_FAILED -1
+/* Extract PO and XOP opcode fields */
+#define PO_XOP_OPCODE_MASK 0xfc0007fe
+
#endif /* __POWERPC_KVM_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 2f95e38f0549..7b54d8412367 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -68,7 +68,8 @@
#define KVM_ARCH_WANT_MMU_NOTIFIER
extern int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end,
+ bool blockable);
extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index a47de82fb8e2..bda87cbf106d 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -71,6 +71,9 @@ struct machdep_calls {
int (*pcibios_root_bridge_prepare)(struct pci_host_bridge
*bridge);
+ /* finds all the pci_controllers present at boot */
+ void (*discover_phbs)(void);
+
/* To setup PHBs when using automatic OF platform driver for PCI */
int (*pci_setup_phb)(struct pci_controller *host);
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index ae953958c0f3..d93bdcaa4a46 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -204,7 +204,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
- switch_mm(prev, next, current);
+ switch_mm_irqs_off(prev, next, current);
}
/* We don't currently use enter_lazy_tlb() for anything */
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index b321c82b3624..913878d8e3be 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -151,9 +151,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
*/
if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
__asm__ __volatile__("\
- stw%U0%X0 %2,%0\n\
+ stw%X0 %2,%0\n\
eieio\n\
- stw%U0%X0 %L2,%1"
+ stw%X1 %L2,%1"
: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
: "r" (pte) : "memory");
return;
diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
index dce863a7635c..8e5b7d0b851c 100644
--- a/arch/powerpc/include/asm/percpu.h
+++ b/arch/powerpc/include/asm/percpu.h
@@ -10,8 +10,6 @@
#ifdef CONFIG_SMP
-#include <asm/paca.h>
-
#define __my_cpu_offset local_paca->data_offset
#endif /* CONFIG_SMP */
@@ -19,4 +17,6 @@
#include <asm-generic/percpu.h>
+#include <asm/paca.h>
+
#endif /* _ASM_POWERPC_PERCPU_H_ */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 52fadded5c1e..45bbcffcb7b6 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -386,7 +386,6 @@ struct thread_struct {
#else
#define INIT_THREAD { \
.ksp = INIT_SP, \
- .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
.addr_limit = KERNEL_DS, \
.fpexc_mode = 0, \
.ppr = INIT_PPR, \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index af9971661512..494b0283f212 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -788,7 +788,7 @@
#define THRM1_TIN (1 << 31)
#define THRM1_TIV (1 << 30)
#define THRM1_THRES(x) ((x&0x7f)<<23)
-#define THRM3_SITV(x) ((x&0x3fff)<<1)
+#define THRM3_SITV(x) ((x & 0x1fff) << 1)
#define THRM1_TID (1<<2)
#define THRM1_TIE (1<<1)
#define THRM1_V (1<<0)
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index ccf44c135389..3b45a64e491e 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -84,12 +84,19 @@ static inline bool security_ftr_enabled(unsigned long feature)
// Software required to flush link stack on context switch
#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
+// The L1-D cache should be flushed when entering the kernel
+#define SEC_FTR_L1D_FLUSH_ENTRY 0x0000000000004000ull
+
+// The L1-D cache should be flushed after user accesses from the kernel
+#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull
// Features enabled by default
#define SEC_FTR_DEFAULT \
(SEC_FTR_L1D_FLUSH_HV | \
SEC_FTR_L1D_FLUSH_PR | \
SEC_FTR_BNDS_CHK_SPEC_BAR | \
+ SEC_FTR_L1D_FLUSH_ENTRY | \
+ SEC_FTR_L1D_FLUSH_UACCESS | \
SEC_FTR_FAVOUR_SECURITY)
#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
diff --git a/arch/powerpc/include/asm/setjmp.h b/arch/powerpc/include/asm/setjmp.h
index 279d03a1eec6..6941fe202bc8 100644
--- a/arch/powerpc/include/asm/setjmp.h
+++ b/arch/powerpc/include/asm/setjmp.h
@@ -12,7 +12,9 @@
#define JMP_BUF_LEN 23
-extern long setjmp(long *);
-extern void longjmp(long *, long);
+typedef long jmp_buf[JMP_BUF_LEN];
+
+extern int setjmp(jmp_buf env) __attribute__((returns_twice));
+extern void longjmp(jmp_buf env, int val) __attribute__((noreturn));
#endif /* _ASM_POWERPC_SETJMP_H */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 65676e2325b8..6f2f4497e13b 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -52,12 +52,16 @@ enum l1d_flush_type {
};
void setup_rfi_flush(enum l1d_flush_type, bool enable);
+void setup_entry_flush(bool enable);
+void setup_uaccess_flush(bool enable);
void do_rfi_flush_fixups(enum l1d_flush_type types);
#ifdef CONFIG_PPC_BARRIER_NOSPEC
void setup_barrier_nospec(void);
#else
static inline void setup_barrier_nospec(void) { };
#endif
+void do_uaccess_flush_fixups(enum l1d_flush_type types);
+void do_entry_flush_fixups(enum l1d_flush_type types);
void do_barrier_nospec_fixups(bool enable);
extern bool barrier_nospec_enabled;
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index f0e571b2dc7c..a6073fecdacd 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -76,19 +76,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
return false;
return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
}
-static inline void mm_reset_thread_local(struct mm_struct *mm)
-{
- WARN_ON(atomic_read(&mm->context.copros) > 0);
- /*
- * It's possible for mm_access to take a reference on mm_users to
- * access the remote mm from another thread, but it's not allowed
- * to set mm_cpumask, so mm_users may be > 1 here.
- */
- WARN_ON(current->mm != mm);
- atomic_set(&mm->context.active_cpus, 1);
- cpumask_clear(mm_cpumask(mm));
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-}
#else /* CONFIG_PPC_BOOK3S_64 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 38a25ff8afb7..ab6612e35ace 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -6,6 +6,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
+#include <asm/kup.h>
/*
* The fs value determines whether argument validity checking should be
@@ -91,9 +92,14 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
- __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)
+
+#define __get_user_allowed(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
+#define __put_user_allowed(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)
#define __get_user_inatomic(x, ptr) \
__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
@@ -138,7 +144,7 @@ extern long __put_user_bad(void);
: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
-#define __put_user_size(x, ptr, size, retval) \
+#define __put_user_size_allowed(x, ptr, size, retval) \
do { \
retval = 0; \
switch (size) { \
@@ -150,14 +156,28 @@ do { \
} \
} while (0)
-#define __put_user_nocheck(x, ptr, size) \
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ allow_write_to_user(ptr, size); \
+ __put_user_size_allowed(x, ptr, size, retval); \
+ prevent_write_to_user(ptr, size); \
+} while (0)
+
+#define __put_user_nocheck(x, ptr, size, do_allow) \
({ \
long __pu_err; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ __typeof__(size) __pu_size = (size); \
+ \
if (!is_kernel_addr((unsigned long)__pu_addr)) \
might_fault(); \
- __chk_user_ptr(ptr); \
- __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __chk_user_ptr(__pu_addr); \
+ if (do_allow) \
+ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
+ else \
+ __put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \
+ \
__pu_err; \
})
@@ -165,9 +185,13 @@ do { \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ __typeof__(size) __pu_size = (size); \
+ \
might_fault(); \
- if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
- __put_user_size((x), __pu_addr, (size), __pu_err); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, __pu_size)) \
+ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
+ \
__pu_err; \
})
@@ -175,8 +199,12 @@ do { \
({ \
long __pu_err; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- __chk_user_ptr(ptr); \
- __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ __typeof__(size) __pu_size = (size); \
+ \
+ __chk_user_ptr(__pu_addr); \
+ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
+ \
__pu_err; \
})
@@ -234,7 +262,7 @@ extern long __get_user_bad(void);
: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
-#define __get_user_size(x, ptr, size, retval) \
+#define __get_user_size_allowed(x, ptr, size, retval) \
do { \
retval = 0; \
__chk_user_ptr(ptr); \
@@ -249,6 +277,13 @@ do { \
} \
} while (0)
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ allow_read_from_user(ptr, size); \
+ __get_user_size_allowed(x, ptr, size, retval); \
+ prevent_read_from_user(ptr, size); \
+} while (0)
+
/*
* This is a type: either unsigned long, if the argument fits into
* that type, or otherwise unsigned long long.
@@ -256,17 +291,23 @@ do { \
#define __long_type(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
-#define __get_user_nocheck(x, ptr, size) \
+#define __get_user_nocheck(x, ptr, size, do_allow) \
({ \
long __gu_err; \
__long_type(*(ptr)) __gu_val; \
__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- __chk_user_ptr(ptr); \
+ __typeof__(size) __gu_size = (size); \
+ \
+ __chk_user_ptr(__gu_addr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
might_fault(); \
barrier_nospec(); \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ if (do_allow) \
+ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
+ else \
+ __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
+ \
__gu_err; \
})
@@ -275,12 +316,15 @@ do { \
long __gu_err = -EFAULT; \
__long_type(*(ptr)) __gu_val = 0; \
__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __typeof__(size) __gu_size = (size); \
+ \
might_fault(); \
- if (access_ok(VERIFY_READ, __gu_addr, (size))) { \
+ if (access_ok(VERIFY_READ, __gu_addr, __gu_size)) { \
barrier_nospec(); \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
} \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
+ \
__gu_err; \
})
@@ -289,10 +333,13 @@ do { \
long __gu_err; \
__long_type(*(ptr)) __gu_val; \
__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- __chk_user_ptr(ptr); \
+ __typeof__(size) __gu_size = (size); \
+ \
+ __chk_user_ptr(__gu_addr); \
barrier_nospec(); \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
+ \
__gu_err; \
})
@@ -306,16 +353,22 @@ extern unsigned long __copy_tofrom_user(void __user *to,
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
+ unsigned long ret;
+
barrier_nospec();
- return __copy_tofrom_user(to, from, n);
+ allow_user_access(to, from, n);
+ ret = __copy_tofrom_user(to, from, n);
+ prevent_user_access(to, from, n);
+ return ret;
}
#endif /* __powerpc64__ */
static inline unsigned long raw_copy_from_user(void *to,
const void __user *from, unsigned long n)
{
+ unsigned long ret;
if (__builtin_constant_p(n) && (n <= 8)) {
- unsigned long ret = 1;
+ ret = 1;
switch (n) {
case 1:
@@ -340,27 +393,30 @@ static inline unsigned long raw_copy_from_user(void *to,
}
barrier_nospec();
- return __copy_tofrom_user((__force void __user *)to, from, n);
+ allow_read_from_user(from, n);
+ ret = __copy_tofrom_user((__force void __user *)to, from, n);
+ prevent_read_from_user(from, n);
+ return ret;
}
-static inline unsigned long raw_copy_to_user(void __user *to,
- const void *from, unsigned long n)
+static inline unsigned long
+raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n) && (n <= 8)) {
unsigned long ret = 1;
switch (n) {
case 1:
- __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
+ __put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
break;
case 2:
- __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
+ __put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
break;
case 4:
- __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
+ __put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
break;
case 8:
- __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
+ __put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
break;
}
if (ret == 0)
@@ -370,14 +426,34 @@ static inline unsigned long raw_copy_to_user(void __user *to,
return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
-extern unsigned long __clear_user(void __user *addr, unsigned long size);
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned long ret;
+
+ allow_write_to_user(to, n);
+ ret = raw_copy_to_user_allowed(to, from, n);
+ prevent_write_to_user(to, n);
+ return ret;
+}
+
+unsigned long __arch_clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
+ unsigned long ret = size;
might_fault();
- if (likely(access_ok(VERIFY_WRITE, addr, size)))
- return __clear_user(addr, size);
- return size;
+ if (likely(access_ok(VERIFY_WRITE, addr, size))) {
+ allow_write_to_user(addr, size);
+ ret = __arch_clear_user(addr, size);
+ prevent_write_to_user(addr, size);
+ }
+ return ret;
+}
+
+static inline unsigned long __clear_user(void __user *addr, unsigned long size)
+{
+ return clear_user(addr, size);
}
extern long strncpy_from_user(char *dst, const char __user *src, long count);
@@ -388,4 +464,13 @@ extern long __copy_from_user_flushcache(void *dst, const void __user *src,
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
size_t len);
+#define user_access_begin(type, ptr, len) access_ok(type, ptr, len)
+#define user_access_end() prevent_user_access(NULL, NULL, ~0ul)
+
+#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
+#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
+#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
+#define unsafe_copy_to_user(d, s, l, e) \
+ unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
+
#endif /* _ARCH_POWERPC_UACCESS_H */
diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
index cc79856896a1..4ba87de32be0 100644
--- a/arch/powerpc/include/uapi/asm/errno.h
+++ b/arch/powerpc/include/uapi/asm/errno.h
@@ -2,6 +2,7 @@
#ifndef _ASM_POWERPC_ERRNO_H
#define _ASM_POWERPC_ERRNO_H
+#undef EDEADLOCK
#include <asm-generic/errno.h>
#undef EDEADLOCK
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index d450280e5c29..1e64cfe22a83 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -5,9 +5,6 @@
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-# Avoid clang warnings around longjmp/setjmp declarations
-CFLAGS_crash.o += -ffreestanding
-
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 458b928dbd84..5bef78e2b4c1 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -183,7 +183,7 @@ __init_LPCR_ISA300:
__init_FSCR:
mfspr r3,SPRN_FSCR
- ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
+ ori r3,r3,FSCR_TAR|FSCR_EBB
mtspr SPRN_FSCR,r3
blr
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index f9fe2080ceb9..eed3543aeca4 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -100,7 +100,8 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
if (!tbl)
return 0;
- mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);
+ mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
+ tbl->it_page_shift - 1);
mask += mask - 1;
return mask;
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index a4b31e17492d..5195a4fc3e41 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -346,6 +346,14 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
u64 lpcr;
+ /*
+ * Linux relies on FSCR[DSCR] being clear, so that we can take the
+ * facility unavailable interrupt and track the task's usage of DSCR.
+ * See facility_unavailable_exception().
+ * Clear the bit here so that feat_enable() doesn't set it.
+ */
+ f->fscr_bit_nr = -1;
+
feat_enable(f);
lpcr = mfspr(SPRN_LPCR);
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index fe3c6f3bd3b6..44bb522fb4a2 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -364,14 +364,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
pa = pte_pfn(*ptep);
/* On radix we can do hugepage mappings for io, so handle that */
- if (hugepage_shift) {
- pa <<= hugepage_shift;
- pa |= token & ((1ul << hugepage_shift) - 1);
- } else {
- pa <<= PAGE_SHIFT;
- pa |= token & (PAGE_SIZE - 1);
- }
+ if (!hugepage_shift)
+ hugepage_shift = PAGE_SHIFT;
+ pa <<= PAGE_SHIFT;
+ pa |= token & ((1ul << hugepage_shift) - 1);
return pa;
}
@@ -502,7 +499,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
rc = 1;
if (pe->state & EEH_PE_ISOLATED) {
pe->check_count++;
- if (pe->check_count % EEH_MAX_FAILS == 0) {
+ if (pe->check_count == EEH_MAX_FAILS) {
dn = pci_device_to_OF_node(dev);
if (dn)
location = of_get_property(dn, "ibm,loc-code",
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e1dab9b1e447..344e2758b22d 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -540,7 +540,7 @@ EXC_COMMON_BEGIN(unrecover_mce)
b 1b
-EXC_REAL(data_access, 0x300, 0x80)
+EXC_REAL_OOL(data_access, 0x300, 0x80)
EXC_VIRT(data_access, 0x4300, 0x80, 0x300)
TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)
@@ -572,13 +572,16 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXSLB)
+ b tramp_data_access_slb
+EXC_REAL_END(data_access_slb, 0x380, 0x80)
+
+TRAMP_REAL_BEGIN(tramp_data_access_slb)
EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
mr r12,r3 /* save r3 */
mfspr r3,SPRN_DAR
mfspr r11,SPRN_SRR1
crset 4*cr6+eq
BRANCH_TO_COMMON(r10, slb_miss_common)
-EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
SET_SCRATCH0(r13)
@@ -593,7 +596,7 @@ EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
-EXC_REAL(instruction_access, 0x400, 0x80)
+EXC_REAL_OOL(instruction_access, 0x400, 0x80)
EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400)
TRAMP_KVM(PACA_EXGEN, 0x400)
@@ -616,13 +619,16 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXSLB)
+ b tramp_instruction_access_slb
+EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
+
+TRAMP_REAL_BEGIN(tramp_instruction_access_slb)
EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
mr r12,r3 /* save r3 */
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
mfspr r11,SPRN_SRR1
crclr 4*cr6+eq
BRANCH_TO_COMMON(r10, slb_miss_common)
-EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
SET_SCRATCH0(r13)
@@ -883,13 +889,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
-EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
+EXC_VIRT_OOL_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
TRAMP_KVM(PACA_EXGEN, 0x900)
EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
-EXC_REAL_HV(hdecrementer, 0x980, 0x80)
-EXC_VIRT_HV(hdecrementer, 0x4980, 0x80, 0x980)
+EXC_REAL_OOL_HV(hdecrementer, 0x980, 0x80)
+EXC_VIRT_OOL_HV(hdecrementer, 0x4980, 0x80, 0x980)
TRAMP_KVM_HV(PACA_EXGEN, 0x980)
EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)
@@ -1523,15 +1529,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
.endr
blr
-TRAMP_REAL_BEGIN(rfi_flush_fallback)
- SET_SCRATCH0(r13);
- GET_PACA(r13);
- std r1,PACA_EXRFI+EX_R12(r13)
- ld r1,PACAKSAVE(r13)
- std r9,PACA_EXRFI+EX_R9(r13)
- std r10,PACA_EXRFI+EX_R10(r13)
- std r11,PACA_EXRFI+EX_R11(r13)
- mfctr r9
+/* Clobbers r10, r11, ctr */
+.macro L1D_DISPLACEMENT_FLUSH
ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
ld r11,PACA_L1D_FLUSH_SIZE(r13)
srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
@@ -1542,7 +1541,7 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
sync
/*
- * The load adresses are at staggered offsets within cachelines,
+ * The load addresses are at staggered offsets within cachelines,
* which suits some pipelines better (on others it should not
* hurt).
*/
@@ -1557,7 +1556,30 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
ld r11,(0x80 + 8)*7(r10)
addi r10,r10,0x80*8
bdnz 1b
+.endm
+
+TRAMP_REAL_BEGIN(entry_flush_fallback)
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ mfctr r9
+ L1D_DISPLACEMENT_FLUSH
+ mtctr r9
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+ ld r11,PACA_EXRFI+EX_R11(r13)
+ blr
+TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ SET_SCRATCH0(r13);
+ GET_PACA(r13);
+ std r1,PACA_EXRFI+EX_R12(r13)
+ ld r1,PACAKSAVE(r13)
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ mfctr r9
+ L1D_DISPLACEMENT_FLUSH
mtctr r9
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
@@ -1575,32 +1597,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
mfctr r9
- ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
- ld r11,PACA_L1D_FLUSH_SIZE(r13)
- srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
- mtctr r11
- DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
-
- /* order ld/st prior to dcbt stop all streams with flushing */
- sync
-
- /*
- * The load adresses are at staggered offsets within cachelines,
- * which suits some pipelines better (on others it should not
- * hurt).
- */
-1:
- ld r11,(0x80 + 8)*0(r10)
- ld r11,(0x80 + 8)*1(r10)
- ld r11,(0x80 + 8)*2(r10)
- ld r11,(0x80 + 8)*3(r10)
- ld r11,(0x80 + 8)*4(r10)
- ld r11,(0x80 + 8)*5(r10)
- ld r11,(0x80 + 8)*6(r10)
- ld r11,(0x80 + 8)*7(r10)
- addi r10,r10,0x80*8
- bdnz 1b
-
+ L1D_DISPLACEMENT_FLUSH
mtctr r9
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
@@ -1609,6 +1606,19 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
GET_SCRATCH0(r13);
hrfid
+USE_TEXT_SECTION()
+
+_GLOBAL(do_uaccess_flush)
+ UACCESS_FLUSH_FIXUP_SECTION
+ nop
+ nop
+ nop
+ blr
+ L1D_DISPLACEMENT_FLUSH
+ blr
+_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
+EXPORT_SYMBOL(do_uaccess_flush)
+
/*
* Real mode exceptions actually use this too, but alternate
* instruction code patches (which end up in the common .text area)
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 3fb564f3e887..4f7b225d78cf 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -423,6 +423,10 @@ generic_secondary_common_init:
/* From now on, r24 is expected to be logical cpuid */
mr r24,r5
+ /* Create a temp kernel stack for use before relocation is on. */
+ ld r1,PACAEMERGSP(r13)
+ subi r1,r1,STACK_FRAME_OVERHEAD
+
/* See if we need to call a cpu state restore handler */
LOAD_REG_ADDR(r23, cur_cpu_spec)
ld r23,0(r23)
@@ -451,10 +455,6 @@ generic_secondary_common_init:
sync /* order paca.run and cur_cpu_spec */
isync /* In case code patching happened */
- /* Create a temp kernel stack for use before relocation is on. */
- ld r1,PACAEMERGSP(r13)
- subi r1,r1,STACK_FRAME_OVERHEAD
-
b __secondary_start
#endif /* SMP */
@@ -950,15 +950,8 @@ start_here_multiplatform:
std r0,0(r4)
#endif
- /* The following gets the stack set up with the regs */
- /* pointing to the real addr of the kernel stack. This is */
- /* all done to support the C function call below which sets */
- /* up the htab. This is done because we have relocated the */
- /* kernel but are still running in real mode. */
-
- LOAD_REG_ADDR(r3,init_thread_union)
-
/* set up a stack pointer */
+ LOAD_REG_ADDR(r3,init_thread_union)
LOAD_REG_IMMEDIATE(r1,THREAD_SIZE)
add r1,r3,r1
li r0,0
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 9fd2ff28b8ff..08c16aa0dd53 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -269,7 +269,7 @@ SystemCall:
/* On the MPC8xx, this is a software emulation interrupt. It occurs
* for all unimplemented and illegal instructions.
*/
- EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)
+ EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD)
. = 0x1100
/*
@@ -356,11 +356,9 @@ _ENTRY(ITLBMiss_cmp)
/* Load the MI_TWC with the attributes for this "segment." */
mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
-#ifdef CONFIG_SWAP
- rlwinm r11, r10, 32-5, _PAGE_PRESENT
+ rlwinm r11, r10, 32-7, _PAGE_PRESENT
and r11, r11, r10
rlwimi r10, r11, 0, _PAGE_PRESENT
-#endif
li r11, RPN_PATTERN | 0x200
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 20 and 23 must be clear.
@@ -482,11 +480,9 @@ _ENTRY(DTLBMiss_jmp)
* r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
* r10 = (r10 & ~PRESENT) | r11;
*/
-#ifdef CONFIG_SWAP
- rlwinm r11, r10, 32-5, _PAGE_PRESENT
+ rlwinm r11, r10, 32-7, _PAGE_PRESENT
and r11, r11, r10
rlwimi r10, r11, 0, _PAGE_PRESENT
-#endif
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 36178000a2f2..4a860d3b9229 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -170,8 +170,11 @@ core_idle_lock_held:
bne- core_idle_lock_held
blr
-/* Reuse an unused pt_regs slot for IAMR */
+/* Reuse some unused pt_regs slots for AMR/IAMR/UAMOR/AMOR */
+#define PNV_POWERSAVE_AMR _TRAP
#define PNV_POWERSAVE_IAMR _DAR
+#define PNV_POWERSAVE_UAMOR _DSISR
+#define PNV_POWERSAVE_AMOR RESULT
/*
* Pass requested state in r3:
@@ -205,8 +208,16 @@ pnv_powersave_common:
SAVE_NVGPRS(r1)
BEGIN_FTR_SECTION
+ mfspr r4, SPRN_AMR
mfspr r5, SPRN_IAMR
+ mfspr r6, SPRN_UAMOR
+ std r4, PNV_POWERSAVE_AMR(r1)
std r5, PNV_POWERSAVE_IAMR(r1)
+ std r6, PNV_POWERSAVE_UAMOR(r1)
+BEGIN_FTR_SECTION_NESTED(42)
+ mfspr r7, SPRN_AMOR
+ std r7, PNV_POWERSAVE_AMOR(r1)
+END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfcr r5
@@ -935,12 +946,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
REST_GPR(2, r1)
BEGIN_FTR_SECTION
- /* IAMR was saved in pnv_powersave_common() */
+ /* These regs were saved in pnv_powersave_common() */
+ ld r4, PNV_POWERSAVE_AMR(r1)
ld r5, PNV_POWERSAVE_IAMR(r1)
+ ld r6, PNV_POWERSAVE_UAMOR(r1)
+ mtspr SPRN_AMR, r4
mtspr SPRN_IAMR, r5
+ mtspr SPRN_UAMOR, r6
+BEGIN_FTR_SECTION_NESTED(42)
+ ld r7, PNV_POWERSAVE_AMOR(r1)
+ mtspr SPRN_AMOR, r7
+END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
/*
- * We don't need an isync here because the upcoming mtmsrd is
- * execution synchronizing.
+ * We don't need an isync here after restoring IAMR because the upcoming
+ * mtmsrd is execution synchronizing.
*/
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index f0dc680e659a..c3d2d5cd7c10 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1030,7 +1030,7 @@ int iommu_take_ownership(struct iommu_table *tbl)
spin_lock_irqsave(&tbl->large_pool.lock, flags);
for (i = 0; i < tbl->nr_pools; i++)
- spin_lock(&tbl->pools[i].lock);
+ spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
if (tbl->it_offset == 0)
clear_bit(0, tbl->it_map);
@@ -1059,7 +1059,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
spin_lock_irqsave(&tbl->large_pool.lock, flags);
for (i = 0; i < tbl->nr_pools; i++)
- spin_lock(&tbl->pools[i].lock);
+ spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
memset(tbl->it_map, 0, sz);
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 5c60bb0f927f..53a39661eb13 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -277,6 +277,9 @@ int kprobe_handler(struct pt_regs *regs)
if (user_mode(regs))
return 0;
+ if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+ return 0;
+
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 63f5a9311a29..094c37fb07a9 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -116,11 +116,12 @@ void machine_kexec(struct kimage *image)
void __init reserve_crashkernel(void)
{
- unsigned long long crash_size, crash_base;
+ unsigned long long crash_size, crash_base, total_mem_sz;
int ret;
+ total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
/* use common parsing */
- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ ret = parse_crashkernel(boot_command_line, total_mem_sz,
&crash_size, &crash_base);
if (ret == 0 && crash_size > 0) {
crashk_res.start = crash_base;
@@ -179,6 +180,7 @@ void __init reserve_crashkernel(void)
/* Crash kernel trumps memory limit */
if (memory_limit && memory_limit <= crashk_res.end) {
memory_limit = crashk_res.end + 1;
+ total_mem_sz = memory_limit;
printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
memory_limit);
}
@@ -187,7 +189,7 @@ void __init reserve_crashkernel(void)
"for crashkernel (System RAM: %ldMB)\n",
(unsigned long)(crash_size >> 20),
(unsigned long)(crashk_res.start >> 20),
- (unsigned long)(memblock_phys_mem_size() >> 20));
+ (unsigned long)(total_mem_sz >> 20));
if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
memblock_reserve(crashk_res.start, crash_size)) {
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 88e4f69a09e5..74628aca2bf1 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1671,3 +1671,13 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
+
+
+static int __init discover_phbs(void)
+{
+ if (ppc_md.discover_phbs)
+ ppc_md.discover_phbs();
+
+ return 0;
+}
+core_initcall(discover_phbs);
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index c101b321dece..7765837ba203 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -82,10 +82,16 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
const __be32 *addrs;
u32 i;
int proplen;
+ bool mark_unset = false;
addrs = of_get_property(node, "assigned-addresses", &proplen);
- if (!addrs)
- return;
+ if (!addrs || !proplen) {
+ addrs = of_get_property(node, "reg", &proplen);
+ if (!addrs || !proplen)
+ return;
+ mark_unset = true;
+ }
+
pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
for (; proplen >= 20; proplen -= 20, addrs += 5) {
flags = pci_parse_of_flags(of_read_number(addrs, 1), 0);
@@ -110,6 +116,8 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
continue;
}
res->flags = flags;
+ if (mark_unset)
+ res->flags |= IORESOURCE_UNSET;
res->name = pci_name(dev);
region.start = base;
region.end = base + size - 1;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8e88f78e57db..f8c49e5d4bd3 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -266,7 +266,7 @@ static struct feature_property {
};
#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
-static inline void identical_pvr_fixup(unsigned long node)
+static __init void identical_pvr_fixup(unsigned long node)
{
unsigned int pvr;
const char *model = of_get_flat_dt_prop(node, "model", NULL);
@@ -685,6 +685,23 @@ static void __init tm_init(void)
static void tm_init(void) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+#ifdef CONFIG_PPC64
+static void __init save_fscr_to_task(void)
+{
+ /*
+ * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
+ * have configured via the device tree features or via __init_FSCR().
+ * That value will then be propagated to pid 1 (init) and all future
+ * processes.
+ */
+ if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
+ init_task.thread.fscr = mfspr(SPRN_FSCR);
+}
+#else
+static inline void save_fscr_to_task(void) {};
+#endif
+
+
void __init early_init_devtree(void *params)
{
phys_addr_t limit;
@@ -770,6 +787,8 @@ void __init early_init_devtree(void *params)
BUG();
}
+ save_fscr_to_task();
+
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
/* We'll later wait for secondaries to check in; there are
* NCPUS-1 non-boot CPUs :-)
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 95d1264ba795..b3aa0cea6283 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1057,6 +1057,147 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
return NULL;
}
+#ifdef CONFIG_PPC_RTAS_FILTER
+
+/*
+ * The sys_rtas syscall, as originally designed, allows root to pass
+ * arbitrary physical addresses to RTAS calls. A number of RTAS calls
+ * can be abused to write to arbitrary memory and do other things that
+ * are potentially harmful to system integrity, and thus should only
+ * be used inside the kernel and not exposed to userspace.
+ *
+ * All known legitimate users of the sys_rtas syscall will only ever
+ * pass addresses that fall within the RMO buffer, and use a known
+ * subset of RTAS calls.
+ *
+ * Accordingly, we filter RTAS requests to check that the call is
+ * permitted, and that provided pointers fall within the RMO buffer.
+ * The rtas_filters list contains an entry for each permitted call,
+ * with the indexes of the parameters which are expected to contain
+ * addresses and sizes of buffers allocated inside the RMO buffer.
+ */
+struct rtas_filter {
+ const char *name;
+ int token;
+ /* Indexes into the args buffer, -1 if not used */
+ int buf_idx1;
+ int size_idx1;
+ int buf_idx2;
+ int size_idx2;
+
+ int fixed_size;
+};
+
+static struct rtas_filter rtas_filters[] __ro_after_init = {
+ { "ibm,activate-firmware", -1, -1, -1, -1, -1 },
+ { "ibm,configure-connector", -1, 0, -1, 1, -1, 4096 }, /* Special cased */
+ { "display-character", -1, -1, -1, -1, -1 },
+ { "ibm,display-message", -1, 0, -1, -1, -1 },
+ { "ibm,errinjct", -1, 2, -1, -1, -1, 1024 },
+ { "ibm,close-errinjct", -1, -1, -1, -1, -1 },
+ { "ibm,open-errinjct", -1, -1, -1, -1, -1 },
+ { "ibm,get-config-addr-info2", -1, -1, -1, -1, -1 },
+ { "ibm,get-dynamic-sensor-state", -1, 1, -1, -1, -1 },
+ { "ibm,get-indices", -1, 2, 3, -1, -1 },
+ { "get-power-level", -1, -1, -1, -1, -1 },
+ { "get-sensor-state", -1, -1, -1, -1, -1 },
+ { "ibm,get-system-parameter", -1, 1, 2, -1, -1 },
+ { "get-time-of-day", -1, -1, -1, -1, -1 },
+ { "ibm,get-vpd", -1, 0, -1, 1, 2 },
+ { "ibm,lpar-perftools", -1, 2, 3, -1, -1 },
+ { "ibm,platform-dump", -1, 4, 5, -1, -1 },
+ { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 },
+ { "ibm,scan-log-dump", -1, 0, 1, -1, -1 },
+ { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 },
+ { "ibm,set-eeh-option", -1, -1, -1, -1, -1 },
+ { "set-indicator", -1, -1, -1, -1, -1 },
+ { "set-power-level", -1, -1, -1, -1, -1 },
+ { "set-time-for-power-on", -1, -1, -1, -1, -1 },
+ { "ibm,set-system-parameter", -1, 1, -1, -1, -1 },
+ { "set-time-of-day", -1, -1, -1, -1, -1 },
+ { "ibm,suspend-me", -1, -1, -1, -1, -1 },
+ { "ibm,update-nodes", -1, 0, -1, -1, -1, 4096 },
+ { "ibm,update-properties", -1, 0, -1, -1, -1, 4096 },
+ { "ibm,physical-attestation", -1, 0, 1, -1, -1 },
+};
+
+static bool in_rmo_buf(u32 base, u32 end)
+{
+ return base >= rtas_rmo_buf &&
+ base < (rtas_rmo_buf + RTAS_RMOBUF_MAX) &&
+ base <= end &&
+ end >= rtas_rmo_buf &&
+ end < (rtas_rmo_buf + RTAS_RMOBUF_MAX);
+}
+
+static bool block_rtas_call(int token, int nargs,
+ struct rtas_args *args)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) {
+ struct rtas_filter *f = &rtas_filters[i];
+ u32 base, size, end;
+
+ if (token != f->token)
+ continue;
+
+ if (f->buf_idx1 != -1) {
+ base = be32_to_cpu(args->args[f->buf_idx1]);
+ if (f->size_idx1 != -1)
+ size = be32_to_cpu(args->args[f->size_idx1]);
+ else if (f->fixed_size)
+ size = f->fixed_size;
+ else
+ size = 1;
+
+ end = base + size - 1;
+ if (!in_rmo_buf(base, end))
+ goto err;
+ }
+
+ if (f->buf_idx2 != -1) {
+ base = be32_to_cpu(args->args[f->buf_idx2]);
+ if (f->size_idx2 != -1)
+ size = be32_to_cpu(args->args[f->size_idx2]);
+ else if (f->fixed_size)
+ size = f->fixed_size;
+ else
+ size = 1;
+ end = base + size - 1;
+
+ /*
+ * Special case for ibm,configure-connector where the
+ * address can be 0
+ */
+ if (!strcmp(f->name, "ibm,configure-connector") &&
+ base == 0)
+ return false;
+
+ if (!in_rmo_buf(base, end))
+ goto err;
+ }
+
+ return false;
+ }
+
+err:
+ pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n");
+ pr_err_ratelimited("sys_rtas: token=0x%x, nargs=%d (called by %s)\n",
+ token, nargs, current->comm);
+ return true;
+}
+
+#else
+
+static bool block_rtas_call(int token, int nargs,
+ struct rtas_args *args)
+{
+ return false;
+}
+
+#endif /* CONFIG_PPC_RTAS_FILTER */
+
/* We assume to be passed big endian arguments */
SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
{
@@ -1094,6 +1235,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
args.rets = &args.args[nargs];
memset(args.rets, 0, nret * sizeof(rtas_arg_t));
+ if (block_rtas_call(token, nargs, &args))
+ return -EINVAL;
+
/* Need to handle ibm,suspend_me call specially */
if (token == ibm_suspend_me_token) {
@@ -1155,6 +1299,9 @@ void __init rtas_initialize(void)
unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
u32 base, size, entry;
int no_base, no_size, no_entry;
+#ifdef CONFIG_PPC_RTAS_FILTER
+ int i;
+#endif
/* Get RTAS dev node and fill up our "rtas" structure with infos
* about it.
@@ -1190,6 +1337,12 @@ void __init rtas_initialize(void)
#ifdef CONFIG_RTAS_ERROR_LOGGING
rtas_last_error_token = rtas_token("rtas-last-error");
#endif
+
+#ifdef CONFIG_PPC_RTAS_FILTER
+ for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) {
+ rtas_filters[i].token = rtas_token(rtas_filters[i].name);
+ }
+#endif
}
int __init early_init_dt_scan_rtas(unsigned long node,
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 508244bcf19c..7787a26d4777 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -929,8 +929,6 @@ void __init setup_arch(char **cmdline_p)
/* On BookE, setup per-core TLB data structures. */
setup_tlb_core_data();
-
- smp_release_cpus();
#endif
/* Print various info about the machine that has been gathered so far. */
@@ -964,6 +962,8 @@ void __init setup_arch(char **cmdline_p)
exc_lvl_early_init();
emergency_stack_init();
+ smp_release_cpus();
+
initmem_init();
#ifdef CONFIG_DUMMY_CONSOLE
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index eaf7300be5ab..122365624d3d 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -518,6 +518,8 @@ static bool __init parse_cache_info(struct device_node *np,
lsizep = of_get_property(np, propnames[3], NULL);
if (bsizep == NULL)
bsizep = lsizep;
+ if (lsizep == NULL)
+ lsizep = bsizep;
if (lsizep != NULL)
lsize = be32_to_cpu(*lsizep);
if (bsizep != NULL)
@@ -861,7 +863,13 @@ early_initcall(disable_hardlockup_detector);
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
+static bool no_entry_flush;
+static bool no_uaccess_flush;
bool rfi_flush;
+bool entry_flush;
+bool uaccess_flush;
+DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
+EXPORT_SYMBOL(uaccess_flush_key);
static int __init handle_no_rfi_flush(char *p)
{
@@ -871,6 +879,22 @@ static int __init handle_no_rfi_flush(char *p)
}
early_param("no_rfi_flush", handle_no_rfi_flush);
+static int __init handle_no_entry_flush(char *p)
+{
+ pr_info("entry-flush: disabled on command line.");
+ no_entry_flush = true;
+ return 0;
+}
+early_param("no_entry_flush", handle_no_entry_flush);
+
+static int __init handle_no_uaccess_flush(char *p)
+{
+ pr_info("uaccess-flush: disabled on command line.");
+ no_uaccess_flush = true;
+ return 0;
+}
+early_param("no_uaccess_flush", handle_no_uaccess_flush);
+
/*
* The RFI flush is not KPTI, but because users will see doco that says to use
* nopti we hijack that option here to also disable the RFI flush.
@@ -902,6 +926,32 @@ void rfi_flush_enable(bool enable)
rfi_flush = enable;
}
+void entry_flush_enable(bool enable)
+{
+ if (enable) {
+ do_entry_flush_fixups(enabled_flush_types);
+ on_each_cpu(do_nothing, NULL, 1);
+ } else {
+ do_entry_flush_fixups(L1D_FLUSH_NONE);
+ }
+
+ entry_flush = enable;
+}
+
+void uaccess_flush_enable(bool enable)
+{
+ if (enable) {
+ do_uaccess_flush_fixups(enabled_flush_types);
+ static_branch_enable(&uaccess_flush_key);
+ on_each_cpu(do_nothing, NULL, 1);
+ } else {
+ static_branch_disable(&uaccess_flush_key);
+ do_uaccess_flush_fixups(L1D_FLUSH_NONE);
+ }
+
+ uaccess_flush = enable;
+}
+
static void __ref init_fallback_flush(void)
{
u64 l1d_size, limit;
@@ -955,10 +1005,28 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable)
enabled_flush_types = types;
- if (!no_rfi_flush && !cpu_mitigations_off())
+ if (!cpu_mitigations_off() && !no_rfi_flush)
rfi_flush_enable(enable);
}
+void setup_entry_flush(bool enable)
+{
+ if (cpu_mitigations_off())
+ return;
+
+ if (!no_entry_flush)
+ entry_flush_enable(enable);
+}
+
+void setup_uaccess_flush(bool enable)
+{
+ if (cpu_mitigations_off())
+ return;
+
+ if (!no_uaccess_flush)
+ uaccess_flush_enable(enable);
+}
+
#ifdef CONFIG_DEBUG_FS
static int rfi_flush_set(void *data, u64 val)
{
@@ -986,9 +1054,63 @@ static int rfi_flush_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
+static int entry_flush_set(void *data, u64 val)
+{
+ bool enable;
+
+ if (val == 1)
+ enable = true;
+ else if (val == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ /* Only do anything if we're changing state */
+ if (enable != entry_flush)
+ entry_flush_enable(enable);
+
+ return 0;
+}
+
+static int entry_flush_get(void *data, u64 *val)
+{
+ *val = entry_flush ? 1 : 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
+
+static int uaccess_flush_set(void *data, u64 val)
+{
+ bool enable;
+
+ if (val == 1)
+ enable = true;
+ else if (val == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ /* Only do anything if we're changing state */
+ if (enable != uaccess_flush)
+ uaccess_flush_enable(enable);
+
+ return 0;
+}
+
+static int uaccess_flush_get(void *data, u64 *val)
+{
+ *val = uaccess_flush ? 1 : 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
+
static __init int rfi_flush_debugfs_init(void)
{
debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
+ debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
+ debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
return 0;
}
device_initcall(rfi_flush_debugfs_init);
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index b088b0700d0d..7aba592bba36 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -477,8 +477,10 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
err |= __get_user(tsk->thread.ckpt_regs.ccr,
&sc->gp_regs[PT_CCR]);
+ /* Don't allow userspace to set the trap value */
+ regs->trap = 0;
+
/* These regs are not checkpointed; they can go in 'regs'. */
- err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 6dc43205382b..a9ec4467705c 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1032,6 +1032,9 @@ void start_secondary(void *unused)
vdso_getcpu_init();
#endif
+ set_numa_node(numa_cpu_lookup_table[cpu]);
+ set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
+
/* Update topology CPU masks */
add_cpu_to_masks(cpu);
@@ -1042,9 +1045,6 @@ void start_secondary(void *unused)
if (!cpumask_equal(cpu_l2_cache_mask(cpu), cpu_sibling_mask(cpu)))
shared_caches = true;
- set_numa_node(numa_cpu_lookup_table[cpu]);
- set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
-
smp_wmb();
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 755dc98a57ae..6b107de10ffa 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -29,29 +29,27 @@
static DEFINE_PER_CPU(struct cpu, cpu_devices);
-/*
- * SMT snooze delay stuff, 64-bit only for now
- */
-
#ifdef CONFIG_PPC64
-/* Time in microseconds we delay before sleeping in the idle loop */
-static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
+/*
+ * Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle:
+ * smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in
+ * 2014:
+ *
+ * "ppc64_util currently utilises it. Once we fix ppc64_util, propose to clean
+ * up the kernel code."
+ *
+ * powerpc-utils stopped using it as of 1.3.8. At some point in the future this
+ * code should be removed.
+ */
static ssize_t store_smt_snooze_delay(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- ssize_t ret;
- long snooze;
-
- ret = sscanf(buf, "%ld", &snooze);
- if (ret != 1)
- return -EINVAL;
-
- per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
+ pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n",
+ current->comm, current->pid);
return count;
}
@@ -59,9 +57,9 @@ static ssize_t show_smt_snooze_delay(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
-
- return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
+ pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n",
+ current->comm, current->pid);
+ return sprintf(buf, "100\n");
}
static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
@@ -69,16 +67,10 @@ static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
static int __init setup_smt_snooze_delay(char *str)
{
- unsigned int cpu;
- long snooze;
-
if (!cpu_has_feature(CPU_FTR_SMT))
return 1;
- snooze = simple_strtol(str, NULL, 10);
- for_each_possible_cpu(cpu)
- per_cpu(smt_snooze_delay, cpu) = snooze;
-
+ pr_warn("smt-snooze-delay command line option has no effect\n");
return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
index e2ab8a111b69..8ece45c2a1f6 100644
--- a/arch/powerpc/kernel/tau_6xx.c
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -13,13 +13,14 @@
*/
#include <linux/errno.h>
-#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
#include <asm/io.h>
#include <asm/reg.h>
@@ -39,9 +40,7 @@ static struct tau_temp
unsigned char grew;
} tau[NR_CPUS];
-struct timer_list tau_timer;
-
-#undef DEBUG
+static bool tau_int_enable;
/* TODO: put these in a /proc interface, with some sanity checks, and maybe
* dynamic adjustment to minimize # of interrupts */
@@ -50,72 +49,49 @@ struct timer_list tau_timer;
#define step_size 2 /* step size when temp goes out of range */
#define window_expand 1 /* expand the window by this much */
/* configurable values for shrinking the window */
-#define shrink_timer 2*HZ /* period between shrinking the window */
+#define shrink_timer 2000 /* period between shrinking the window */
#define min_window 2 /* minimum window size, degrees C */
static void set_thresholds(unsigned long cpu)
{
-#ifdef CONFIG_TAU_INT
- /*
- * setup THRM1,
- * threshold, valid bit, enable interrupts, interrupt when below threshold
- */
- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
+ u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0;
- /* setup THRM2,
- * threshold, valid bit, enable interrupts, interrupt when above threshold
- */
- mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
-#else
- /* same thing but don't enable interrupts */
- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
- mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
-#endif
+ /* setup THRM1, threshold, valid bit, interrupt when below threshold */
+ mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
+
+ /* setup THRM2, threshold, valid bit, interrupt when above threshold */
+ mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
}
static void TAUupdate(int cpu)
{
- unsigned thrm;
-
-#ifdef DEBUG
- printk("TAUupdate ");
-#endif
+ u32 thrm;
+ u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V;
/* if both thresholds are crossed, the step_sizes cancel out
* and the window winds up getting expanded twice. */
- if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
- if(thrm & THRM1_TIN){ /* crossed low threshold */
- if (tau[cpu].low >= step_size){
- tau[cpu].low -= step_size;
- tau[cpu].high -= (step_size - window_expand);
- }
- tau[cpu].grew = 1;
-#ifdef DEBUG
- printk("low threshold crossed ");
-#endif
+ thrm = mfspr(SPRN_THRM1);
+ if ((thrm & bits) == bits) {
+ mtspr(SPRN_THRM1, 0);
+
+ if (tau[cpu].low >= step_size) {
+ tau[cpu].low -= step_size;
+ tau[cpu].high -= (step_size - window_expand);
}
+ tau[cpu].grew = 1;
+ pr_debug("%s: low threshold crossed\n", __func__);
}
- if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
- if(thrm & THRM1_TIN){ /* crossed high threshold */
- if (tau[cpu].high <= 127-step_size){
- tau[cpu].low += (step_size - window_expand);
- tau[cpu].high += step_size;
- }
- tau[cpu].grew = 1;
-#ifdef DEBUG
- printk("high threshold crossed ");
-#endif
+ thrm = mfspr(SPRN_THRM2);
+ if ((thrm & bits) == bits) {
+ mtspr(SPRN_THRM2, 0);
+
+ if (tau[cpu].high <= 127 - step_size) {
+ tau[cpu].low += (step_size - window_expand);
+ tau[cpu].high += step_size;
}
+ tau[cpu].grew = 1;
+ pr_debug("%s: high threshold crossed\n", __func__);
}
-
-#ifdef DEBUG
- printk("grew = %d\n", tau[cpu].grew);
-#endif
-
-#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
- set_thresholds(cpu);
-#endif
-
}
#ifdef CONFIG_TAU_INT
@@ -140,17 +116,16 @@ void TAUException(struct pt_regs * regs)
static void tau_timeout(void * info)
{
int cpu;
- unsigned long flags;
int size;
int shrink;
- /* disabling interrupts *should* be okay */
- local_irq_save(flags);
cpu = smp_processor_id();
-#ifndef CONFIG_TAU_INT
- TAUupdate(cpu);
-#endif
+ if (!tau_int_enable)
+ TAUupdate(cpu);
+
+ /* Stop thermal sensor comparisons and interrupts */
+ mtspr(SPRN_THRM3, 0);
size = tau[cpu].high - tau[cpu].low;
if (size > min_window && ! tau[cpu].grew) {
@@ -173,32 +148,26 @@ static void tau_timeout(void * info)
set_thresholds(cpu);
- /*
- * Do the enable every time, since otherwise a bunch of (relatively)
- * complex sleep code needs to be added. One mtspr every time
- * tau_timeout is called is probably not a big deal.
- *
- * Enable thermal sensor and set up sample interval timer
- * need 20 us to do the compare.. until a nice 'cpu_speed' function
- * call is implemented, just assume a 500 mhz clock. It doesn't really
- * matter if we take too long for a compare since it's all interrupt
- * driven anyway.
- *
- * use a extra long time.. (60 us @ 500 mhz)
+ /* Restart thermal sensor comparisons and interrupts.
+ * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet"
+ * recommends that "the maximum value be set in THRM3 under all
+ * conditions."
*/
- mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
-
- local_irq_restore(flags);
+ mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E);
}
-static void tau_timeout_smp(struct timer_list *unused)
-{
+static struct workqueue_struct *tau_workq;
- /* schedule ourselves to be run again */
- mod_timer(&tau_timer, jiffies + shrink_timer) ;
+static void tau_work_func(struct work_struct *work)
+{
+ msleep(shrink_timer);
on_each_cpu(tau_timeout, NULL, 0);
+ /* schedule ourselves to be run again */
+ queue_work(tau_workq, work);
}
+DECLARE_WORK(tau_work, tau_work_func);
+
/*
* setup the TAU
*
@@ -231,21 +200,19 @@ static int __init TAU_init(void)
return 1;
}
+ tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
+ !strcmp(cur_cpu_spec->platform, "ppc750");
- /* first, set up the window shrinking timer */
- timer_setup(&tau_timer, tau_timeout_smp, 0);
- tau_timer.expires = jiffies + shrink_timer;
- add_timer(&tau_timer);
+ tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1);
+ if (!tau_workq)
+ return -ENOMEM;
on_each_cpu(TAU_init_smp, NULL, 0);
- printk("Thermal assist unit ");
-#ifdef CONFIG_TAU_INT
- printk("using interrupts, ");
-#else
- printk("using timers, ");
-#endif
- printk("shrink_timer: %d jiffies\n", shrink_timer);
+ queue_work(tau_workq, &tau_work);
+
+ pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n",
+ tau_int_enable ? "interrupts" : "workqueue", shrink_timer);
tau_initialized = 1;
return 0;
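
The tau_6xx.c changes above replace a self-rearming kernel timer with a work item on an unbound workqueue, so the poll loop may sleep (msleep()) between passes instead of juggling jiffies. A minimal sketch of that self-rescheduling pattern, with hypothetical names:

#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/init.h>

static struct workqueue_struct *poll_wq;

static void poll_fn(struct work_struct *work)
{
	msleep(2000);			/* sleeping is fine here, unlike in a timer */
	/* ... periodic work goes here ... */
	queue_work(poll_wq, work);	/* re-arm ourselves */
}
static DECLARE_WORK(poll_work, poll_fn);

static int __init poll_init(void)
{
	poll_wq = alloc_workqueue("poll", WQ_UNBOUND, 1);
	if (!poll_wq)
		return -ENOMEM;
	queue_work(poll_wq, &poll_work);
	return 0;
}
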
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 5449e76cf2df..f6c21f6af274 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -492,35 +492,6 @@ static inline void clear_irq_work_pending(void)
"i" (offsetof(struct paca_struct, irq_work_pending)));
}
-void arch_irq_work_raise(void)
-{
- preempt_disable();
- set_irq_work_pending_flag();
- /*
- * Non-nmi code running with interrupts disabled will replay
- * irq_happened before it re-enables interrupts, so set the

- * decrementer there instead of causing a hardware exception
- * which would immediately hit the masked interrupt handler
- * and have the net effect of setting the decrementer in
- * irq_happened.
- *
- * NMI interrupts can not check this when they return, so the
- * decrementer hardware exception is raised, which will fire
- * when interrupts are next enabled.
- *
- * BookE does not support this yet, it must audit all NMI
- * interrupt handlers to ensure they call nmi_enter() so this
- * check would be correct.
- */
- if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) {
- set_dec(1);
- } else {
- hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_DEC;
- }
- preempt_enable();
-}
-
#else /* 32-bit */
DEFINE_PER_CPU(u8, irq_work_pending);
@@ -529,16 +500,27 @@ DEFINE_PER_CPU(u8, irq_work_pending);
#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)
+#endif /* 32 vs 64 bit */
+
void arch_irq_work_raise(void)
{
+ /*
+ * 64-bit code that uses irq soft-mask can just cause an immediate
+ * interrupt here that gets soft masked, if this is called under
+ * local_irq_disable(). It might be possible to prevent that happening
+ * by noticing interrupts are disabled and setting decrementer pending
+ * to be replayed when irqs are enabled. The problem there is that
+ * tracing can call irq_work_raise, including in code that does low
+ * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
+ * which could get tangled up if we're messing with the same state
+ * here.
+ */
preempt_disable();
set_irq_work_pending_flag();
set_dec(1);
preempt_enable();
}
-#endif /* 32 vs 64 bit */
-
#else /* CONFIG_IRQ_WORK */
#define test_irq_work_pending() 0
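
With the separate 64-bit arch_irq_work_raise() removed above, both 32- and 64-bit now simply set the pending flag and fire the decrementer; the long comment explains why replaying a soft-masked decrementer is deliberately not attempted. For context, a typical irq_work user (illustrative only) just queues work from a restricted context and lets this architecture hook raise the interrupt:

#include <linux/irq_work.h>
#include <linux/printk.h>

static void my_irq_work_fn(struct irq_work *work)
{
	pr_info("irq_work ran in interrupt context\n");
}
static DEFINE_IRQ_WORK(my_work, my_irq_work_fn);

/* e.g. from NMI or another context where little else is allowed: */
static void poke(void)
{
	irq_work_queue(&my_work);	/* ends up in arch_irq_work_raise() */
}
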
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index d5f351f02c15..2379c4bf3979 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -430,11 +430,14 @@ out:
#ifdef CONFIG_PPC_BOOK3S_64
BUG_ON(get_paca()->in_nmi == 0);
if (get_paca()->in_nmi > 1)
- nmi_panic(regs, "Unrecoverable nested System Reset");
+ die("Unrecoverable nested System Reset", regs, SIGABRT);
#endif
/* Must die if the interrupt is not recoverable */
- if (!(regs->msr & MSR_RI))
- nmi_panic(regs, "Unrecoverable System Reset");
+ if (!(regs->msr & MSR_RI)) {
+ /* For the reason explained in die_mce, nmi_exit before die */
+ nmi_exit();
+ die("Unrecoverable System Reset", regs, SIGABRT);
+ }
if (!nested)
nmi_exit();
@@ -775,7 +778,7 @@ void machine_check_exception(struct pt_regs *regs)
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
- nmi_panic(regs, "Unrecoverable Machine check");
+ die("Unrecoverable Machine check", regs, SIGBUS);
return;
@@ -794,7 +797,7 @@ static void p9_hmi_special_emu(struct pt_regs *regs)
{
unsigned int ra, rb, t, i, sel, instr, rc;
const void __user *addr;
- u8 vbuf[16], *vdst;
+ u8 vbuf[16] __aligned(16), *vdst;
unsigned long ea, msr, msr_mask;
bool swap;
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 65b3bdb99f0b..31ab6eb61e26 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -705,7 +705,7 @@ int vdso_getcpu_init(void)
node = cpu_to_node(cpu);
WARN_ON_ONCE(node > 0xffff);
- val = (cpu & 0xfff) | ((node & 0xffff) << 16);
+ val = (cpu & 0xffff) | ((node & 0xffff) << 16);
mtspr(SPRN_SPRG_VDSO_WRITE, val);
get_paca()->sprg_vdso = val;
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index d081d726ca8e..9b346f3d2814 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -99,6 +99,7 @@ SECTIONS
#endif
/* careful! __ftr_alt_* sections need to be close to .text */
*(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
+ NOINSTR_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
@@ -141,6 +142,20 @@ SECTIONS
}
. = ALIGN(8);
+ __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
+ __start___uaccess_flush_fixup = .;
+ *(__uaccess_flush_fixup)
+ __stop___uaccess_flush_fixup = .;
+ }
+
+ . = ALIGN(8);
+ __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
+ __start___entry_flush_fixup = .;
+ *(__entry_flush_fixup)
+ __stop___entry_flush_fixup = .;
+ }
+
+ . = ALIGN(8);
__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
__start___stf_exit_barrier_fixup = .;
*(__stf_exit_barrier_fixup)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index cc05f346e042..bc9d1321dc73 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -812,7 +812,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ bool blockable)
{
return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 998f8d089ac7..df0f08cb821b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -171,7 +171,13 @@ static struct kmem_cache *kvm_pmd_cache;
static pte_t *kvmppc_pte_alloc(void)
{
- return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
+ pte_t *pte;
+
+ pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
+ /* pmd_populate() will only reference __pa(pte). */
+ kmemleak_ignore(pte);
+
+ return pte;
}
static void kvmppc_pte_free(pte_t *ptep)
@@ -187,7 +193,13 @@ static inline int pmd_is_leaf(pmd_t pmd)
static pmd_t *kvmppc_pmd_alloc(void)
{
- return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
+ pmd_t *pmd;
+
+ pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
+ /* pud_populate() will only reference __pa(pmd). */
+ kmemleak_ignore(pmd);
+
+ return pmd;
}
static void kvmppc_pmd_free(pmd_t *pmdp)
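
The two allocation hunks above only ever store the physical address of the page-table page (via pmd_populate()/pud_populate()), so kmemleak never sees a pointer to it and would flag a false leak. A small sketch of that annotation pattern, with a hypothetical cache:

#include <linux/slab.h>
#include <linux/kmemleak.h>

static void *alloc_table(struct kmem_cache *cache)
{
	void *p = kmem_cache_alloc(cache, GFP_KERNEL);

	if (!p)
		return NULL;
	/* Only the physical address will be kept; hide the object from kmemleak. */
	kmemleak_ignore(p);
	return p;
}
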
diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
index 31cd0f327c8a..e7fd60cf9780 100644
--- a/arch/powerpc/kvm/book3s_hv_tm.c
+++ b/arch/powerpc/kvm/book3s_hv_tm.c
@@ -6,6 +6,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kvm_host.h>
#include <asm/kvm_ppc.h>
@@ -47,7 +49,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
u64 newmsr, bescr;
int ra, rs;
- switch (instr & 0xfc0007ff) {
+ /*
+ * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
+ * in these instructions, so masking bit 31 out doesn't change these
+ * instructions. For treclaim., tsr., and trechkpt. instructions if bit
+ * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section
+ * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit
+ * 31 is an acceptable way to handle these invalid forms that have
+ * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/
+ * bit 31 set) can generate a softpatch interrupt. Hence both forms
+ * are handled below for these instructions so they behave the same way.
+ */
+ switch (instr & PO_XOP_OPCODE_MASK) {
case PPC_INST_RFID:
/* XXX do we need to check for PR=0 here? */
newmsr = vcpu->arch.shregs.srr1;
@@ -108,7 +121,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.msr = newmsr;
return RESUME_GUEST;
- case PPC_INST_TSR:
+ /* ignore bit 31, see comment above */
+ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
/* check for PR=1 and arch 2.06 bit set in PCR */
if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
/* generate an illegal instruction interrupt */
@@ -143,7 +157,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.msr = msr;
return RESUME_GUEST;
- case PPC_INST_TRECLAIM:
+ /* ignore bit 31, see comment above */
+ case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
/* check for TM disabled in the HFSCR or MSR */
if (!(vcpu->arch.hfscr & HFSCR_TM)) {
/* generate an illegal instruction interrupt */
@@ -179,7 +194,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
return RESUME_GUEST;
- case PPC_INST_TRECHKPT:
+ /* ignore bit 31, see comment above */
+ case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
/* XXX do we need to check for PR=0 here? */
/* check for TM disabled in the HFSCR or MSR */
if (!(vcpu->arch.hfscr & HFSCR_TM)) {
@@ -211,6 +227,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
}
/* What should we do here? We didn't recognize the instruction */
- WARN_ON_ONCE(1);
+ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+ pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation\n", instr);
+
return RESUME_GUEST;
}
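
The switch above now masks the instruction with PO_XOP_OPCODE_MASK instead of 0xfc0007ff, presumably the same mask with the bit-31 position cleared, so the architected form and the bit-31=0 invalid form select the same case label. A small illustration, with the mask value assumed rather than taken from the patch:

#include <linux/types.h>

/* Assumed value: the old 0xfc0007ff mask with IBM bit 31 (the lsb) cleared. */
#define EXAMPLE_PO_XOP_MASK	0xfc0007feu

static bool same_case(u32 a, u32 b)
{
	return (a & EXAMPLE_PO_XOP_MASK) == (b & EXAMPLE_PO_XOP_MASK);
}

/* same_case(insn, insn | 1) is always true, so e.g. "tsr." with bit 31
 * clear (an invalid form the hardware still softpatches) takes the same
 * case as the architected encoding. */
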
diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
index 3cf5863bc06e..3c7ca2fa1959 100644
--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
@@ -26,7 +26,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
u64 newmsr, msr, bescr;
int rs;
- switch (instr & 0xfc0007ff) {
+ /*
+ * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
+ * in these instructions, so masking bit 31 out doesn't change these
+ * instructions. For the tsr. instruction if bit 31 = 0 then it is per
+ * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid
+ * Forms, informs specifically that ignoring bit 31 is an acceptable way
+ * to handle TM-related invalid forms that have bit 31 = 0. Moreover,
+ * for emulation purposes both forms (w/ and wo/ bit 31 set) can
+ * generate a softpatch interrupt. Hence both forms are handled below
+ * for tsr. to make them behave the same way.
+ */
+ switch (instr & PO_XOP_OPCODE_MASK) {
case PPC_INST_RFID:
/* XXX do we need to check for PR=0 here? */
newmsr = vcpu->arch.shregs.srr1;
@@ -76,7 +87,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.msr = newmsr;
return 1;
- case PPC_INST_TSR:
+ /* ignore bit 31, see comment above */
+ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
/* we know the MSR has the TS field = S (0b01) here */
msr = vcpu->arch.shregs.msr;
/* check for PR=1 and arch 2.06 bit set in PCR */
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 8f2985e46f6f..bbb02195dc53 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -737,7 +737,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
return 0;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ bool blockable)
{
/* kvm_unmap_hva flushes everything anyways */
kvm_unmap_hva(kvm, start);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 51cd66dc1bb0..7c8354dfe80e 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1497,7 +1497,7 @@ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -1515,7 +1515,7 @@ int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
-int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -1533,7 +1533,7 @@ int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
-int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -1551,7 +1551,7 @@ int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
-int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c
index a0cb63fb76a1..8d83c39be7e4 100644
--- a/arch/powerpc/lib/checksum_wrappers.c
+++ b/arch/powerpc/lib/checksum_wrappers.c
@@ -29,6 +29,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
unsigned int csum;
might_sleep();
+ allow_read_from_user(src, len);
*err_ptr = 0;
@@ -60,6 +61,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
}
out:
+ prevent_read_from_user(src, len);
return (__force __wsum)csum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
@@ -70,6 +72,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
unsigned int csum;
might_sleep();
+ allow_write_to_user(dst, len);
*err_ptr = 0;
@@ -97,6 +100,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
}
out:
+ prevent_write_to_user(dst, len);
return (__force __wsum)csum;
}
EXPORT_SYMBOL(csum_and_copy_to_user);
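
The checksum wrappers above gain allow_read_from_user()/allow_write_to_user() before the access and the matching prevent_*() call on the way out, so the user-access window is open only around the copy itself. A minimal sketch of that bracketing, assuming the powerpc KUAP helpers and a low-level copy routine (raw_copy_loop() here is hypothetical) that does not manage the window on its own:

#include <linux/uaccess.h>

static int copy_from_user_bracketed(void *dst, const void __user *src,
				    size_t len)
{
	int ret;

	might_sleep();
	allow_read_from_user(src, len);
	ret = raw_copy_loop(dst, src, len);	/* hypothetical low-level copy */
	prevent_read_from_user(src, len);	/* always close the window */
	return ret;
}
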
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index dbe478e7b8e0..1561094ea4a5 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -18,6 +18,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
+#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
@@ -225,11 +226,143 @@ void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
: "unknown");
}
+static int __do_stf_barrier_fixups(void *data)
+{
+ enum stf_barrier_type *types = data;
+
+ do_stf_entry_barrier_fixups(*types);
+ do_stf_exit_barrier_fixups(*types);
+
+ return 0;
+}
void do_stf_barrier_fixups(enum stf_barrier_type types)
{
- do_stf_entry_barrier_fixups(types);
- do_stf_exit_barrier_fixups(types);
+ /*
+ * The call to the fallback entry flush, and the fallback/sync-ori exit
+ * flush can not be safely patched in/out while other CPUs are executing
+ * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
+ * spin in the stop machine core with interrupts hard disabled.
+ */
+ stop_machine(__do_stf_barrier_fixups, &types, NULL);
+}
+
+void do_uaccess_flush_fixups(enum l1d_flush_type types)
+{
+ unsigned int instrs[4], *dest;
+ long *start, *end;
+ int i;
+
+ start = PTRRELOC(&__start___uaccess_flush_fixup);
+ end = PTRRELOC(&__stop___uaccess_flush_fixup);
+
+ instrs[0] = 0x60000000; /* nop */
+ instrs[1] = 0x60000000; /* nop */
+ instrs[2] = 0x60000000; /* nop */
+ instrs[3] = 0x4e800020; /* blr */
+
+ i = 0;
+ if (types == L1D_FLUSH_FALLBACK) {
+ instrs[3] = 0x60000000; /* nop */
+ /* fallthrough to fallback flush */
+ }
+
+ if (types & L1D_FLUSH_ORI) {
+ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
+ }
+
+ if (types & L1D_FLUSH_MTTRIG)
+ instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+
+ for (i = 0; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+ patch_instruction(dest, instrs[0]);
+
+ patch_instruction((dest + 1), instrs[1]);
+ patch_instruction((dest + 2), instrs[2]);
+ patch_instruction((dest + 3), instrs[3]);
+ }
+
+ printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
+ (types == L1D_FLUSH_NONE) ? "no" :
+ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
+ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
+ ? "ori+mttrig type"
+ : "ori type" :
+ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
+ : "unknown");
+}
+
+static int __do_entry_flush_fixups(void *data)
+{
+ enum l1d_flush_type types = *(enum l1d_flush_type *)data;
+ unsigned int instrs[3], *dest;
+ long *start, *end;
+ int i;
+
+ start = PTRRELOC(&__start___entry_flush_fixup);
+ end = PTRRELOC(&__stop___entry_flush_fixup);
+
+ instrs[0] = 0x60000000; /* nop */
+ instrs[1] = 0x60000000; /* nop */
+ instrs[2] = 0x60000000; /* nop */
+
+ i = 0;
+ if (types == L1D_FLUSH_FALLBACK) {
+ instrs[i++] = 0x7d4802a6; /* mflr r10 */
+ instrs[i++] = 0x60000000; /* branch patched below */
+ instrs[i++] = 0x7d4803a6; /* mtlr r10 */
+ }
+
+ if (types & L1D_FLUSH_ORI) {
+ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
+ }
+
+ if (types & L1D_FLUSH_MTTRIG)
+ instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+
+ for (i = 0; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+ patch_instruction(dest, instrs[0]);
+
+ if (types == L1D_FLUSH_FALLBACK)
+ patch_branch((dest + 1), (unsigned long)&entry_flush_fallback,
+ BRANCH_SET_LINK);
+ else
+ patch_instruction((dest + 1), instrs[1]);
+
+ patch_instruction((dest + 2), instrs[2]);
+ }
+
+ printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
+ (types == L1D_FLUSH_NONE) ? "no" :
+ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
+ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
+ ? "ori+mttrig type"
+ : "ori type" :
+ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
+ : "unknown");
+
+ return 0;
+}
+
+void do_entry_flush_fixups(enum l1d_flush_type types)
+{
+ /*
+ * The call to the fallback flush can not be safely patched in/out while
+ * other CPUs are executing it. So call __do_entry_flush_fixups() on one
+ * CPU while all other CPUs spin in the stop machine core with interrupts
+ * hard disabled.
+ */
+ stop_machine(__do_entry_flush_fixups, &types, NULL);
}
void do_rfi_flush_fixups(enum l1d_flush_type types)
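
do_stf_barrier_fixups() and do_entry_flush_fixups() above both funnel the actual patching through stop_machine(), so it runs on one CPU while every other CPU spins with interrupts hard-disabled and cannot be executing the instructions being rewritten. The shape of that pattern, with hypothetical names:

#include <linux/stop_machine.h>

static int __patch_on_one_cpu(void *data)
{
	/* Sole running CPU: safe to rewrite code other CPUs might execute. */
	apply_patches(data);		/* hypothetical */
	return 0;
}

static void patch_safely(void *data)
{
	stop_machine(__patch_on_one_cpu, data, NULL);
}
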
diff --git a/arch/powerpc/lib/string_32.S b/arch/powerpc/lib/string_32.S
index f69a6aab7bfb..1ddb26394e8a 100644
--- a/arch/powerpc/lib/string_32.S
+++ b/arch/powerpc/lib/string_32.S
@@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
-_GLOBAL(__clear_user)
+_GLOBAL(__arch_clear_user)
/*
* Use dcbz on the complete cache lines in the destination
* to set them to zero. This requires that the destination
@@ -87,4 +87,4 @@ _GLOBAL(__clear_user)
EX_TABLE(8b, 91b)
EX_TABLE(9b, 91b)
-EXPORT_SYMBOL(__clear_user)
+EXPORT_SYMBOL(__arch_clear_user)
diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
index 56aac4c22025..ea3798f4f25f 100644
--- a/arch/powerpc/lib/string_64.S
+++ b/arch/powerpc/lib/string_64.S
@@ -29,7 +29,7 @@ PPC64_CACHES:
.section ".text"
/**
- * __clear_user: - Zero a block of memory in user space, with less checking.
+ * __arch_clear_user: - Zero a block of memory in user space, with less checking.
* @to: Destination address, in user space.
* @n: Number of bytes to zero.
*
@@ -70,7 +70,7 @@ err3; stb r0,0(r3)
mr r3,r4
blr
-_GLOBAL_TOC(__clear_user)
+_GLOBAL_TOC(__arch_clear_user)
cmpdi r4,32
neg r6,r3
li r0,0
@@ -193,4 +193,4 @@ err1; dcbz 0,r3
cmpdi r4,32
blt .Lshort_clear
b .Lmedium_clear
-EXPORT_SYMBOL(__clear_user)
+EXPORT_SYMBOL(__arch_clear_user)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 6e0ff8b600ce..eb5252177b66 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -233,6 +233,9 @@ static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
return is_exec || (address >= TASK_SIZE);
}
+// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE
+#define SIGFRAME_MAX_SIZE (4096 + 128)
+
static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
struct vm_area_struct *vma, unsigned int flags,
bool *must_retry)
@@ -240,7 +243,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
/*
* N.B. The POWER/Open ABI allows programs to access up to
* 288 bytes below the stack pointer.
- * The kernel signal delivery code writes up to about 1.5kB
+ * The kernel signal delivery code writes a bit over 4KB
* below the stack pointer (r1) before decrementing it.
* The exec code can write slightly over 640kB to the stack
* before setting the user r1. Thus we allow the stack to
@@ -265,7 +268,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
* between the last mapped region and the stack will
* expand the stack rather than segfaulting.
*/
- if (address + 2048 >= uregs->gpr[1])
+ if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1])
return false;
if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 5404a631d583..9ee235fca427 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -115,7 +115,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
set_the_pte:
set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
- smp_wmb();
+ asm volatile("ptesync": : :"memory");
return 0;
}
@@ -169,7 +169,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
set_the_pte:
set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
- smp_wmb();
+ asm volatile("ptesync": : :"memory");
return 0;
}
diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
index 25a8dd9cd71d..a587f9013988 100644
--- a/arch/powerpc/mm/pkeys.c
+++ b/arch/powerpc/mm/pkeys.c
@@ -81,13 +81,17 @@ int pkey_initialize(void)
scan_pkey_feature();
/*
- * Let's assume 32 pkeys on P8 bare metal, if its not defined by device
- * tree. We make this exception since skiboot forgot to expose this
- * property on power8.
+ * Let's assume 32 pkeys on P8/P9 bare metal, if it's not defined by the
+ * device tree. We make this exception since some versions of skiboot
+ * forgot to expose this property on power8/9.
*/
- if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR) &&
- cpu_has_feature(CPU_FTRS_POWER8))
- pkeys_total = 32;
+ if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR)) {
+ unsigned long pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E ||
+ PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9)
+ pkeys_total = 32;
+ }
/*
* Adjust the upper limit, based on the number of bits supported by
@@ -365,12 +369,14 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute)
return true;
pkey_shift = pkeyshift(pkey);
- if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift)))
- return true;
+ if (execute)
+ return !(read_iamr() & (IAMR_EX_BIT << pkey_shift));
+
+ amr = read_amr();
+ if (write)
+ return !(amr & (AMR_WR_BIT << pkey_shift));
- amr = read_amr(); /* Delay reading amr until absolutely needed */
- return ((!write && !(amr & (AMR_RD_BIT << pkey_shift))) ||
- (write && !(amr & (AMR_WR_BIT << pkey_shift))));
+ return !(amr & (AMR_RD_BIT << pkey_shift));
}
bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
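
The rewritten pkey_access_permitted() above replaces the combined boolean expression with one early return per access type, reading IAMR only for execute checks and AMR only when it is actually needed. Re-stated on its own, same logic as the hunk, shown here purely for readability:

static bool pkey_allows(int pkey, bool write, bool execute)
{
	int shift = pkeyshift(pkey);
	u64 amr;

	if (execute)
		return !(read_iamr() & (IAMR_EX_BIT << shift));

	amr = read_amr();
	if (write)
		return !(amr & (AMR_WR_BIT << shift));

	return !(amr & (AMR_RD_BIT << shift));
}
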
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 1749f15fc070..80b8fc4173de 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -598,19 +598,29 @@ static void do_exit_flush_lazy_tlb(void *arg)
struct mm_struct *mm = arg;
unsigned long pid = mm->context.id;
+ /*
+ * A kthread could have done a mmget_not_zero() after the flushing CPU
+ * checked mm_is_singlethreaded, and be in the process of
+ * kthread_use_mm when interrupted here. In that case, current->mm will
+ * be set to mm, because kthread_use_mm() sets ->mm and switches to
+ * the mm with interrupts off.
+ */
if (current->mm == mm)
- return; /* Local CPU */
+ goto out_flush;
if (current->active_mm == mm) {
- /*
- * Must be a kernel thread because sender is single-threaded.
- */
- BUG_ON(current->mm);
+ WARN_ON_ONCE(current->mm != NULL);
+ /* Is a kernel thread and is using mm as the lazy tlb */
mmgrab(&init_mm);
- switch_mm(mm, &init_mm, current);
current->active_mm = &init_mm;
+ switch_mm_irqs_off(mm, &init_mm, current);
mmdrop(mm);
}
+
+ atomic_dec(&mm->context.active_cpus);
+ cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
+
+out_flush:
_tlbiel_pid(pid, RIC_FLUSH_ALL);
}
@@ -625,7 +635,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
*/
smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
(void *)mm, 1);
- mm_reset_thread_local(mm);
}
void radix__flush_tlb_mm(struct mm_struct *mm)
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index e066a658acac..56f58a362ea5 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -402,7 +402,7 @@ _GLOBAL(set_context)
* extern void loadcam_entry(unsigned int index)
*
* Load TLBCAM[index] entry in to the L2 CAM MMU
- * Must preserve r7, r8, r9, and r10
+ * Must preserve r7, r8, r9, r10 and r11
*/
_GLOBAL(loadcam_entry)
mflr r5
@@ -438,6 +438,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
*/
_GLOBAL(loadcam_multi)
mflr r8
+ /* Don't switch to AS=1 if already there */
+ mfmsr r11
+ andi. r11,r11,MSR_IS
+ bne 10f
/*
* Set up temporary TLB entry that is the same as what we're
@@ -463,6 +467,7 @@ _GLOBAL(loadcam_multi)
mtmsr r6
isync
+10:
mr r9,r3
add r10,r3,r4
2: bl loadcam_entry
@@ -471,6 +476,10 @@ _GLOBAL(loadcam_multi)
mr r3,r9
blt 2b
+ /* Don't return to AS=0 if we were in AS=1 at function start */
+ andi. r11,r11,MSR_IS
+ bne 3f
+
/* Return to AS=0 and clear the temporary entry */
mfmsr r6
rlwinm. r6,r6,0,~(MSR_IS|MSR_DS)
@@ -486,6 +495,7 @@ _GLOBAL(loadcam_multi)
tlbwe
isync
+3:
mtlr r8
blr
#endif
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 4004dbdab9c7..091bdeaf02a3 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2046,7 +2046,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
left += period;
if (left <= 0)
left = period;
- record = siar_valid(regs);
+
+ /*
+ * If address is not requested in the sample via
+ * PERF_SAMPLE_IP, just record that sample irrespective
+ * of SIAR valid check.
+ */
+ if (event->attr.sample_type & PERF_SAMPLE_IP)
+ record = siar_valid(regs);
+ else
+ record = 1;
+
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
@@ -2059,6 +2069,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
perf_event_update_userpage(event);
/*
+ * Due to hardware limitation, sometimes SIAR could sample a kernel
+ * address even when freeze on supervisor state (kernel) is set in
+ * MMCR2. Check attr.exclude_kernel and address to drop the sample in
+ * these cases.
+ */
+ if (event->attr.exclude_kernel &&
+ (event->attr.sample_type & PERF_SAMPLE_IP) &&
+ is_kernel_addr(mfspr(SPRN_SIAR)))
+ record = 0;
+
+ /*
* Finally record data if requested.
*/
if (record) {
@@ -2087,6 +2108,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
+ } else if (period) {
+ /* Account for interrupt in case of invalid SIAR */
+ if (perf_event_account_interrupt(event))
+ power_pmu_stop(event, 0);
}
}
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 72238eedc360..2bb798918483 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -1413,16 +1413,6 @@ static void h_24x7_event_read(struct perf_event *event)
h24x7hw = &get_cpu_var(hv_24x7_hw);
h24x7hw->events[i] = event;
put_cpu_var(h24x7hw);
- /*
- * Clear the event count so we can compute the _change_
- * in the 24x7 raw counter value at the end of the txn.
- *
- * Note that we could alternatively read the 24x7 value
- * now and save its value in event->hw.prev_count. But
- * that would require issuing a hcall, which would then
- * defeat the purpose of using the txn interface.
- */
- local64_set(&event->count, 0);
}
put_cpu_var(hv_24x7_reqb);
diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
index e608f9db12dd..8965b4463d43 100644
--- a/arch/powerpc/perf/hv-gpci-requests.h
+++ b/arch/powerpc/perf/hv-gpci-requests.h
@@ -95,7 +95,7 @@ REQUEST(__field(0, 8, partition_id)
#define REQUEST_NAME system_performance_capabilities
#define REQUEST_NUM 0x40
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
#include I(REQUEST_BEGIN)
REQUEST(__field(0, 1, perf_collect_privileged)
__field(0x1, 1, capability_mask)
@@ -223,7 +223,7 @@ REQUEST(__field(0, 2, partition_id)
#define REQUEST_NAME system_hypervisor_times
#define REQUEST_NUM 0xF0
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
#include I(REQUEST_BEGIN)
REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
__count(0x8, 8, time_spent_processing_virtual_processor_timers)
@@ -234,7 +234,7 @@ REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
#define REQUEST_NAME system_tlbie_count_and_time
#define REQUEST_NUM 0xF4
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
#include I(REQUEST_BEGIN)
REQUEST(__count(0, 8, tlbie_instructions_issued)
/*
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 053b8e9aa9e7..a1ff4142cc6a 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -273,6 +273,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
mask |= CNST_PMC_MASK(pmc);
value |= CNST_PMC_VAL(pmc);
+
+ /*
+ * PMC5 and PMC6 are used to count cycles and instructions and
+ * they do not support most of the constraint bits. Add a check
+ * to exclude PMC5/6 from most of the constraints except for
+ * EBB/BHRB.
+ */
+ if (pmc >= 5)
+ goto ebb_bhrb;
}
if (pmc <= 4) {
@@ -331,6 +340,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
}
}
+ebb_bhrb:
if (!pmc && ebb)
/* EBB events must specify the PMC */
return -1;
@@ -349,8 +359,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
* EBB events are pinned & exclusive, so this should never actually
* hit, but we leave it as a fallback in case.
*/
- mask |= CNST_EBB_VAL(ebb);
- value |= CNST_EBB_MASK;
+ mask |= CNST_EBB_MASK;
+ value |= CNST_EBB_VAL(ebb);
*maskp = mask;
*valp = value;
diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c
index 5aca523551ae..2f237027fdcc 100644
--- a/arch/powerpc/platforms/4xx/pci.c
+++ b/arch/powerpc/platforms/4xx/pci.c
@@ -1242,7 +1242,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
if (mbase == NULL) {
printk(KERN_ERR "%pOF: Can't map internal config space !",
port->node);
- goto done;
+ return;
}
while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA)
@@ -1252,9 +1252,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
}
if (attempt)
port->link = 1;
-done:
iounmap(mbase);
-
}
static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
index 3a9969c429b3..054f927bfef9 100644
--- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
+++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
@@ -181,7 +181,7 @@ sram_code:
udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
mullw r12, r12, r11
mftb r13 /* start */
- addi r12, r13, r12 /* end */
+ add r12, r13, r12 /* end */
1:
mftb r13 /* current */
cmp cr0, r13, r12
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 14ef17e10ec9..9914544e6677 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -238,12 +238,11 @@ config TAU
temperature within 2-4 degrees Celsius. This option shows the current
on-die temperature in /proc/cpuinfo if the cpu supports it.
- Unfortunately, on some chip revisions, this sensor is very inaccurate
- and in many cases, does not work at all, so don't assume the cpu
- temp is actually what /proc/cpuinfo says it is.
+ Unfortunately, this sensor is very inaccurate when uncalibrated, so
+ don't assume the cpu temp is actually what /proc/cpuinfo says it is.
config TAU_INT
- bool "Interrupt driven TAU driver (DANGEROUS)"
+ bool "Interrupt driven TAU driver (EXPERIMENTAL)"
depends on TAU
---help---
The TAU supports an interrupt driven mode which causes an interrupt
@@ -251,12 +250,7 @@ config TAU_INT
to get notified the temp has exceeded a range. With this option off,
a timer is used to re-check the temperature periodically.
- However, on some cpus it appears that the TAU interrupt hardware
- is buggy and can cause a situation which would lead unexplained hard
- lockups.
-
- Unless you are extending the TAU driver, or enjoy kernel/hardware
- debugging, leave this option off.
+ If in doubt, say N here.
config TAU_AVERAGE
bool "Average high and low temp"
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 9f5958f16923..741a8fa8a3e6 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -46,6 +46,7 @@ config SPU_FS
tristate "SPU file system"
default m
depends on PPC_CELL
+ depends on COREDUMP
select SPU_BASE
help
The SPU file system is used to access Synergistic Processing
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 43e7b93f27c7..d16adcd93921 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1991,8 +1991,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
- int ret;
struct spu_context *ctx = file->private_data;
+ u32 stat, data;
+ int ret;
if (!access_ok(VERIFY_WRITE, buf, len))
return -EFAULT;
@@ -2001,11 +2002,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_mbox_info_read(ctx, buf, len, pos);
+ stat = ctx->csa.prob.mb_stat_R;
+ data = ctx->csa.prob.pu_mb_R;
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ /* EOF if there's no entry in the mbox */
+ if (!(stat & 0x0000ff))
+ return 0;
+
+ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
static const struct file_operations spufs_mbox_info_fops = {
@@ -2032,6 +2038,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
+ u32 stat, data;
int ret;
if (!access_ok(VERIFY_WRITE, buf, len))
@@ -2041,11 +2048,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_ibox_info_read(ctx, buf, len, pos);
+ stat = ctx->csa.prob.mb_stat_R;
+ data = ctx->csa.priv2.puint_mb_R;
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ /* EOF if there's no entry in the ibox */
+ if (!(stat & 0xff0000))
+ return 0;
+
+ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
static const struct file_operations spufs_ibox_info_fops = {
@@ -2054,6 +2066,11 @@ static const struct file_operations spufs_ibox_info_fops = {
.llseek = generic_file_llseek,
};
+static size_t spufs_wbox_info_cnt(struct spu_context *ctx)
+{
+ return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32);
+}
+
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
{
@@ -2062,7 +2079,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
u32 wbox_stat;
wbox_stat = ctx->csa.prob.mb_stat_R;
- cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
+ cnt = spufs_wbox_info_cnt(ctx);
for (i = 0; i < cnt; i++) {
data[i] = ctx->csa.spu_mailbox_data[i];
}
@@ -2075,7 +2092,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- int ret;
+ u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
+ int ret, count;
if (!access_ok(VERIFY_WRITE, buf, len))
return -EFAULT;
@@ -2084,11 +2102,13 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_wbox_info_read(ctx, buf, len, pos);
+ count = spufs_wbox_info_cnt(ctx);
+ memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ return simple_read_from_buffer(buf, len, pos, &data,
+ count * sizeof(u32));
}
static const struct file_operations spufs_wbox_info_fops = {
@@ -2097,27 +2117,33 @@ static const struct file_operations spufs_wbox_info_fops = {
.llseek = generic_file_llseek,
};
-static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
- char __user *buf, size_t len, loff_t *pos)
+static void spufs_get_dma_info(struct spu_context *ctx,
+ struct spu_dma_info *info)
{
- struct spu_dma_info info;
- struct mfc_cq_sr *qp, *spuqp;
int i;
- info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
- info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
- info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
- info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
- info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
+ info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
+ info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
+ info->dma_info_status = ctx->csa.spu_chnldata_RW[24];
+ info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
+ info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
for (i = 0; i < 16; i++) {
- qp = &info.dma_info_command_data[i];
- spuqp = &ctx->csa.priv2.spuq[i];
+ struct mfc_cq_sr *qp = &info->dma_info_command_data[i];
+ struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i];
qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
}
+}
+
+static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
+ char __user *buf, size_t len, loff_t *pos)
+{
+ struct spu_dma_info info;
+
+ spufs_get_dma_info(ctx, &info);
return simple_read_from_buffer(buf, len, pos, &info,
sizeof info);
@@ -2127,6 +2153,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
+ struct spu_dma_info info;
int ret;
if (!access_ok(VERIFY_WRITE, buf, len))
@@ -2136,11 +2163,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_dma_info_read(ctx, buf, len, pos);
+ spufs_get_dma_info(ctx, &info);
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ return simple_read_from_buffer(buf, len, pos, &info,
+ sizeof(info));
}
static const struct file_operations spufs_dma_info_fops = {
@@ -2149,13 +2177,31 @@ static const struct file_operations spufs_dma_info_fops = {
.llseek = no_llseek,
};
+static void spufs_get_proxydma_info(struct spu_context *ctx,
+ struct spu_proxydma_info *info)
+{
+ int i;
+
+ info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
+ info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
+ info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
+
+ for (i = 0; i < 8; i++) {
+ struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
+ struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];
+
+ qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
+ qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
+ qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
+ qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
+ }
+}
+
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
{
struct spu_proxydma_info info;
- struct mfc_cq_sr *qp, *puqp;
int ret = sizeof info;
- int i;
if (len < ret)
return -EINVAL;
@@ -2163,18 +2209,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
if (!access_ok(VERIFY_WRITE, buf, len))
return -EFAULT;
- info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
- info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
- info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
- for (i = 0; i < 8; i++) {
- qp = &info.proxydma_info_command_data[i];
- puqp = &ctx->csa.priv2.puq[i];
-
- qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
- qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
- qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
- qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
- }
+ spufs_get_proxydma_info(ctx, &info);
return simple_read_from_buffer(buf, len, pos, &info,
sizeof info);
@@ -2184,17 +2219,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
+ struct spu_proxydma_info info;
int ret;
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
+ spufs_get_proxydma_info(ctx, &info);
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ return simple_read_from_buffer(buf, len, pos, &info,
+ sizeof(info));
}
static const struct file_operations spufs_proxydma_info_fops = {
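
Each of the *_info_read() handlers above now copies the saved registers into a local snapshot while holding csa.register_lock and only calls simple_read_from_buffer(), which may fault, after the lock is dropped. The resulting shape, condensed from the hunks:

static ssize_t info_read_sketch(struct file *file, char __user *buf,
				size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_dma_info info;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	spufs_get_dma_info(ctx, &info);		/* snapshot under the lock */
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	/* Copy to user space only after the lock is released. */
	return simple_read_from_buffer(buf, len, pos, &info, sizeof(info));
}
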
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index b7f937563827..d1fee2d35b49 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -299,23 +299,6 @@ static int __init maple_probe(void)
return 1;
}
-define_machine(maple) {
- .name = "Maple",
- .probe = maple_probe,
- .setup_arch = maple_setup_arch,
- .init_IRQ = maple_init_IRQ,
- .pci_irq_fixup = maple_pci_irq_fixup,
- .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
- .restart = maple_restart,
- .halt = maple_halt,
- .get_boot_time = maple_get_boot_time,
- .set_rtc_time = maple_set_rtc_time,
- .get_rtc_time = maple_get_rtc_time,
- .calibrate_decr = generic_calibrate_decr,
- .progress = maple_progress,
- .power_save = power4_idle,
-};
-
#ifdef CONFIG_EDAC
/*
* Register a platform device for CPC925 memory controller on
@@ -372,3 +355,20 @@ static int __init maple_cpc925_edac_setup(void)
}
machine_device_initcall(maple, maple_cpc925_edac_setup);
#endif
+
+define_machine(maple) {
+ .name = "Maple",
+ .probe = maple_probe,
+ .setup_arch = maple_setup_arch,
+ .init_IRQ = maple_init_IRQ,
+ .pci_irq_fixup = maple_pci_irq_fixup,
+ .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
+ .restart = maple_restart,
+ .halt = maple_halt,
+ .get_boot_time = maple_get_boot_time,
+ .set_rtc_time = maple_set_rtc_time,
+ .get_rtc_time = maple_get_rtc_time,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = maple_progress,
+ .power_save = power4_idle,
+};
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index 84d038ed3882..ce6597a29bc9 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -33,6 +33,7 @@ struct memtrace_entry {
char name[16];
};
+static DEFINE_MUTEX(memtrace_mutex);
static u64 memtrace_size;
static struct memtrace_entry *memtrace_array;
@@ -70,6 +71,23 @@ static int change_memblock_state(struct memory_block *mem, void *arg)
return 0;
}
+static void memtrace_clear_range(unsigned long start_pfn,
+ unsigned long nr_pages)
+{
+ unsigned long pfn;
+
+ /*
+ * The pages are offline, so we cannot trust the memmap anymore. As
+ * HIGHMEM does not apply, avoid passing around "struct page" and use
+ * clear_page() directly.
+ */
+ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+ if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
+ cond_resched();
+ clear_page(__va(PFN_PHYS(pfn)));
+ }
+}
+
/* called with device_hotplug_lock held */
static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
{
@@ -115,6 +133,11 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
/*
+ * Clear the range while we still have a linear
+ * mapping.
+ */
+ memtrace_clear_range(base_pfn, nr_pages);
+ /*
* Remove memory in memory block size chunks so that
* iomem resources are always split to the same size and
* we never try to remove memory that spans two iomem
@@ -272,6 +295,7 @@ static int memtrace_online(void)
static int memtrace_enable_set(void *data, u64 val)
{
+ int rc = -EAGAIN;
u64 bytes;
/*
@@ -284,25 +308,31 @@ static int memtrace_enable_set(void *data, u64 val)
return -EINVAL;
}
+ mutex_lock(&memtrace_mutex);
+
/* Re-add/online previously removed/offlined memory */
if (memtrace_size) {
if (memtrace_online())
- return -EAGAIN;
+ goto out_unlock;
}
- if (!val)
- return 0;
+ if (!val) {
+ rc = 0;
+ goto out_unlock;
+ }
/* Offline and remove memory */
if (memtrace_init_regions_runtime(val))
- return -EINVAL;
+ goto out_unlock;
if (memtrace_init_debugfs())
- return -EINVAL;
+ goto out_unlock;
memtrace_size = val;
-
- return 0;
+ rc = 0;
+out_unlock:
+ mutex_unlock(&memtrace_mutex);
+ return rc;
}
static int memtrace_enable_get(void *data, u64 *val)
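
memtrace_clear_range() above scrubs the pages by physical frame number while the linear mapping still covers them, yielding once per memory section so large ranges do not monopolise the CPU. The loop, isolated for clarity:

static void clear_pfn_range(unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
			cond_resched();		/* be kind on huge ranges */
		clear_page(__va(PFN_PHYS(pfn)));
	}
}
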
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 198143833f00..1dc2122a3cf5 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -322,15 +322,14 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
return count;
}
-static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
- uint32_t type)
+static void create_dump_obj(uint32_t id, size_t size, uint32_t type)
{
struct dump_obj *dump;
int rc;
dump = kzalloc(sizeof(*dump), GFP_KERNEL);
if (!dump)
- return NULL;
+ return;
dump->kobj.kset = dump_kset;
@@ -350,21 +349,39 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
if (rc) {
kobject_put(&dump->kobj);
- return NULL;
+ return;
}
+ /*
+ * As soon as the sysfs file for this dump is created/activated there is
+ * a chance the opal_errd daemon (or any userspace) might read and
+ * acknowledge the dump before kobject_uevent() is called. If that
+ * happens then there is a potential race between
+ * dump_ack_store->kobject_put() and kobject_uevent() which leads to a
+ * use-after-free of a kernfs object resulting in a kernel crash.
+ *
+ * To avoid that, we need to take a reference on behalf of the bin file,
+ * so that our reference remains valid while we call kobject_uevent().
+ * We then drop our reference before exiting the function, leaving the
+ * bin file to drop the last reference (if it hasn't already).
+ */
+
+ /* Take a reference for the bin file */
+ kobject_get(&dump->kobj);
rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
- if (rc) {
+ if (rc == 0) {
+ kobject_uevent(&dump->kobj, KOBJ_ADD);
+
+ pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
+ __func__, dump->id, dump->size);
+ } else {
+ /* Drop reference count taken for bin file */
kobject_put(&dump->kobj);
- return NULL;
}
- pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
- __func__, dump->id, dump->size);
-
- kobject_uevent(&dump->kobj, KOBJ_ADD);
-
- return dump;
+ /* Drop our reference */
+ kobject_put(&dump->kobj);
+ return;
}
static irqreturn_t process_dump(int irq, void *data)
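
Both create_dump_obj() here and the matching elog change below take an extra kobject reference on behalf of the sysfs bin file, so the uevent can be emitted without racing against userspace acknowledging, and thereby freeing, the object. The ordering, reduced to its essentials (names follow the dump variant):

static void publish(struct dump_obj *dump)
{
	int rc;

	kobject_get(&dump->kobj);		/* ref held for the bin file */
	rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
	if (rc == 0)
		kobject_uevent(&dump->kobj, KOBJ_ADD);	/* safe: ref still held */
	else
		kobject_put(&dump->kobj);	/* hand back the bin-file ref */
	kobject_put(&dump->kobj);		/* drop our own reference */
}
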
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index ba6e437abb4b..398a06631456 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -183,14 +183,14 @@ static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj,
return count;
}
-static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
+static void create_elog_obj(uint64_t id, size_t size, uint64_t type)
{
struct elog_obj *elog;
int rc;
elog = kzalloc(sizeof(*elog), GFP_KERNEL);
if (!elog)
- return NULL;
+ return;
elog->kobj.kset = elog_kset;
@@ -223,18 +223,37 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
rc = kobject_add(&elog->kobj, NULL, "0x%llx", id);
if (rc) {
kobject_put(&elog->kobj);
- return NULL;
+ return;
}
+ /*
+ * As soon as the sysfs file for this elog is created/activated there is
+ * a chance the opal_errd daemon (or any userspace) might read and
+ * acknowledge the elog before kobject_uevent() is called. If that
+ * happens then there is a potential race between
+ * elog_ack_store->kobject_put() and kobject_uevent() which leads to a
+ * use-after-free of a kernfs object resulting in a kernel crash.
+ *
+ * To avoid that, we need to take a reference on behalf of the bin file,
+ * so that our reference remains valid while we call kobject_uevent().
+ * We then drop our reference before exiting the function, leaving the
+ * bin file to drop the last reference (if it hasn't already).
+ */
+
+ /* Take a reference for the bin file */
+ kobject_get(&elog->kobj);
rc = sysfs_create_bin_file(&elog->kobj, &elog->raw_attr);
- if (rc) {
+ if (rc == 0) {
+ kobject_uevent(&elog->kobj, KOBJ_ADD);
+ } else {
+ /* Drop the reference taken for the bin file */
kobject_put(&elog->kobj);
- return NULL;
}
- kobject_uevent(&elog->kobj, KOBJ_ADD);
+ /* Drop our reference */
+ kobject_put(&elog->kobj);
- return elog;
+ return;
}
static irqreturn_t elog_event(int irq, void *data)
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 649fb268f446..5399682797d0 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -63,10 +63,6 @@ static void export_imc_mode_and_cmd(struct device_node *node,
imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
- /*
- * Return here, either because 'imc' directory already exists,
- * Or failed to create a new one.
- */
if (!imc_debugfs_parent)
return;
@@ -139,7 +135,6 @@ static int imc_get_mem_addr_nest(struct device_node *node,
}
pmu_ptr->imc_counter_mmaped = true;
- export_imc_mode_and_cmd(node, pmu_ptr);
kfree(base_addr_arr);
kfree(chipid_arr);
return 0;
@@ -155,7 +150,7 @@ error:
* and domain as the inputs.
* Allocates memory for the struct imc_pmu, sets up its domain, size and offsets
*/
-static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
+static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
{
int ret = 0;
struct imc_pmu *pmu_ptr;
@@ -163,27 +158,23 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
/* Return for unknown domain */
if (domain < 0)
- return -EINVAL;
+ return NULL;
/* memory for pmu */
pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
if (!pmu_ptr)
- return -ENOMEM;
+ return NULL;
/* Set the domain */
pmu_ptr->domain = domain;
ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
- if (ret) {
- ret = -EINVAL;
+ if (ret)
goto free_pmu;
- }
if (!of_property_read_u32(parent, "offset", &offset)) {
- if (imc_get_mem_addr_nest(parent, pmu_ptr, offset)) {
- ret = -EINVAL;
+ if (imc_get_mem_addr_nest(parent, pmu_ptr, offset))
goto free_pmu;
- }
}
/* Function to register IMC pmu */
@@ -194,14 +185,14 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
if (pmu_ptr->domain == IMC_DOMAIN_NEST)
kfree(pmu_ptr->mem_info);
kfree(pmu_ptr);
- return ret;
+ return NULL;
}
- return 0;
+ return pmu_ptr;
free_pmu:
kfree(pmu_ptr);
- return ret;
+ return NULL;
}
static void disable_nest_pmu_counters(void)
@@ -258,6 +249,7 @@ int get_max_nest_dev(void)
static int opal_imc_counters_probe(struct platform_device *pdev)
{
struct device_node *imc_dev = pdev->dev.of_node;
+ struct imc_pmu *pmu;
int pmu_count = 0, domain;
bool core_imc_reg = false, thread_imc_reg = false;
u32 type;
@@ -273,6 +265,7 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
}
for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
+ pmu = NULL;
if (of_property_read_u32(imc_dev, "type", &type)) {
pr_warn("IMC Device without type property\n");
continue;
@@ -294,9 +287,13 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
break;
}
- if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
- if (domain == IMC_DOMAIN_NEST)
+ pmu = imc_pmu_create(imc_dev, pmu_count, domain);
+ if (pmu != NULL) {
+ if (domain == IMC_DOMAIN_NEST) {
+ if (!imc_debugfs_parent)
+ export_imc_mode_and_cmd(imc_dev, pmu);
pmu_count++;
+ }
if (domain == IMC_DOMAIN_CORE)
core_imc_reg = true;
if (domain == IMC_DOMAIN_THREAD)
@@ -304,10 +301,6 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
}
}
- /* If none of the nest units are registered, remove debugfs interface */
- if (pmu_count == 0)
- debugfs_remove_recursive(imc_debugfs_parent);
-
/* If core imc is not registered, unregister thread-imc */
if (!core_imc_reg && thread_imc_reg)
unregister_thread_imc();
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index adddde023622..5068dd7f6e74 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -125,12 +125,29 @@ static void pnv_setup_rfi_flush(void)
type = L1D_FLUSH_ORI;
}
+ /*
+ * If we are non-Power9 bare metal, we don't need to flush on kernel
+ * entry or after user access: they fix a P9-specific vulnerability.
+ */
+ if (!pvr_version_is(PVR_POWER9)) {
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
+ }
+
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
(security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \
security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
setup_rfi_flush(type, enable);
setup_count_cache_flush();
+
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
+ setup_entry_flush(enable);
+
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
+ setup_uaccess_flush(enable);
}
static void __init pnv_setup_arch(void)
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 3d3c989e44dd..889c3dbec6fb 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -47,7 +47,7 @@
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
-#define DBG(fmt...)
+#define DBG(fmt...) do { } while (0)
#endif
static void pnv_smp_setup_cpu(int cpu)
@@ -171,7 +171,6 @@ static void pnv_smp_cpu_kill_self(void)
/* Standard hot unplug procedure */
idle_task_exit();
- current->active_mm = NULL; /* for sanity */
cpu = smp_processor_id();
DBG("CPU%d offline\n", cpu);
generic_set_cpu_dead(cpu);
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 8c7009d001d9..894f62d77a77 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -212,13 +212,14 @@ void ps3_mm_vas_destroy(void)
{
int result;
- DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id);
-
if (map.vas_id) {
result = lv1_select_virtual_address_space(0);
- BUG_ON(result);
- result = lv1_destruct_virtual_address_space(map.vas_id);
- BUG_ON(result);
+ result += lv1_destruct_virtual_address_space(map.vas_id);
+
+ if (result) {
+ lv1_panic(0);
+ }
+
map.vas_id = 0;
}
}
@@ -316,19 +317,20 @@ static void ps3_mm_region_destroy(struct mem_region *r)
int result;
if (!r->destroy) {
- pr_info("%s:%d: Not destroying high region: %llxh %llxh\n",
- __func__, __LINE__, r->base, r->size);
return;
}
- DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);
-
if (r->base) {
result = lv1_release_memory(r->base);
- BUG_ON(result);
+
+ if (result) {
+ lv1_panic(0);
+ }
+
r->size = r->base = r->offset = 0;
map.total = map.rm.size;
}
+
ps3_mm_set_repository_highmem(NULL);
}
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index c5ffcadab730..90fd03b9d3c2 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -132,7 +132,6 @@ void dlpar_free_cc_nodes(struct device_node *dn)
#define NEXT_PROPERTY 3
#define PREV_PARENT 4
#define MORE_MEMORY 5
-#define CALL_AGAIN -2
#define ERR_CFG_USE -9003
struct device_node *dlpar_configure_connector(__be32 drc_index,
@@ -173,6 +172,9 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
spin_unlock(&rtas_data_buf_lock);
+ if (rtas_busy_delay(rc))
+ continue;
+
switch (rc) {
case COMPLETE:
break;
@@ -221,9 +223,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
last_dn = last_dn->parent;
break;
- case CALL_AGAIN:
- break;
-
case MORE_MEMORY:
case ERR_CFG_USE:
default:
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 1d3f9313c02f..8bfb97d07d10 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -95,9 +95,6 @@ static void rtas_stop_self(void)
BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
- printk("cpu %u (hwid %u) Ready to die...\n",
- smp_processor_id(), hard_smp_processor_id());
-
rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
panic("Alas, I survived.\n");
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index fc01a2c0f8ed..afabe6918619 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -31,7 +31,7 @@ static bool rtas_hp_event;
unsigned long pseries_memory_block_size(void)
{
struct device_node *np;
- unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
+ u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
struct resource r;
np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
@@ -227,7 +227,7 @@ static int get_lmb_range(u32 drc_index, int n_lmbs,
struct drmem_lmb **end_lmb)
{
struct drmem_lmb *lmb, *start, *end;
- struct drmem_lmb *last_lmb;
+ struct drmem_lmb *limit;
start = NULL;
for_each_drmem_lmb(lmb) {
@@ -240,10 +240,10 @@ static int get_lmb_range(u32 drc_index, int n_lmbs,
if (!start)
return -EINVAL;
- end = &start[n_lmbs - 1];
+ end = &start[n_lmbs];
- last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
- if (end > last_lmb)
+ limit = &drmem_info->lmbs[drmem_info->n_lmbs];
+ if (end > limit)
return -EINVAL;
*start_lmb = start;
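The get_lmb_range() change above switches to the standard exclusive-end idiom: 'end' points one past the last requested LMB and is compared against a one-past-the-end limit, the last pointer C permits forming. A stand-alone sketch of the same validation and iteration pattern (items/get_range are illustrative names):

#include <stdio.h>

#define N_ITEMS 8

static int items[N_ITEMS];

static int get_range(int first, int count, int **start, int **end)
{
        if (first < 0 || count <= 0 || first > N_ITEMS - count)
                return -1;              /* would run past the array */
        *start = &items[first];
        *end = &items[first + count];   /* exclusive end: at most one past the array */
        return 0;
}

int main(void)
{
        int *s, *e, i;

        for (i = 0; i < N_ITEMS; i++)
                items[i] = i;

        if (get_range(4, 4, &s, &e) == 0)       /* items 4..7: fits exactly */
                for (; s < e; s++)
                        printf("%d ", *s);
        printf("\n");

        /* five items starting at 4 would need index 8, which does not exist */
        printf("range 4..8 valid? %s\n", get_range(4, 5, &s, &e) ? "no" : "yes");
        return 0;
}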
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 49e3a88b6a0c..d660a90616cd 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -1056,7 +1056,7 @@ static int __init vpa_debugfs_init(void)
{
char name[16];
long i;
- static struct dentry *vpa_dir;
+ struct dentry *vpa_dir;
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return 0;
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 561917fa54a8..afca4b737e80 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -66,6 +66,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic);
int remove_phb_dynamic(struct pci_controller *phb)
{
struct pci_bus *b = phb->bus;
+ struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge);
struct resource *res;
int rc, i;
@@ -92,7 +93,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
/* Remove the PCI bus and unregister the bridge device from sysfs */
phb->bus = NULL;
pci_remove_bus(b);
- device_unregister(b->bridge);
+ host_bridge->bus = NULL;
+ device_unregister(&host_bridge->dev);
/* Now release the IO resource */
if (res->flags & IORESOURCE_IO)
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 851ce326874a..e827108680f2 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -118,7 +118,6 @@ static void handle_system_shutdown(char event_modifier)
case EPOW_SHUTDOWN_ON_UPS:
pr_emerg("Loss of system power detected. System is running on"
" UPS/battery. Check RTAS error log for details\n");
- orderly_poweroff(true);
break;
case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
@@ -328,10 +327,11 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
/*
* Some versions of FWNMI place the buffer inside the 4kB page starting at
* 0x7000. Other versions place it inside the rtas buffer. We check both.
+ * Minimum size of the buffer is 16 bytes.
*/
#define VALID_FWNMI_BUFFER(A) \
- ((((A) >= 0x7000) && ((A) < 0x7ff0)) || \
- (((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16))))
+ ((((A) >= 0x7000) && ((A) <= 0x8000 - 16)) || \
+ (((A) >= rtas.base) && ((A) <= (rtas.base + rtas.size - 16))))
static inline struct rtas_error_log *fwnmi_get_errlog(void)
{
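The widened VALID_FWNMI_BUFFER() check accepts a buffer of at least 16 bytes as long as it fits entirely inside the 4kB window at 0x7000 or inside the RTAS region; the previous '<' comparisons rejected a buffer ending exactly at the boundary. A user-space rendering of the new check, with placeholder rtas_base/rtas_size values:

#include <stdio.h>

#define MIN_BUF         16
#define PAGE_START      0x7000UL
#define PAGE_END        0x8000UL

static unsigned long rtas_base = 0x20000UL;     /* sample value */
static unsigned long rtas_size = 0x1000UL;      /* sample value */

static int valid_fwnmi_buffer(unsigned long a)
{
        return (a >= PAGE_START && a <= PAGE_END - MIN_BUF) ||
               (a >= rtas_base && a <= rtas_base + rtas_size - MIN_BUF);
}

int main(void)
{
        unsigned long addrs[] = { 0x7000, 0x7ff0, 0x7ff1, 0x20ff0, 0x21000 };
        unsigned int i;

        for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
                printf("%#lx -> %s\n", addrs[i],
                       valid_fwnmi_buffer(addrs[i]) ? "valid" : "invalid");
        return 0;
}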
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
index 31ca557af60b..262b8c5e1b9d 100644
--- a/arch/powerpc/platforms/pseries/rng.c
+++ b/arch/powerpc/platforms/pseries/rng.c
@@ -40,6 +40,7 @@ static __init int rng_init(void)
ppc_md.get_random_seed = pseries_get_random_long;
+ of_node_put(dn);
return 0;
}
machine_subsys_initcall(pseries, rng_init);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c2d318d1df02..2e0d38cafdd4 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -565,6 +565,14 @@ void pseries_setup_rfi_flush(void)
setup_rfi_flush(types, enable);
setup_count_cache_flush();
+
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
+ setup_entry_flush(enable);
+
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
+ setup_uaccess_flush(enable);
}
#ifdef CONFIG_PCI_IOV
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 52a021e1f86b..5414d3295e0a 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -26,7 +26,6 @@
#include <asm/mmu.h>
#include <asm/rtas.h>
#include <asm/topology.h>
-#include "../../kernel/cacheinfo.h"
static u64 stream_id;
static struct device suspend_dev;
@@ -91,9 +90,7 @@ static void pseries_suspend_enable_irqs(void)
* Update configuration which can be modified based on device tree
* changes during resume.
*/
- cacheinfo_cpu_offline(smp_processor_id());
post_mobility_fixup();
- cacheinfo_cpu_online(smp_processor_id());
}
/**
@@ -223,7 +220,6 @@ static struct bus_type suspend_subsys = {
static const struct platform_suspend_ops pseries_suspend_ops = {
.valid = suspend_valid_only_mem,
- .begin = pseries_suspend_begin,
.prepare_late = pseries_prepare_late,
.enter = pseries_suspend_enter,
};
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 280e964e1aa8..497e86cfb12e 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
/* IO map the message register block. */
of_address_to_resource(np, 0, &rsrc);
- msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
+ msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc));
if (!msgr_block_addr) {
dev_err(&dev->dev, "Failed to iomap MPIC message registers");
return -EFAULT;
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index bbc839a98c41..003deaabb568 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -179,6 +179,7 @@ int icp_hv_init(void)
icp_ops = &icp_hv_ops;
+ of_node_put(np);
return 0;
}
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 3c939b9de488..2aa9f3de223c 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
+#include <linux/vmalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
@@ -72,13 +73,6 @@ static u32 xive_ipi_irq;
/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
-/*
- * A "disabled" interrupt should never fire, to catch problems
- * we set its logical number to this
- */
-#define XIVE_BAD_IRQ 0x7fffffff
-#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
-
/* An invalid CPU target */
#define XIVE_INVALID_TARGET (-1)
@@ -940,12 +934,16 @@ EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
if (xd->eoi_mmio) {
+ unmap_kernel_range((unsigned long)xd->eoi_mmio,
+ 1u << xd->esb_shift);
iounmap(xd->eoi_mmio);
if (xd->eoi_mmio == xd->trig_mmio)
xd->trig_mmio = NULL;
xd->eoi_mmio = NULL;
}
if (xd->trig_mmio) {
+ unmap_kernel_range((unsigned long)xd->trig_mmio,
+ 1u << xd->esb_shift);
iounmap(xd->trig_mmio);
xd->trig_mmio = NULL;
}
@@ -1074,7 +1072,7 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
xc = per_cpu(xive_cpu, cpu);
/* Check if we are already setup */
- if (xc->hw_ipi != 0)
+ if (xc->hw_ipi != XIVE_BAD_IRQ)
return 0;
/* Grab an IPI from the backend, this will populate xc->hw_ipi */
@@ -1111,7 +1109,7 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
/* Disable the IPI and free the IRQ data */
/* Already cleaned up ? */
- if (xc->hw_ipi == 0)
+ if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
/* Mask the IPI */
@@ -1267,6 +1265,7 @@ static int xive_prepare_cpu(unsigned int cpu)
if (np)
xc->chip_id = of_get_ibm_chip_id(np);
of_node_put(np);
+ xc->hw_ipi = XIVE_BAD_IRQ;
per_cpu(xive_cpu, cpu) = xc;
}
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 6d5b28022452..411f785cdfb5 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
+#include <linux/kmemleak.h>
#include <asm/prom.h>
#include <asm/io.h>
@@ -311,7 +312,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
s64 rc;
/* Free the IPI */
- if (!xc->hw_ipi)
+ if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
for (;;) {
rc = opal_xive_free_irq(xc->hw_ipi);
@@ -319,7 +320,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
msleep(OPAL_BUSY_DELAY_MS);
continue;
}
- xc->hw_ipi = 0;
+ xc->hw_ipi = XIVE_BAD_IRQ;
break;
}
}
@@ -627,6 +628,7 @@ static bool xive_native_provision_pages(void)
pr_err("Failed to allocate provisioning page\n");
return false;
}
+ kmemleak_ignore(p);
opal_xive_donate_page(chip, __pa(p));
}
return true;
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index e3ebf6469392..5566bbc86f4a 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -509,11 +509,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
- if (!xc->hw_ipi)
+ if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
xive_irq_bitmap_free(xc->hw_ipi);
- xc->hw_ipi = 0;
+ xc->hw_ipi = XIVE_BAD_IRQ;
}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
index f34abed0c05f..48808dbb25dc 100644
--- a/arch/powerpc/sysdev/xive/xive-internal.h
+++ b/arch/powerpc/sysdev/xive/xive-internal.h
@@ -9,6 +9,13 @@
#ifndef __XIVE_INTERNAL_H
#define __XIVE_INTERNAL_H
+/*
+ * A "disabled" interrupt should never fire; to catch problems
+ * we set its logical number to this
+ */
+#define XIVE_BAD_IRQ 0x7fffffff
+#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
+
/* Each CPU carry one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
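XIVE_BAD_IRQ exists because hardware IRQ number 0 is a legitimate allocation, so 0 cannot double as the "not allocated" marker that the setup and teardown paths above test for. A small user-space model of the sentinel check (struct cpu_state and the toy allocator are illustrative):

#include <stdio.h>

#define BAD_IRQ 0x7fffffff

struct cpu_state {
        unsigned int hw_ipi;
};

static unsigned int next_irq;   /* pretend allocator; first IRQ handed out is 0 */

static void setup_ipi(struct cpu_state *c)
{
        if (c->hw_ipi != BAD_IRQ)       /* already set up */
                return;
        c->hw_ipi = next_irq++;
        printf("allocated hw IRQ %u\n", c->hw_ipi);
}

static void cleanup_ipi(struct cpu_state *c)
{
        if (c->hw_ipi == BAD_IRQ)       /* already cleaned up */
                return;
        printf("freed hw IRQ %u\n", c->hw_ipi);
        c->hw_ipi = BAD_IRQ;
}

int main(void)
{
        struct cpu_state c = { .hw_ipi = BAD_IRQ };

        setup_ipi(&c);          /* gets IRQ 0, which a zero-based check would miss */
        setup_ipi(&c);          /* no-op: already set up */
        cleanup_ipi(&c);
        cleanup_ipi(&c);        /* no-op: already cleaned up */
        return 0;
}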
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 365e711bebab..ab193cd7dbf9 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -1,9 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for xmon
-# Avoid clang warnings around longjmp/setjmp declarations
-subdir-ccflags-y := -ffreestanding
-
subdir-ccflags-$(CONFIG_PPC_WERROR) += -Werror
GCOV_PROFILE := n
diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c
index d00123421e00..eefe1b94e0aa 100644
--- a/arch/powerpc/xmon/nonstdio.c
+++ b/arch/powerpc/xmon/nonstdio.c
@@ -182,7 +182,7 @@ void xmon_printf(const char *format, ...)
if (n && rc == 0) {
/* No udbg hooks, fallback to printk() - dangerous */
- printk("%s", xmon_outbuf);
+ pr_cont("%s", xmon_outbuf);
}
}
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index d4628e4b3a5e..f4c92c91aa04 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -69,8 +69,16 @@ do { \
* The AQ/RL pair provides a RCpc critical section, but there's not really any
* way we can take advantage of that here because the ordering is only enforced
* on that one lock. Thus, we're just doing a full fence.
+ *
+ * Since we allow writeX to be called from preemptible regions we need at least
+ * an "o" in the predecessor set to ensure device writes are visible before the
+ * task is marked as available for scheduling on a new hart. While I don't see
+ * any concrete reason we need a full IO fence, it seems safer to just upgrade
+ * this in order to avoid any IO crossing a scheduling boundary. In both
+ * instances the scheduler pairs this with an mb(), so nothing is necessary on
+ * the new hart.
*/
-#define smp_mb__after_spinlock() RISCV_FENCE(rw,rw)
+#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
#include <asm-generic/barrier.h>
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index c12833f7b6bd..42978aac99d5 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -187,7 +187,7 @@
" bnez %1, 0b\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
+ : "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
@@ -232,7 +232,7 @@
RISCV_ACQUIRE_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
+ : "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
@@ -278,7 +278,7 @@
" bnez %1, 0b\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
+ : "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
@@ -324,7 +324,7 @@
" fence rw, rw\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
+ : "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index c6dcc5291f97..693c3839a7df 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -10,9 +10,19 @@
#endif
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+/*
+ * Clang prior to 13 had "mcount" instead of "_mcount":
+ * https://reviews.llvm.org/D98881
+ */
+#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
+#define MCOUNT_NAME _mcount
+#else
+#define MCOUNT_NAME mcount
+#endif
+
#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifndef __ASSEMBLY__
-void _mcount(void);
+void MCOUNT_NAME(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
@@ -33,7 +43,7 @@ struct dyn_arch_ftrace {
* both auipc and jalr at the same time.
*/
-#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME)
#define JALR_SIGN_MASK (0x00000800)
#define JALR_OFFSET_MASK (0x00000fff)
#define AUIPC_OFFSET_MASK (0xfffff000)
@@ -63,4 +73,11 @@ do { \
* Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
*/
#define MCOUNT_INSN_SIZE 8
+
+#ifndef __ASSEMBLY__
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif
+
#endif
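The MCOUNT_NAME change above selects the profiling entry symbol at preprocessing time depending on the compiler. A stand-alone sketch of the same technique, keyed off the compiler's own version macros rather than the kernel's CONFIG_* symbols, with stringification to show which name was chosen:

#include <stdio.h>

/* clang before 13 emitted calls to "mcount"; GCC and newer clang use "_mcount" */
#if defined(__clang__) && __clang_major__ < 13
#define MCOUNT_NAME mcount
#else
#define MCOUNT_NAME _mcount
#endif

#define STR(x)  #x
#define XSTR(x) STR(x)

int main(void)
{
        printf("profiling entry symbol: %s\n", XSTR(MCOUNT_NAME));
        return 0;
}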
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 06cfbb3aacbb..abc147aeff8b 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -115,7 +115,10 @@ extern unsigned long min_low_pfn;
#endif /* __ASSEMBLY__ */
-#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
+#define virt_addr_valid(vaddr) ({ \
+ unsigned long _addr = (unsigned long)vaddr; \
+ (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
+})
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index f8fa1cd2dad9..131f58b26b8f 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -20,7 +20,11 @@
#include <linux/const.h>
/* thread information allocation */
+#ifdef CONFIG_64BIT
+#define THREAD_SIZE_ORDER (2)
+#else
#define THREAD_SIZE_ORDER (1)
+#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#ifndef __ASSEMBLY__
diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h
index 1376515547cd..ed7bf7c7add5 100644
--- a/arch/riscv/include/uapi/asm/auxvec.h
+++ b/arch/riscv/include/uapi/asm/auxvec.h
@@ -21,4 +21,7 @@
/* vDSO location */
#define AT_SYSINFO_EHDR 33
+/* entries in ARCH_DLINFO */
+#define AT_VECTOR_SIZE_ARCH 1
+
#endif /* _UAPI_ASM_RISCV_AUXVEC_H */
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index a03821b2656a..d9de22686e27 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -449,6 +449,7 @@ ENDPROC(__fstate_restore)
.section ".rodata"
+ .align LGREG
/* Exception vector table */
ENTRY(excp_vect_table)
RISCV_PTR do_trap_insn_misaligned
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 6d39f64e4dce..fa8530f05ed4 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -88,6 +88,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return __ftrace_modify_call(rec->ip, addr, false);
}
+
+/*
+ * This is called early on, and isn't wrapped by
+ * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
+ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
+ * just directly poke the text, but it's simpler to just take the lock
+ * ourselves.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+ int out;
+
+ ftrace_arch_code_modify_prepare();
+ out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+ ftrace_arch_code_modify_post_process();
+
+ return out;
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 5721624886a1..fabddee90d1b 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -47,8 +47,8 @@
ENTRY(ftrace_stub)
#ifdef CONFIG_DYNAMIC_FTRACE
- .global _mcount
- .set _mcount, ftrace_stub
+ .global MCOUNT_NAME
+ .set MCOUNT_NAME, ftrace_stub
#endif
ret
ENDPROC(ftrace_stub)
@@ -79,7 +79,7 @@ EXPORT_SYMBOL(return_to_handler)
#endif
#ifndef CONFIG_DYNAMIC_FTRACE
-ENTRY(_mcount)
+ENTRY(MCOUNT_NAME)
la t4, ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t0, ftrace_graph_return
@@ -125,6 +125,6 @@ do_trace:
jalr t5
RESTORE_ABI_STATE
ret
-ENDPROC(_mcount)
+ENDPROC(MCOUNT_NAME)
#endif
-EXPORT_SYMBOL(_mcount)
+EXPORT_SYMBOL(MCOUNT_NAME)
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 9713d4e8c22b..6558617bd2ce 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -19,6 +19,7 @@
* to the Free Software Foundation, Inc.,
*/
+#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
@@ -187,6 +188,7 @@ static void __init setup_bootmem(void)
set_max_mapnr(PFN_DOWN(mem_size));
max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_pfn = max_low_pfn;
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index a4b1d94371a0..74b2168d7298 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -75,7 +75,7 @@ static void notrace walk_stackframe(struct task_struct *task,
#else /* !CONFIG_FRAME_POINTER */
-static void notrace walk_stackframe(struct task_struct *task,
+void notrace walk_stackframe(struct task_struct *task,
struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
unsigned long sp, pc;
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index fb03a4482ad6..db44da32701f 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -16,6 +16,7 @@
#include <linux/syscalls.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
+#include <asm-generic/mman-common.h>
static long riscv_sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
@@ -24,6 +25,11 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
{
if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
return -EINVAL;
+
+ if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
+ if (unlikely(!(prot & PROT_READ)))
+ return -EINVAL;
+
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
offset >> (PAGE_SHIFT - page_shift_offset));
}
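A usage sketch for the new riscv_sys_mmap() check: on a kernel carrying this hunk, asking for a mapping that is writable and executable but not readable fails with EINVAL; on kernels without it the call may well succeed, so the output is only informative:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        void *p = mmap(NULL, 4096, PROT_WRITE | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                printf("mmap(PROT_WRITE|PROT_EXEC) failed: %s\n", strerror(errno));
        } else {
                printf("mmap(PROT_WRITE|PROT_EXEC) succeeded at %p\n", p);
                munmap(p, 4096);
        }
        return 0;
}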
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
index 1911c8f6b8a6..15f4ab40e222 100644
--- a/arch/riscv/kernel/time.c
+++ b/arch/riscv/kernel/time.c
@@ -12,6 +12,7 @@
* GNU General Public License for more details.
*/
+#include <linux/of_clk.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <asm/sbi.h>
@@ -29,5 +30,7 @@ void __init time_init(void)
riscv_timebase = prop;
lpj_fine = riscv_timebase / HZ;
+
+ of_clk_init(NULL);
timer_probe();
}
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 87f71a6cd3ef..1dd134fc0d84 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -30,15 +30,15 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
# We also create a special relocatable object that should mirror the symbol
-# table and layout of the linked DSO. With ld -R we can then refer to
-# these symbols in the kernel code rather than hand-coded addresses.
+# table and layout of the linked DSO. With ld --just-symbols we can then
+# refer to these symbols in the kernel code rather than hand-coded addresses.
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
$(call cc-ldoption, -Wl$(comma)--hash-style=both)
$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
$(call if_changed,vdsold)
-LDFLAGS_vdso-syms.o := -r -R
+LDFLAGS_vdso-syms.o := -r --just-symbols
$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
$(call if_changed,ld)
diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
index dd95cdbd22ce..4cbb4b6d85a8 100644
--- a/arch/s390/crypto/arch_random.c
+++ b/arch/s390/crypto/arch_random.c
@@ -53,6 +53,10 @@ static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
{
+ /* max hunk is ARCH_RNG_BUF_SIZE */
+ if (nbytes > ARCH_RNG_BUF_SIZE)
+ return false;
+
/* lock rng buffer */
if (!spin_trylock(&arch_rng_lock))
return false;
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index dad110e9f41b..be8fa30a6ea6 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -30,12 +30,12 @@
#define KVM_USER_MEM_SLOTS 32
/*
- * These seem to be used for allocating ->chip in the routing table,
- * which we don't use. 4096 is an out-of-thin-air value. If we need
- * to look at ->chip later on, we'll need to revisit this.
+ * These seem to be used for allocating ->chip in the routing table, which we
+ * don't use. 1 is as small as we can get to reduce the needed memory. If we
+ * need to look at ->chip later on, we'll need to revisit this.
*/
#define KVM_NR_IRQCHIPS 1
-#define KVM_IRQCHIP_NUM_PINS 4096
+#define KVM_IRQCHIP_NUM_PINS 1
#define KVM_HALT_POLL_NS_DEFAULT 80000
/* s390-specific vcpu->requests bit members */
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 0095ddb58ff6..50f6661ba566 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -29,7 +29,7 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ old__, new__, prev__; \
pcp_op_T__ *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
prev__ = *ptr__; \
do { \
@@ -37,7 +37,7 @@
new__ = old__ op (val); \
prev__ = cmpxchg(ptr__, old__, new__); \
} while (prev__ != old__); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
new__; \
})
@@ -68,7 +68,7 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
if (__builtin_constant_p(val__) && \
((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
@@ -84,7 +84,7 @@
: [val__] "d" (val__) \
: "cc"); \
} \
- preempt_enable(); \
+ preempt_enable_notrace(); \
}
#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
@@ -95,14 +95,14 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
old__ + val__; \
})
@@ -114,14 +114,14 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
}
#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
@@ -136,10 +136,10 @@
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ ret__; \
pcp_op_T__ *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = cmpxchg(ptr__, oval, nval); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
@@ -152,10 +152,10 @@
({ \
typeof(pcp) *ptr__; \
typeof(pcp) ret__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = xchg(ptr__, nval); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
@@ -171,11 +171,11 @@
typeof(pcp1) *p1__; \
typeof(pcp2) *p2__; \
int ret__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
p1__ = raw_cpu_ptr(&(pcp1)); \
p2__ = raw_cpu_ptr(&(pcp2)); \
ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 96f9a9151fde..2becba8008c4 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -38,7 +38,17 @@ static inline void syscall_rollback(struct task_struct *task,
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
- return IS_ERR_VALUE(regs->gprs[2]) ? regs->gprs[2] : 0;
+ unsigned long error = regs->gprs[2];
+#ifdef CONFIG_COMPAT
+ if (test_tsk_thread_flag(task, TIF_31BIT)) {
+ /*
+ * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
+ * and will match correctly in comparisons.
+ */
+ error = (long)(int)error;
+ }
+#endif
+ return IS_ERR_VALUE(error) ? error : 0;
}
static inline long syscall_get_return_value(struct task_struct *task,
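The compat branch above sign-extends the 31-bit return value so that a negative errno left in the low 32 bits of gpr 2 is recognised by the 64-bit error check. A stand-alone demonstration, mirroring the kernel's IS_ERR_VALUE() definition:

#include <stdio.h>

#define MAX_ERRNO 4095UL
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
        /* gpr 2 as seen from a 31-bit task that returned -EINVAL (-22) */
        unsigned long gpr2 = 0x00000000ffffffeaUL;
        long fixed;

        printf("raw value:         %#lx -> error? %d\n",
               gpr2, IS_ERR_VALUE(gpr2) ? 1 : 0);

        /* sign-extend the low 32 bits, as the patched helper does */
        fixed = (long)(int)gpr2;
        printf("after (long)(int): %#lx -> error? %d\n",
               (unsigned long)fixed, IS_ERR_VALUE(fixed) ? 1 : 0);
        return 0;
}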
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 169d7604eb80..f3ba84fa9bd1 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -36,6 +36,7 @@ struct vdso_data {
__u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
__u32 ts_dir; /* TOD steering direction 0x64 */
__u64 ts_end; /* TOD steering end 0x68 */
+ __u32 hrtimer_res; /* hrtimer resolution 0x70 */
};
struct vdso_per_cpu_data {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 66e830f1c7bf..e9d09f6e81d2 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -75,6 +75,7 @@ int main(void)
OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
OFFSET(__VDSO_TS_END, vdso_data, ts_end);
+ OFFSET(__VDSO_CLOCK_REALTIME_RES, vdso_data, hrtimer_res);
OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
@@ -86,7 +87,6 @@ int main(void)
DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
- DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
BLANK();
/* idle data offsets */
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index af013b4244d3..2da027359798 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen)
static int diag8_response(int cmdlen, char *response, int *rlen)
{
+ unsigned long _cmdlen = cmdlen | 0x40000000L;
+ unsigned long _rlen = *rlen;
register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
register unsigned long reg3 asm ("3") = (addr_t) response;
- register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
- register unsigned long reg5 asm ("5") = *rlen;
+ register unsigned long reg4 asm ("4") = _cmdlen;
+ register unsigned long reg5 asm ("5") = _rlen;
asm volatile(
" diag %2,%0,0x8\n"
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index d374f9b218b4..04bbf7e97fea 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -198,9 +198,10 @@ static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas)
if (!areas)
goto fail_malloc_areas;
for (i = 0; i < nr_areas; i++) {
+ /* __GFP_NOWARN to avoid a user-triggerable WARN; we handle failures */
areas[i] = kmalloc_array(pages_per_area,
sizeof(debug_entry_t *),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!areas[i])
goto fail_malloc_areas2;
for (j = 0; j < pages_per_area; j++) {
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 53a5316cc4b7..4c7cf8787a84 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -79,7 +79,7 @@ static int show_diag_stat(struct seq_file *m, void *v)
static void *show_diag_stat_start(struct seq_file *m, loff_t *pos)
{
- return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
+ return *pos <= NR_DIAG_STAT ? (void *)((unsigned long) *pos + 1) : NULL;
}
static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos)
@@ -128,7 +128,7 @@ void diag_stat_inc(enum diag_stat_enum nr)
}
EXPORT_SYMBOL(diag_stat_inc);
-void diag_stat_inc_norecursion(enum diag_stat_enum nr)
+void notrace diag_stat_inc_norecursion(enum diag_stat_enum nr)
{
this_cpu_inc(diag_stat.counter[nr]);
trace_s390_diagnose_norecursion(diag_map[nr].code);
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 41925f220694..01307db4b4dd 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -558,7 +558,7 @@ void show_code(struct pt_regs *regs)
void print_fn_code(unsigned char *code, unsigned long len)
{
- char buffer[64], *ptr;
+ char buffer[128], *ptr;
int opsize, i;
while (len) {
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index e7e6608b996c..ad88bed74395 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -155,6 +155,8 @@ static noinline __init void setup_lowcore_early(void)
psw_t psw;
psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
+ if (IS_ENABLED(CONFIG_KASAN))
+ psw.mask |= PSW_MASK_DAT;
psw.addr = (unsigned long) s390_base_ext_handler;
S390_lowcore.external_new_psw = psw;
psw.addr = (unsigned long) s390_base_pgm_handler;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 150130c897c3..7e6a9cf863c7 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -949,6 +949,7 @@ ENTRY(ext_int_handler)
* Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
*/
ENTRY(psw_idle)
+ stg %r14,(__SF_GPRS+8*8)(%r15)
stg %r3,__SF_EMPTY(%r15)
larl %r1,.Lpsw_idle_lpsw+4
stg %r1,__SF_EMPTY+8(%r15)
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 83afd5b78e16..020f9aac7dc0 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -40,6 +40,7 @@ EXPORT_SYMBOL(_mcount)
ENTRY(ftrace_caller)
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
+ stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller
lgr %r1,%r15
#if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
aghi %r0,MCOUNT_RETURN_FIXUP
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 5bfb1ce129f4..c8e1e325215b 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1377,8 +1377,8 @@ static int aux_output_begin(struct perf_output_handle *handle,
idx = aux->empty_mark + 1;
for (i = 0; i < range_scan; i++, idx++) {
te = aux_sdb_trailer(aux, idx);
- te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
- te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK;
+ te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
+ SDB_TE_ALERT_REQ_MASK);
te->overflow = 0;
}
/* Save the position of empty SDBs */
@@ -1425,8 +1425,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
te = aux_sdb_trailer(aux, alert_index);
do {
orig_flags = te->flags;
- orig_overflow = te->overflow;
- *overflow = orig_overflow;
+ *overflow = orig_overflow = te->overflow;
if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
/*
* SDB is already set by hardware.
@@ -1537,6 +1536,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
perf_aux_output_end(handle, size);
num_sdb = aux->sfb.num_sdb;
+ num_sdb = aux->sfb.num_sdb;
while (!done) {
/* Get an output handle */
aux = perf_aux_output_begin(handle, cpuhw->event);
@@ -1659,7 +1659,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
}
/* Allocate aux_buffer struct for the event */
- aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL);
+ aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL);
if (!aux)
goto no_aux;
sfb = &aux->sfb;
@@ -2097,4 +2097,4 @@ out:
return err;
}
arch_initcall(init_cpum_sampling_pmu);
-core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640);
+core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0644);
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 6fe2e1875058..675d4be0c2b7 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -157,8 +157,9 @@ static void show_cpu_mhz(struct seq_file *m, unsigned long n)
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
+ unsigned long first = cpumask_first(cpu_online_mask);
- if (!n)
+ if (n == first)
show_cpu_summary(m, v);
if (!machine_has_cpu_mhz)
return 0;
@@ -171,6 +172,8 @@ static inline void *c_update(loff_t *pos)
{
if (*pos)
*pos = cpumask_next(*pos - 1, cpu_online_mask);
+ else
+ *pos = cpumask_first(cpu_online_mask);
return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index cd3df5514552..3ffa2847c110 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -325,6 +325,25 @@ static inline void __poke_user_per(struct task_struct *child,
child->thread.per_user.end = data;
}
+static void fixup_int_code(struct task_struct *child, addr_t data)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+ int ilc = regs->int_code >> 16;
+ u16 insn;
+
+ if (ilc > 6)
+ return;
+
+ if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
+ &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
+ return;
+
+ /* double check that tracee stopped on svc instruction */
+ if ((insn >> 8) != 0xa)
+ return;
+
+ regs->int_code = 0x20000 | (data & 0xffff);
+}
/*
* Write a word to the user area of a process at location addr. This
* operation does have an additional problem compared to peek_user.
@@ -336,7 +355,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
struct user *dummy = NULL;
addr_t offset;
+
if (addr < (addr_t) &dummy->regs.acrs) {
+ struct pt_regs *regs = task_pt_regs(child);
/*
* psw and gprs are stored on the stack
*/
@@ -354,7 +375,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
/* Invalid addressing mode bits */
return -EINVAL;
}
- *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
+
+ if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
+ addr == offsetof(struct user, regs.gprs[2]))
+ fixup_int_code(child, data);
+ *(addr_t *)((addr_t) &regs->psw + addr) = data;
} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
/*
@@ -720,6 +745,10 @@ static int __poke_user_compat(struct task_struct *child,
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
(__u64)(tmp & PSW32_ADDR_AMODE);
} else {
+
+ if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
+ addr == offsetof(struct compat_user, regs.gprs[2]))
+ fixup_int_code(child, data);
/* gpr 0-15 */
*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
}
@@ -1257,7 +1286,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
cb->pc == 1 &&
cb->qc == 0 &&
cb->reserved2 == 0 &&
- cb->key == PAGE_DEFAULT_KEY &&
cb->reserved3 == 0 &&
cb->reserved4 == 0 &&
cb->reserved5 == 0 &&
@@ -1321,7 +1349,11 @@ static int s390_runtime_instr_set(struct task_struct *target,
kfree(data);
return -EINVAL;
}
-
+ /*
+ * Override access key in any case, since user space should
+ * not be able to set it, nor should it care about it.
+ */
+ ri_cb.key = PAGE_DEFAULT_KEY >> 4;
preempt_disable();
if (!target->thread.ri_cb)
target->thread.ri_cb = data;
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 125c7f6e8715..1788a5454b6f 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
cb->k = 1;
cb->ps = 1;
cb->pc = 1;
- cb->key = PAGE_DEFAULT_KEY;
+ cb->key = PAGE_DEFAULT_KEY >> 4;
cb->v = 1;
}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 5f85e0dfa66d..4bda9055daef 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -537,7 +537,7 @@ static struct notifier_block kdump_mem_nb = {
/*
* Make sure that the area behind memory_end is protected
*/
-static void reserve_memory_end(void)
+static void __init reserve_memory_end(void)
{
#ifdef CONFIG_CRASH_DUMP
if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
@@ -555,7 +555,7 @@ static void reserve_memory_end(void)
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
-static void reserve_oldmem(void)
+static void __init reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)
@@ -567,7 +567,7 @@ static void reserve_oldmem(void)
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
-static void remove_oldmem(void)
+static void __init remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index ecd24711f3aa..bce678c7179c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -393,7 +393,7 @@ int smp_find_processor_id(u16 address)
return -1;
}
-bool arch_vcpu_is_preempted(int cpu)
+bool notrace arch_vcpu_is_preempted(int cpu)
{
if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
return false;
@@ -403,7 +403,7 @@ bool arch_vcpu_is_preempted(int cpu)
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);
-void smp_yield_cpu(int cpu)
+void notrace smp_yield_cpu(int cpu)
{
if (MACHINE_HAS_DIAG9C) {
diag_stat_inc_norecursion(DIAG_STAT_X09C);
@@ -751,7 +751,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
struct sclp_core_entry *core;
- cpumask_t avail;
+ static cpumask_t avail;
bool configured;
u16 core_id;
int nr, i;
@@ -831,7 +831,7 @@ void __init smp_detect_cpus(void)
*/
static void smp_start_secondary(void *cpuvoid)
{
- int cpu = smp_processor_id();
+ int cpu = raw_smp_processor_id();
S390_lowcore.last_update_clock = get_tod_clock();
S390_lowcore.restart_stack = (unsigned long) restart_stack;
@@ -844,6 +844,7 @@ static void smp_start_secondary(void *cpuvoid)
set_cpu_flag(CIF_ASCE_PRIMARY);
set_cpu_flag(CIF_ASCE_SECONDARY);
cpu_init();
+ rcu_cpu_starting(cpu);
preempt_disable();
init_cpu_timer();
vtime_init();
@@ -862,24 +863,12 @@ static void smp_start_secondary(void *cpuvoid)
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- struct pcpu *pcpu;
- int base, i, rc;
+ struct pcpu *pcpu = pcpu_devices + cpu;
+ int rc;
- pcpu = pcpu_devices + cpu;
if (pcpu->state != CPU_STATE_CONFIGURED)
return -EIO;
- base = smp_get_base_cpu(cpu);
- for (i = 0; i <= smp_cpu_mtid; i++) {
- if (base + i < nr_cpu_ids)
- if (cpu_online(base + i))
- break;
- }
- /*
- * If this is the first CPU of the core to get online
- * do an initial CPU reset.
- */
- if (i > smp_cpu_mtid &&
- pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
+ if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index e8766beee5ad..11c32b228f51 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -310,6 +310,7 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->tk_mult = tk->tkr_mono.mult;
vdso_data->tk_shift = tk->tkr_mono.shift;
+ vdso_data->hrtimer_res = hrtimer_resolution;
smp_wmb();
++vdso_data->tb_update_count;
}
@@ -353,8 +354,9 @@ static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(clock_sync_mutex);
static unsigned long clock_sync_flags;
-#define CLOCK_SYNC_HAS_STP 0
-#define CLOCK_SYNC_STP 1
+#define CLOCK_SYNC_HAS_STP 0
+#define CLOCK_SYNC_STP 1
+#define CLOCK_SYNC_STPINFO_VALID 2
/*
* The get_clock function for the physical clock. It will get the current
@@ -591,6 +593,22 @@ void stp_queue_work(void)
queue_work(time_sync_wq, &stp_work);
}
+static int __store_stpinfo(void)
+{
+ int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
+
+ if (rc)
+ clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
+ else
+ set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
+ return rc;
+}
+
+static int stpinfo_valid(void)
+{
+ return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
+}
+
static int stp_sync_clock(void *data)
{
struct clock_sync_data *sync = data;
@@ -612,8 +630,7 @@ static int stp_sync_clock(void *data)
if (rc == 0) {
sync->clock_delta = clock_delta;
clock_sync_global(clock_delta);
- rc = chsc_sstpi(stp_page, &stp_info,
- sizeof(struct stp_sstpi));
+ rc = __store_stpinfo();
if (rc == 0 && stp_info.tmd != 2)
rc = -EAGAIN;
}
@@ -658,7 +675,7 @@ static void stp_work_fn(struct work_struct *work)
if (rc)
goto out_unlock;
- rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
+ rc = __store_stpinfo();
if (rc || stp_info.c == 0)
goto out_unlock;
@@ -695,10 +712,14 @@ static ssize_t stp_ctn_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- if (!stp_online)
- return -ENODATA;
- return sprintf(buf, "%016llx\n",
- *(unsigned long long *) stp_info.ctnid);
+ ssize_t ret = -ENODATA;
+
+ mutex_lock(&stp_work_mutex);
+ if (stpinfo_valid())
+ ret = sprintf(buf, "%016llx\n",
+ *(unsigned long long *) stp_info.ctnid);
+ mutex_unlock(&stp_work_mutex);
+ return ret;
}
static DEVICE_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
@@ -707,9 +728,13 @@ static ssize_t stp_ctn_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- if (!stp_online)
- return -ENODATA;
- return sprintf(buf, "%i\n", stp_info.ctn);
+ ssize_t ret = -ENODATA;
+
+ mutex_lock(&stp_work_mutex);
+ if (stpinfo_valid())
+ ret = sprintf(buf, "%i\n", stp_info.ctn);
+ mutex_unlock(&stp_work_mutex);
+ return ret;
}
static DEVICE_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
@@ -718,9 +743,13 @@ static ssize_t stp_dst_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- if (!stp_online || !(stp_info.vbits & 0x2000))
- return -ENODATA;
- return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
+ ssize_t ret = -ENODATA;
+
+ mutex_lock(&stp_work_mutex);
+ if (stpinfo_valid() && (stp_info.vbits & 0x2000))
+ ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
+ mutex_unlock(&stp_work_mutex);
+ return ret;
}
static DEVICE_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
@@ -729,9 +758,13 @@ static ssize_t stp_leap_seconds_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- if (!stp_online || !(stp_info.vbits & 0x8000))
- return -ENODATA;
- return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
+ ssize_t ret = -ENODATA;
+
+ mutex_lock(&stp_work_mutex);
+ if (stpinfo_valid() && (stp_info.vbits & 0x8000))
+ ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
+ mutex_unlock(&stp_work_mutex);
+ return ret;
}
static DEVICE_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
@@ -740,9 +773,13 @@ static ssize_t stp_stratum_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- if (!stp_online)
- return -ENODATA;
- return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
+ ssize_t ret = -ENODATA;
+
+ mutex_lock(&stp_work_mutex);
+ if (stpinfo_valid())
+ ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
+ mutex_unlock(&stp_work_mutex);
+ return ret;
}
static DEVICE_ATTR(stratum, 0400, stp_stratum_show, NULL);
@@ -751,9 +788,13 @@ static ssize_t stp_time_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- if (!stp_online || !(stp_info.vbits & 0x0800))
- return -ENODATA;
- return sprintf(buf, "%i\n", (int) stp_info.tto);
+ ssize_t ret = -ENODATA;
+
+ mutex_lock(&stp_work_mutex);
+ if (stpinfo_valid() && (stp_info.vbits & 0x0800))
+ ret = sprintf(buf, "%i\n", (int) stp_info.tto);
+ mutex_unlock(&stp_work_mutex);
+ return ret;
}
static DEVICE_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
@@ -762,9 +803,13 @@ static ssize_t stp_time_zone_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- if (!stp_online || !(stp_info.vbits & 0x4000))
- return -ENODATA;
- return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
+ ssize_t ret = -ENODATA;
+
+ mutex_lock(&stp_work_mutex);
+ if (stpinfo_valid() && (stp_info.vbits & 0x4000))
+ ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
+ mutex_unlock(&stp_work_mutex);
+ return ret;
}
static DEVICE_ATTR(time_zone_offset, 0400,
@@ -774,9 +819,13 @@ static ssize_t stp_timing_mode_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- if (!stp_online)
- return -ENODATA;
- return sprintf(buf, "%i\n", stp_info.tmd);
+ ssize_t ret = -ENODATA;
+
+ mutex_lock(&stp_work_mutex);
+ if (stpinfo_valid())
+ ret = sprintf(buf, "%i\n", stp_info.tmd);
+ mutex_unlock(&stp_work_mutex);
+ return ret;
}
static DEVICE_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
@@ -785,9 +834,13 @@ static ssize_t stp_timing_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- if (!stp_online)
- return -ENODATA;
- return sprintf(buf, "%i\n", stp_info.tst);
+ ssize_t ret = -ENODATA;
+
+ mutex_lock(&stp_work_mutex);
+ if (stpinfo_valid())
+ ret = sprintf(buf, "%i\n", stp_info.tst);
+ mutex_unlock(&stp_work_mutex);
+ return ret;
}
static DEVICE_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
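Each of these show routines now follows the same pattern: take the mutex the updater path holds, test the validity flag set by __store_stpinfo(), and only then format the cached stp_info fields. A user-space model of that reader/updater pattern using a pthread mutex (info_valid/info_stratum are illustrative stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t info_lock = PTHREAD_MUTEX_INITIALIZER;
static int info_valid;
static int info_stratum;

static void update_info(int online, int stratum)
{
        pthread_mutex_lock(&info_lock);
        info_valid = online;
        info_stratum = stratum;
        pthread_mutex_unlock(&info_lock);
}

static int show_stratum(char *buf, size_t len)
{
        int ret = -1;   /* stands in for -ENODATA */

        pthread_mutex_lock(&info_lock);
        if (info_valid)
                ret = snprintf(buf, len, "%i\n", info_stratum);
        pthread_mutex_unlock(&info_lock);
        return ret;
}

int main(void)
{
        char buf[32];

        printf("before update: %d\n", show_stratum(buf, sizeof(buf)));
        update_info(1, 2);
        if (show_stratum(buf, sizeof(buf)) > 0)
                printf("after update: %s", buf);
        return 0;
}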
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
index 490b52e85014..11a669f3cc93 100644
--- a/arch/s390/kernel/trace.c
+++ b/arch/s390/kernel/trace.c
@@ -14,7 +14,7 @@ EXPORT_TRACEPOINT_SYMBOL(s390_diagnose);
static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth);
-void trace_s390_diagnose_norecursion(int diag_nr)
+void notrace trace_s390_diagnose_norecursion(int diag_nr)
{
unsigned long flags;
unsigned int *depth;
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 081435398e0a..0c79caa32b59 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -17,12 +17,14 @@
.type __kernel_clock_getres,@function
__kernel_clock_getres:
CFI_STARTPROC
- larl %r1,4f
+ larl %r1,3f
+ lg %r0,0(%r1)
cghi %r2,__CLOCK_REALTIME_COARSE
je 0f
cghi %r2,__CLOCK_MONOTONIC_COARSE
je 0f
- larl %r1,3f
+ larl %r1,_vdso_data
+ llgf %r0,__VDSO_CLOCK_REALTIME_RES(%r1)
cghi %r2,__CLOCK_REALTIME
je 0f
cghi %r2,__CLOCK_MONOTONIC
@@ -36,7 +38,6 @@ __kernel_clock_getres:
jz 2f
0: ltgr %r3,%r3
jz 1f /* res == NULL */
- lg %r0,0(%r1)
xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
stg %r0,8(%r3) /* store tp->tv_usec */
1: lghi %r2,0
@@ -45,6 +46,5 @@ __kernel_clock_getres:
svc 0
br %r14
CFI_ENDPROC
-3: .quad __CLOCK_REALTIME_RES
-4: .quad __CLOCK_COARSE_RES
+3: .quad __CLOCK_COARSE_RES
.size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index f4c51756c462..4c56de542960 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -18,17 +18,14 @@
/**
* kvm_s390_real_to_abs - convert guest real address to guest absolute address
- * @vcpu - guest virtual cpu
+ * @prefix - guest prefix
* @gra - guest real address
*
* Returns the guest absolute address that corresponds to the passed guest real
- * address @gra of a virtual guest cpu by applying its prefix.
+ * address @gra by applying the given prefix.
*/
-static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
- unsigned long gra)
+static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
{
- unsigned long prefix = kvm_s390_get_prefix(vcpu);
-
if (gra < 2 * PAGE_SIZE)
gra += prefix;
else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
@@ -37,6 +34,43 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
}
/**
+ * kvm_s390_real_to_abs - convert guest real address to guest absolute address
+ * @vcpu - guest virtual cpu
+ * @gra - guest real address
+ *
+ * Returns the guest absolute address that corresponds to the passed guest real
+ * address @gra of a virtual guest cpu by applying its prefix.
+ */
+static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
+ unsigned long gra)
+{
+ return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
+}
+
+/**
+ * _kvm_s390_logical_to_effective - convert guest logical to effective address
+ * @psw: psw of the guest
+ * @ga: guest logical address
+ *
+ * Convert a guest logical address to an effective address by applying the
+ * rules of the addressing mode defined by bits 31 and 32 of the given PSW
+ * (extended/basic addressing mode).
+ *
+ * Depending on the addressing mode, the upper 40 bits (24 bit addressing
+ * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
+ * mode) of @ga will be zeroed and the remaining bits will be returned.
+ */
+static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
+ unsigned long ga)
+{
+ if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
+ return ga;
+ if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
+ return ga & ((1UL << 31) - 1);
+ return ga & ((1UL << 24) - 1);
+}
+
+/**
* kvm_s390_logical_to_effective - convert guest logical to effective address
* @vcpu: guest virtual cpu
* @ga: guest logical address
@@ -52,13 +86,7 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
unsigned long ga)
{
- psw_t *psw = &vcpu->arch.sie_block->gpsw;
-
- if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
- return ga;
- if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
- return ga & ((1UL << 31) - 1);
- return ga & ((1UL << 24) - 1);
+ return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
}
/*
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 11c3cd906ab4..71c67a1d2849 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1666,6 +1666,9 @@ static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
start = slot + 1;
}
+ if (start >= slots->used_slots)
+ return slots->used_slots - 1;
+
if (gfn >= memslots[start].base_gfn &&
gfn < memslots[start].base_gfn + memslots[start].npages) {
atomic_set(&slots->lru_slot, start);
@@ -3621,16 +3624,16 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
if (MACHINE_HAS_GS) {
+ preempt_disable();
__ctl_set_bit(2, 4);
if (vcpu->arch.gs_enabled)
save_gs_cb(current->thread.gs_cb);
- preempt_disable();
current->thread.gs_cb = vcpu->arch.host_gscb;
restore_gs_cb(vcpu->arch.host_gscb);
- preempt_enable();
if (!vcpu->arch.host_gscb)
__ctl_clear_bit(2, 4);
vcpu->arch.host_gscb = NULL;
+ preempt_enable();
}
/* SIE will save etoken directly into SDNX and therefore kvm_run */
}
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index a2b28cd1e3fe..17d73b71df1d 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1024,6 +1024,7 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->iprcc = PGM_ADDRESSING;
scb_s->pgmilc = 4;
scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
+ rc = 1;
}
return rc;
}
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index c4f8039a35e8..0267405ab7c6 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -64,10 +64,13 @@ mm_segment_t enable_sacf_uaccess(void)
{
mm_segment_t old_fs;
unsigned long asce, cr;
+ unsigned long flags;
old_fs = current->thread.mm_segment;
if (old_fs & 1)
return old_fs;
+ /* protect against a concurrent page table upgrade */
+ local_irq_save(flags);
current->thread.mm_segment |= 1;
asce = S390_lowcore.kernel_asce;
if (likely(old_fs == USER_DS)) {
@@ -83,6 +86,7 @@ mm_segment_t enable_sacf_uaccess(void)
__ctl_load(asce, 7, 7);
set_cpu_flag(CIF_ASCE_SECONDARY);
}
+ local_irq_restore(flags);
return old_fs;
}
EXPORT_SYMBOL(enable_sacf_uaccess);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 911c7ded35f1..7cde0f2f52e1 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -787,14 +787,18 @@ static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
unsigned long gaddr, int level)
{
+ const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
unsigned long *table;
if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
return NULL;
if (gmap_is_shadow(gmap) && gmap->removed)
return NULL;
- if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
+
+ if (asce_type != _ASCE_TYPE_REGION1 &&
+ gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
return NULL;
+
table = gmap->table;
switch (gmap->asce & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
@@ -1834,6 +1838,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
goto out_free;
} else if (*table & _REGION_ENTRY_ORIGIN) {
rc = -EAGAIN; /* Race with shadow */
+ goto out_free;
}
crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
/* mark as invalid as long as the parent table is not protected */
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 5674710a4841..ff8234bca56c 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -117,7 +117,7 @@ static inline pte_t __rste_to_pte(unsigned long rste)
_PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
- _PAGE_DIRTY);
+ _PAGE_SOFT_DIRTY);
#endif
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
_PAGE_NOEXEC);
@@ -159,10 +159,13 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
rste &= ~_SEGMENT_ENTRY_NOEXEC;
/* Set correct table type for 2G hugepages */
- if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
- rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
- else
+ if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
+ if (likely(pte_present(pte)))
+ rste |= _REGION3_ENTRY_LARGE;
+ rste |= _REGION_ENTRY_TYPE_R3;
+ } else if (likely(pte_present(pte)))
rste |= _SEGMENT_ENTRY_LARGE;
+
clear_huge_pte_skeys(mm, rste);
pte_val(*ptep) = rste;
}
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 814f26520aa2..f3bc9c9305da 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -72,8 +72,20 @@ static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
- if (current->active_mm == mm)
- set_user_asce(mm);
+ /* we must change all active ASCEs to avoid the creation of new TLBs */
+ if (current->active_mm == mm) {
+ S390_lowcore.user_asce = mm->context.asce;
+ if (current->thread.mm_segment == USER_DS) {
+ __ctl_load(S390_lowcore.user_asce, 1, 1);
+ /* Mark user-ASCE present in CR1 */
+ clear_cpu_flag(CIF_ASCE_PRIMARY);
+ }
+ if (current->thread.mm_segment == USER_DS_SACF) {
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
+ /* enable_sacf_uaccess does all or nothing */
+ WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
+ }
+ }
__tlb_flush_local();
}
diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S
index 2e3707b12edd..9b2d7a71fd1b 100644
--- a/arch/s390/purgatory/head.S
+++ b/arch/s390/purgatory/head.S
@@ -61,14 +61,15 @@
jh 10b
.endm
-.macro START_NEXT_KERNEL base
+.macro START_NEXT_KERNEL base subcode
lg %r4,kernel_entry-\base(%r13)
lg %r5,load_psw_mask-\base(%r13)
ogr %r4,%r5
stg %r4,0(%r0)
xgr %r0,%r0
- diag %r0,%r0,0x308
+ lghi %r1,\subcode
+ diag %r0,%r1,0x308
.endm
.text
@@ -123,7 +124,7 @@ ENTRY(purgatory_start)
je .start_crash_kernel
/* start normal kernel */
- START_NEXT_KERNEL .base_crash
+ START_NEXT_KERNEL .base_crash 0
.return_old_kernel:
lmg %r6,%r15,gprregs-.base_crash(%r13)
@@ -227,7 +228,7 @@ ENTRY(purgatory_start)
MEMCPY %r9,%r10,%r11
/* start crash kernel */
- START_NEXT_KERNEL .base_dst
+ START_NEXT_KERNEL .base_dst 1
load_psw_mask:
diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c
index f1147caebacf..af69fb7fef7c 100644
--- a/arch/sh/boards/mach-landisk/setup.c
+++ b/arch/sh/boards/mach-landisk/setup.c
@@ -85,6 +85,9 @@ device_initcall(landisk_devices_setup);
static void __init landisk_setup(char **cmdline_p)
{
+ /* I/O port identity mapping */
+ __set_io_port_base(0);
+
/* LED ON */
__raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED);
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 2156223405a1..ecc736504a04 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -47,7 +47,6 @@ CONFIG_BLK_DEV_IDETAPE=m
CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig
index d0de378beefe..7d54f284ce10 100644
--- a/arch/sh/drivers/dma/Kconfig
+++ b/arch/sh/drivers/dma/Kconfig
@@ -63,8 +63,7 @@ config PVR2_DMA
config G2_DMA
tristate "G2 Bus DMA support"
- depends on SH_DREAMCAST
- select SH_DMA_API
+ depends on SH_DREAMCAST && SH_DMA_API
help
This enables support for the DMA controller for the Dreamcast's
G2 bus. Drivers that want this will generally enable this on
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index 32eb56e00c11..6e7816360a75 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -16,8 +16,11 @@
* sum := addr + size; carry? --> flag = true;
* if (sum >= addr_limit) flag = true;
*/
-#define __access_ok(addr, size) \
- (__addr_ok((addr) + (size)))
+#define __access_ok(addr, size) ({ \
+ unsigned long __ao_a = (addr), __ao_b = (size); \
+ unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \
+ __ao_end >= __ao_a && __addr_ok(__ao_end); })
+
#define access_ok(type, addr, size) \
(__chk_user_ptr(addr), \
__access_ok((unsigned long __force)(addr), (size)))
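
The rewritten __access_ok() computes the address of the last byte of the range and rejects ranges that wrap around the address space. A standalone sketch of the same check, with an explicit limit in place of __addr_ok() (names are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>

static bool range_ok(uintptr_t addr, uintptr_t size, uintptr_t limit)
{
	/* last byte of the range, or addr itself when size == 0 */
	uintptr_t end = addr + size - !!size;

	return end >= addr && end < limit;      /* no wrap-around, below the limit */
}
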
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 28cc61216b64..ed5b758c650d 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -203,7 +203,7 @@ syscall_trace_entry:
mov.l @(OFF_R7,r15), r7 ! arg3
mov.l @(OFF_R3,r15), r3 ! syscall_nr
!
- mov.l 2f, r10 ! Number of syscalls
+ mov.l 6f, r10 ! Number of syscalls
cmp/hs r10, r3
bf syscall_call
mov #-ENOSYS, r0
@@ -357,7 +357,7 @@ ENTRY(system_call)
tst r9, r8
bf syscall_trace_entry
!
- mov.l 2f, r8 ! Number of syscalls
+ mov.l 6f, r8 ! Number of syscalls
cmp/hs r8, r3
bt syscall_badsys
!
@@ -396,7 +396,7 @@ syscall_exit:
#if !defined(CONFIG_CPU_SH2)
1: .long TRA
#endif
-2: .long NR_syscalls
+6: .long NR_syscalls
3: .long sys_call_table
7: .long do_syscall_trace_enter
8: .long do_syscall_trace_leave
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index e6f2a38d2e61..1f1a7583fa90 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -554,7 +554,7 @@ config COMPAT
bool
depends on SPARC64
default y
- select COMPAT_BINFMT_ELF
+ select COMPAT_BINFMT_ELF if BINFMT_ELF
select HAVE_UID16
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 4d4e1cc6402f..fc45ec682e7a 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -74,7 +74,6 @@ CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h
index f94532f25db1..274217e7ed70 100644
--- a/arch/sparc/include/asm/mman.h
+++ b/arch/sparc/include/asm/mman.h
@@ -57,35 +57,39 @@ static inline int sparc_validate_prot(unsigned long prot, unsigned long addr)
{
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI))
return 0;
- if (prot & PROT_ADI) {
- if (!adi_capable())
- return 0;
+ return 1;
+}
- if (addr) {
- struct vm_area_struct *vma;
+#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)
+/* arch_validate_flags() - Ensure combination of flags is valid for a
+ * VMA.
+ */
+static inline bool arch_validate_flags(unsigned long vm_flags)
+{
+ /* If ADI is being enabled on this VMA, check for ADI
+ * capability on the platform and ensure VMA is suitable
+ * for ADI
+ */
+ if (vm_flags & VM_SPARC_ADI) {
+ if (!adi_capable())
+ return false;
- vma = find_vma(current->mm, addr);
- if (vma) {
- /* ADI can not be enabled on PFN
- * mapped pages
- */
- if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
- return 0;
+ /* ADI can not be enabled on PFN mapped pages */
+ if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+ return false;
- /* Mergeable pages can become unmergeable
- * if ADI is enabled on them even if they
- * have identical data on them. This can be
- * because ADI enabled pages with identical
- * data may still not have identical ADI
- * tags on them. Disallow ADI on mergeable
- * pages.
- */
- if (vma->vm_flags & VM_MERGEABLE)
- return 0;
- }
- }
+ /* Mergeable pages can become unmergeable
+ * if ADI is enabled on them even if they
+ * have identical data on them. This can be
+ * because ADI enabled pages with identical
+ * data may still not have identical ADI
+ * tags on them. Disallow ADI on mergeable
+ * pages.
+ */
+ if (vm_flags & VM_MERGEABLE)
+ return false;
}
- return 1;
+ return true;
}
#endif /* CONFIG_SPARC64 */
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index 16b50afe7b52..646dd58169ec 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -46,82 +46,79 @@ enum sparc_regset {
REGSET_FP,
};
+static int regwindow32_get(struct task_struct *target,
+ const struct pt_regs *regs,
+ u32 *uregs)
+{
+ unsigned long reg_window = regs->u_regs[UREG_I6];
+ int size = 16 * sizeof(u32);
+
+ if (target == current) {
+ if (copy_from_user(uregs, (void __user *)reg_window, size))
+ return -EFAULT;
+ } else {
+ if (access_process_vm(target, reg_window, uregs, size,
+ FOLL_FORCE) != size)
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int regwindow32_set(struct task_struct *target,
+ const struct pt_regs *regs,
+ u32 *uregs)
+{
+ unsigned long reg_window = regs->u_regs[UREG_I6];
+ int size = 16 * sizeof(u32);
+
+ if (target == current) {
+ if (copy_to_user((void __user *)reg_window, uregs, size))
+ return -EFAULT;
+ } else {
+ if (access_process_vm(target, reg_window, uregs, size,
+ FOLL_FORCE | FOLL_WRITE) != size)
+ return -EFAULT;
+ }
+ return 0;
+}
+
static int genregs32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_regs *regs = target->thread.kregs;
- unsigned long __user *reg_window;
- unsigned long *k = kbuf;
- unsigned long __user *u = ubuf;
- unsigned long reg;
+ u32 uregs[16];
+ int ret;
if (target == current)
flush_user_windows();
- pos /= sizeof(reg);
- count /= sizeof(reg);
-
- if (kbuf) {
- for (; count > 0 && pos < 16; count--)
- *k++ = regs->u_regs[pos++];
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (get_user(*k++, &reg_window[pos++]))
- return -EFAULT;
- }
- } else {
- for (; count > 0 && pos < 16; count--) {
- if (put_user(regs->u_regs[pos++], u++))
- return -EFAULT;
- }
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (get_user(reg, &reg_window[pos++]) ||
- put_user(reg, u++))
- return -EFAULT;
- }
- }
- while (count > 0) {
- switch (pos) {
- case 32: /* PSR */
- reg = regs->psr;
- break;
- case 33: /* PC */
- reg = regs->pc;
- break;
- case 34: /* NPC */
- reg = regs->npc;
- break;
- case 35: /* Y */
- reg = regs->y;
- break;
- case 36: /* WIM */
- case 37: /* TBR */
- reg = 0;
- break;
- default:
- goto finish;
- }
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs->u_regs,
+ 0, 16 * sizeof(u32));
+ if (ret || !count)
+ return ret;
- if (kbuf)
- *k++ = reg;
- else if (put_user(reg, u++))
+ if (pos < 32 * sizeof(u32)) {
+ if (regwindow32_get(target, regs, uregs))
return -EFAULT;
- pos++;
- count--;
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ uregs,
+ 16 * sizeof(u32), 32 * sizeof(u32));
+ if (ret || !count)
+ return ret;
}
-finish:
- pos *= sizeof(reg);
- count *= sizeof(reg);
- return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- 38 * sizeof(reg), -1);
+ uregs[0] = regs->psr;
+ uregs[1] = regs->pc;
+ uregs[2] = regs->npc;
+ uregs[3] = regs->y;
+ uregs[4] = 0; /* WIM */
+ uregs[5] = 0; /* TBR */
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ uregs,
+ 32 * sizeof(u32), 38 * sizeof(u32));
}
static int genregs32_set(struct task_struct *target,
@@ -130,82 +127,58 @@ static int genregs32_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = target->thread.kregs;
- unsigned long __user *reg_window;
- const unsigned long *k = kbuf;
- const unsigned long __user *u = ubuf;
- unsigned long reg;
+ u32 uregs[16];
+ u32 psr;
+ int ret;
if (target == current)
flush_user_windows();
- pos /= sizeof(reg);
- count /= sizeof(reg);
-
- if (kbuf) {
- for (; count > 0 && pos < 16; count--)
- regs->u_regs[pos++] = *k++;
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (put_user(*k++, &reg_window[pos++]))
- return -EFAULT;
- }
- } else {
- for (; count > 0 && pos < 16; count--) {
- if (get_user(reg, u++))
- return -EFAULT;
- regs->u_regs[pos++] = reg;
- }
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (get_user(reg, u++) ||
- put_user(reg, &reg_window[pos++]))
- return -EFAULT;
- }
- }
- while (count > 0) {
- unsigned long psr;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->u_regs,
+ 0, 16 * sizeof(u32));
+ if (ret || !count)
+ return ret;
- if (kbuf)
- reg = *k++;
- else if (get_user(reg, u++))
+ if (pos < 32 * sizeof(u32)) {
+ if (regwindow32_get(target, regs, uregs))
return -EFAULT;
-
- switch (pos) {
- case 32: /* PSR */
- psr = regs->psr;
- psr &= ~(PSR_ICC | PSR_SYSCALL);
- psr |= (reg & (PSR_ICC | PSR_SYSCALL));
- regs->psr = psr;
- break;
- case 33: /* PC */
- regs->pc = reg;
- break;
- case 34: /* NPC */
- regs->npc = reg;
- break;
- case 35: /* Y */
- regs->y = reg;
- break;
- case 36: /* WIM */
- case 37: /* TBR */
- break;
- default:
- goto finish;
- }
-
- pos++;
- count--;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ uregs,
+ 16 * sizeof(u32), 32 * sizeof(u32));
+ if (ret)
+ return ret;
+ if (regwindow32_set(target, regs, uregs))
+ return -EFAULT;
+ if (!count)
+ return 0;
}
-finish:
- pos *= sizeof(reg);
- count *= sizeof(reg);
-
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &psr,
+ 32 * sizeof(u32), 33 * sizeof(u32));
+ if (ret)
+ return ret;
+ regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) |
+ (psr & (PSR_ICC | PSR_SYSCALL));
+ if (!count)
+ return 0;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->pc,
+ 33 * sizeof(u32), 34 * sizeof(u32));
+ if (ret || !count)
+ return ret;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->npc,
+ 34 * sizeof(u32), 35 * sizeof(u32));
+ if (ret || !count)
+ return ret;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->y,
+ 35 * sizeof(u32), 36 * sizeof(u32));
+ if (ret || !count)
+ return ret;
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- 38 * sizeof(reg), -1);
+ 36 * sizeof(u32), 38 * sizeof(u32));
}
static int fpregs32_get(struct task_struct *target,
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index e1d965e90e16..0c478c85e380 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -571,19 +571,13 @@ static int genregs32_get(struct task_struct *target,
for (; count > 0 && pos < 32; count--) {
if (access_process_vm(target,
(unsigned long)
- &reg_window[pos],
+ &reg_window[pos++],
&reg, sizeof(reg),
FOLL_FORCE)
!= sizeof(reg))
return -EFAULT;
- if (access_process_vm(target,
- (unsigned long) u,
- &reg, sizeof(reg),
- FOLL_FORCE | FOLL_WRITE)
- != sizeof(reg))
+ if (put_user(reg, u++))
return -EFAULT;
- pos++;
- u++;
}
}
}
@@ -683,12 +677,7 @@ static int genregs32_set(struct task_struct *target,
}
} else {
for (; count > 0 && pos < 32; count--) {
- if (access_process_vm(target,
- (unsigned long)
- u,
- &reg, sizeof(reg),
- FOLL_FORCE)
- != sizeof(reg))
+ if (get_user(reg, u++))
return -EFAULT;
if (access_process_vm(target,
(unsigned long)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index d3ea1f3c06a0..a7d7b7ade42f 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1039,38 +1039,9 @@ void smp_fetch_global_pmu(void)
* are flush_tlb_*() routines, and these run after flush_cache_*()
* which performs the flushw.
*
- * The SMP TLB coherency scheme we use works as follows:
- *
- * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
- * space has (potentially) executed on, this is the heuristic
- * we use to avoid doing cross calls.
- *
- * Also, for flushing from kswapd and also for clones, we
- * use cpu_vm_mask as the list of cpus to make run the TLB.
- *
- * 2) TLB context numbers are shared globally across all processors
- * in the system, this allows us to play several games to avoid
- * cross calls.
- *
- * One invariant is that when a cpu switches to a process, and
- * that processes tsk->active_mm->cpu_vm_mask does not have the
- * current cpu's bit set, that tlb context is flushed locally.
- *
- * If the address space is non-shared (ie. mm->count == 1) we avoid
- * cross calls when we want to flush the currently running process's
- * tlb state. This is done by clearing all cpu bits except the current
- * processor's in current->mm->cpu_vm_mask and performing the
- * flush locally only. This will force any subsequent cpus which run
- * this task to flush the context from the local tlb if the process
- * migrates to another cpu (again).
- *
- * 3) For shared address spaces (threads) and swapping we bite the
- * bullet for most cases and perform the cross call (but only to
- * the cpus listed in cpu_vm_mask).
- *
- * The performance gain from "optimizing" away the cross call for threads is
- * questionable (in theory the big win for threads is the massive sharing of
- * address space state across processors).
+ * mm->cpu_vm_mask is a bit mask of which cpus an address
+ * space has (potentially) executed on, this is the heuristic
+ * we use to limit cross calls.
*/
/* This currently is only used by the hugetlb arch pre-fault
@@ -1080,18 +1051,13 @@ void smp_fetch_global_pmu(void)
void smp_flush_tlb_mm(struct mm_struct *mm)
{
u32 ctx = CTX_HWBITS(mm->context);
- int cpu = get_cpu();
- if (atomic_read(&mm->mm_users) == 1) {
- cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
- goto local_flush_and_out;
- }
+ get_cpu();
smp_cross_call_masked(&xcall_flush_tlb_mm,
ctx, 0, 0,
mm_cpumask(mm));
-local_flush_and_out:
__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
put_cpu();
@@ -1114,17 +1080,15 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
{
u32 ctx = CTX_HWBITS(mm->context);
struct tlb_pending_info info;
- int cpu = get_cpu();
+
+ get_cpu();
info.ctx = ctx;
info.nr = nr;
info.vaddrs = vaddrs;
- if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
- cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
- else
- smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
- &info, 1);
+ smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
+ &info, 1);
__flush_tlb_pending(ctx, nr, vaddrs);
@@ -1134,14 +1098,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
unsigned long context = CTX_HWBITS(mm->context);
- int cpu = get_cpu();
- if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
- cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
- else
- smp_cross_call_masked(&xcall_flush_tlb_page,
- context, vaddr, 0,
- mm_cpumask(mm));
+ get_cpu();
+
+ smp_cross_call_masked(&xcall_flush_tlb_page,
+ context, vaddr, 0,
+ mm_cpumask(mm));
+
__flush_tlb_page(context, vaddr);
put_cpu();
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index aa624ed79db1..86879c28910b 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -274,14 +274,13 @@ bool is_no_fault_exception(struct pt_regs *regs)
asi = (regs->tstate >> 24); /* saved %asi */
else
asi = (insn >> 5); /* immediate asi */
- if ((asi & 0xf2) == ASI_PNF) {
- if (insn & 0x1000000) { /* op3[5:4]=3 */
- handle_ldf_stq(insn, regs);
- return true;
- } else if (insn & 0x200000) { /* op3[2], stores */
+ if ((asi & 0xf6) == ASI_PNF) {
+ if (insn & 0x200000) /* op3[2], stores */
return false;
- }
- handle_ld_nf(insn, regs);
+ if (insn & 0x1000000) /* op3[5:4]=3 (fp) */
+ handle_ldf_stq(insn, regs);
+ else
+ handle_ld_nf(insn, regs);
return true;
}
}
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index b89d42b29e34..f427f34b8b79 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -142,6 +142,7 @@ __bzero:
ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
13:
+ EXT(12b, 13b, 21f)
be 8f
andcc %o1, 4, %g0
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 92634d4e440c..89a9244f2cf0 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -199,6 +199,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
size = memblock_phys_mem_size() - memblock_reserved_size();
*pages_avail = (size >> PAGE_SHIFT) - high_pages;
+ /* Only allow low memory to be allocated via memblock allocation */
+ memblock_set_current_limit(max_low_pfn << PAGE_SHIFT);
+
return max_pfn;
}
diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
index 3fd7c3efdb18..9cffbbb15c56 100644
--- a/arch/um/drivers/chan_user.c
+++ b/arch/um/drivers/chan_user.c
@@ -26,10 +26,10 @@ int generic_read(int fd, char *c_out, void *unused)
n = read(fd, c_out, sizeof(*c_out));
if (n > 0)
return n;
- else if (errno == EAGAIN)
- return 0;
else if (n == 0)
return -EIO;
+ else if (errno == EAGAIN)
+ return 0;
return -errno;
}
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 83c470364dfb..788c80abff5d 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -891,7 +891,7 @@ static int ubd_disk_register(int major, u64 size, int unit,
disk->private_data = &ubd_devs[unit];
disk->queue = ubd_devs[unit].queue;
- device_add_disk(parent, disk);
+ device_add_disk(parent, disk, NULL);
*disk_out = disk;
return 0;
@@ -1574,7 +1574,9 @@ int io_thread(void *arg)
written = 0;
do {
- res = os_write_file(kernel_fd, ((char *) io_req_buffer) + written, n);
+ res = os_write_file(kernel_fd,
+ ((char *) io_req_buffer) + written,
+ n - written);
if (res >= 0) {
written += res;
} else {
diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c
index 20e30be44795..e3b422ebce09 100644
--- a/arch/um/drivers/xterm.c
+++ b/arch/um/drivers/xterm.c
@@ -18,6 +18,7 @@
struct xterm_chan {
int pid;
int helper_pid;
+ int chan_fd;
char *title;
int device;
int raw;
@@ -33,6 +34,7 @@ static void *xterm_init(char *str, int device, const struct chan_opts *opts)
return NULL;
*data = ((struct xterm_chan) { .pid = -1,
.helper_pid = -1,
+ .chan_fd = -1,
.device = device,
.title = opts->xterm_title,
.raw = opts->raw } );
@@ -149,6 +151,7 @@ static int xterm_open(int input, int output, int primary, void *d,
goto out_kill;
}
+ data->chan_fd = fd;
new = xterm_fd(fd, &data->helper_pid);
if (new < 0) {
err = new;
@@ -206,6 +209,8 @@ static void xterm_close(int fd, void *d)
os_kill_process(data->helper_pid, 0);
data->helper_pid = -1;
+ if (data->chan_fd != -1)
+ os_close_file(data->chan_fd);
os_close_file(fd);
}
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 5568cf882371..899233625467 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -6,6 +6,12 @@ OUTPUT_ARCH(ELF_ARCH)
ENTRY(_start)
jiffies = jiffies_64;
+VERSION {
+ {
+ local: *;
+ };
+}
+
SECTIONS
{
PROVIDE (__executable_start = START);
diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c
index b5e0cbb34382..476ded92affa 100644
--- a/arch/um/kernel/sigio.c
+++ b/arch/um/kernel/sigio.c
@@ -36,14 +36,14 @@ int write_sigio_irq(int fd)
}
/* These are called from os-Linux/sigio.c to protect its pollfds arrays. */
-static DEFINE_SPINLOCK(sigio_spinlock);
+static DEFINE_MUTEX(sigio_mutex);
void sigio_lock(void)
{
- spin_lock(&sigio_spinlock);
+ mutex_lock(&sigio_mutex);
}
void sigio_unlock(void)
{
- spin_unlock(&sigio_spinlock);
+ mutex_unlock(&sigio_mutex);
}
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 36b07ec09742..22ff701d9b71 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -7,6 +7,12 @@ OUTPUT_ARCH(ELF_ARCH)
ENTRY(_start)
jiffies = jiffies_64;
+VERSION {
+ {
+ local: *;
+ };
+}
+
SECTIONS
{
/* This must contain the right address - not quite the default ELF one.*/
diff --git a/arch/um/os-Linux/irq.c b/arch/um/os-Linux/irq.c
index 365823010346..90ef40462280 100644
--- a/arch/um/os-Linux/irq.c
+++ b/arch/um/os-Linux/irq.c
@@ -48,7 +48,7 @@ int os_epoll_triggered(int index, int events)
int os_event_mask(int irq_type)
{
if (irq_type == IRQ_READ)
- return EPOLLIN | EPOLLPRI;
+ return EPOLLIN | EPOLLPRI | EPOLLERR | EPOLLHUP | EPOLLRDHUP;
if (irq_type == IRQ_WRITE)
return EPOLLOUT;
return 0;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index af35f5caadbe..d994501d9179 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -533,6 +533,7 @@ config X86_UV
depends on X86_EXTENDED_PLATFORM
depends on NUMA
depends on EFI
+ depends on KEXEC_CORE
depends on X86_X2APIC
depends on PCI
---help---
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 4833dd7e2cc0..65a8722e784c 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -34,12 +34,13 @@ M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS))
REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
-Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
- -mno-mmx -mno-sse
+ -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
+REALMODE_CFLAGS += $(CLANG_FLAGS)
export REALMODE_CFLAGS
# BITS is used as extension for files which are available in a 32 bit
@@ -61,6 +62,9 @@ endif
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
+# Intel CET isn't enabled in the kernel
+KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+
ifeq ($(CONFIG_X86_32),y)
BITS := 32
UTS_MACHINE := i386
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index e2839b5c246c..6539c50fb9aa 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -87,7 +87,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p'
quiet_cmd_zoffset = ZOFFSET $@
cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 466f66c8a7f8..5642f025b397 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -38,6 +38,8 @@ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
KBUILD_CFLAGS += -Wno-pointer-sign
+# Disable relocation relaxation in case the link is not PIE.
+KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
@@ -100,7 +102,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
quiet_cmd_check_data_rel = DATAREL $@
define cmd_check_data_rel
for obj in $(filter %.o,$^); do \
- ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
+ $(READELF) -S $$obj | grep -qF .rel.local && { \
echo "error: $$obj has data relocations!" >&2; \
exit 1; \
} || true; \
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 37380c0d5999..c6c4b877f3d2 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -49,16 +49,17 @@
* Position Independent Executable (PIE) so that linker won't optimize
* R_386_GOT32X relocation to its fixed symbol address. Older
* linkers generate R_386_32 relocations against locally defined symbols,
- * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less
+ * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less
* optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle
* R_386_32 relocations when relocating the kernel. To generate
- * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
+ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as
* hidden:
*/
.hidden _bss
.hidden _ebss
.hidden _got
.hidden _egot
+ .hidden _end
__HEAD
ENTRY(startup_32)
@@ -106,7 +107,7 @@ ENTRY(startup_32)
notl %eax
andl %eax, %ebx
cmpl $LOAD_PHYSICAL_ADDR, %ebx
- jge 1f
+ jae 1f
#endif
movl $LOAD_PHYSICAL_ADDR, %ebx
1:
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 4eaa724afce3..474733f8b330 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -42,6 +42,7 @@
.hidden _ebss
.hidden _got
.hidden _egot
+ .hidden _end
__HEAD
.code32
@@ -106,7 +107,7 @@ ENTRY(startup_32)
notl %eax
andl %eax, %ebx
cmpl $LOAD_PHYSICAL_ADDR, %ebx
- jge 1f
+ jae 1f
#endif
movl $LOAD_PHYSICAL_ADDR, %ebx
1:
@@ -297,7 +298,7 @@ ENTRY(startup_64)
notq %rax
andq %rax, %rbp
cmpq $LOAD_PHYSICAL_ADDR, %rbp
- jge 1f
+ jae 1f
#endif
movq $LOAD_PHYSICAL_ADDR, %rbp
1:
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 0eb9f92f3717..9218cb128661 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -149,7 +149,6 @@ CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
@@ -217,7 +216,6 @@ CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -247,6 +245,7 @@ CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index e32fc1f274d8..146a12293396 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -148,7 +148,6 @@ CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
@@ -213,7 +212,6 @@ CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -243,6 +241,7 @@ CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
index 5f6a5af9c489..77043a82da51 100644
--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -127,10 +127,6 @@ ddq_add_8:
/* generate a unique variable for ddq_add_x */
-.macro setddq n
- var_ddq_add = ddq_add_\n
-.endm
-
/* generate a unique variable for xmm register */
.macro setxdata n
var_xdata = %xmm\n
@@ -140,9 +136,7 @@ ddq_add_8:
.macro club name, id
.altmacro
- .if \name == DDQ_DATA
- setddq %\id
- .elseif \name == XDATA
+ .if \name == XDATA
setxdata %\id
.endif
.noaltmacro
@@ -165,9 +159,8 @@ ddq_add_8:
.set i, 1
.rept (by - 1)
- club DDQ_DATA, i
club XDATA, i
- vpaddq var_ddq_add(%rip), xcounter, var_xdata
+ vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata
vptest ddq_low_msk(%rip), var_xdata
jnz 1f
vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata
@@ -180,8 +173,7 @@ ddq_add_8:
vmovdqa 1*16(p_keys), xkeyA
vpxor xkey0, xdata0, xdata0
- club DDQ_DATA, by
- vpaddq var_ddq_add(%rip), xcounter, xcounter
+ vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter
vptest ddq_low_msk(%rip), xcounter
jnz 1f
vpaddq ddq_high_add_1(%rip), xcounter, xcounter
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index cb2deb61c5d9..29b27f9a6e1e 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -270,7 +270,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
PSHUFB_XMM %xmm2, %xmm0
movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
- PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7
movdqu HashKey(%arg2), %xmm13
CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
@@ -982,7 +982,7 @@ _initial_blocks_done\@:
* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
-.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
+.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM1, \XMM5
@@ -1190,7 +1190,7 @@ aes_loop_par_enc_done\@:
* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
-.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
+.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM1, \XMM5
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index d9b734d0c8cc..3c6e01520a97 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -170,7 +170,7 @@ continue_block:
## branch into array
lea jump_table(%rip), bufp
- movzxw (bufp, %rax, 2), len
+ movzwq (bufp, %rax, 2), len
lea crc_array(%rip), bufp
lea (bufp, len, 1), bufp
JMP_NOSPEC bufp
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 31fbb4a7d9f6..993dd06c8923 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -98,13 +98,6 @@ For 32-bit we have the following conventions - kernel is built with
#define SIZEOF_PTREGS 21*8
.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
- /*
- * Push registers and sanitize registers of values that a
- * speculation attack might otherwise want to exploit. The
- * lower registers are likely clobbered well before they
- * could be put to use in a speculative execution gadget.
- * Interleave XOR with PUSH for better uop scheduling:
- */
.if \save_ret
pushq %rsi /* pt_regs->si */
movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
@@ -114,34 +107,43 @@ For 32-bit we have the following conventions - kernel is built with
pushq %rsi /* pt_regs->si */
.endif
pushq \rdx /* pt_regs->dx */
- xorl %edx, %edx /* nospec dx */
pushq %rcx /* pt_regs->cx */
- xorl %ecx, %ecx /* nospec cx */
pushq \rax /* pt_regs->ax */
pushq %r8 /* pt_regs->r8 */
- xorl %r8d, %r8d /* nospec r8 */
pushq %r9 /* pt_regs->r9 */
- xorl %r9d, %r9d /* nospec r9 */
pushq %r10 /* pt_regs->r10 */
- xorl %r10d, %r10d /* nospec r10 */
pushq %r11 /* pt_regs->r11 */
- xorl %r11d, %r11d /* nospec r11*/
pushq %rbx /* pt_regs->rbx */
- xorl %ebx, %ebx /* nospec rbx*/
pushq %rbp /* pt_regs->rbp */
- xorl %ebp, %ebp /* nospec rbp*/
pushq %r12 /* pt_regs->r12 */
- xorl %r12d, %r12d /* nospec r12*/
pushq %r13 /* pt_regs->r13 */
- xorl %r13d, %r13d /* nospec r13*/
pushq %r14 /* pt_regs->r14 */
- xorl %r14d, %r14d /* nospec r14*/
pushq %r15 /* pt_regs->r15 */
- xorl %r15d, %r15d /* nospec r15*/
UNWIND_HINT_REGS
+
.if \save_ret
pushq %rsi /* return address on top of stack */
.endif
+
+ /*
+ * Sanitize registers of values that a speculation attack might
+ * otherwise want to exploit. The lower registers are likely clobbered
+ * well before they could be put to use in a speculative execution
+ * gadget.
+ */
+ xorl %edx, %edx /* nospec dx */
+ xorl %ecx, %ecx /* nospec cx */
+ xorl %r8d, %r8d /* nospec r8 */
+ xorl %r9d, %r9d /* nospec r9 */
+ xorl %r10d, %r10d /* nospec r10 */
+ xorl %r11d, %r11d /* nospec r11 */
+ xorl %ebx, %ebx /* nospec rbx */
+ xorl %ebp, %ebp /* nospec rbp */
+ xorl %r12d, %r12d /* nospec r12 */
+ xorl %r13d, %r13d /* nospec r13 */
+ xorl %r14d, %r14d /* nospec r14 */
+ xorl %r15d, %r15d /* nospec r15 */
+
.endm
.macro POP_REGS pop_rdi=1 skip_r11rcx=0
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index d07432062ee6..37d9016d4768 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -1489,6 +1489,7 @@ ENTRY(int3)
END(int3)
ENTRY(general_protection)
+ ASM_CLAC
pushl $do_general_protection
jmp common_exception
END(general_protection)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index ccb5e3486aee..dfe26f3cfffc 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -312,7 +312,6 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
*/
syscall_return_via_sysret:
/* rcx and r11 are already restored (see code above) */
- UNWIND_HINT_EMPTY
POP_REGS pop_rdi=0 skip_r11rcx=1
/*
@@ -321,6 +320,7 @@ syscall_return_via_sysret:
*/
movq %rsp, %rdi
movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+ UNWIND_HINT_EMPTY
pushq RSP-RDI(%rdi) /* RSP */
pushq (%rdi) /* RDI */
@@ -575,7 +575,7 @@ END(spurious_entries_start)
* +----------------------------------------------------+
*/
ENTRY(interrupt_entry)
- UNWIND_HINT_FUNC
+ UNWIND_HINT_IRET_REGS offset=16
ASM_CLAC
cld
@@ -607,9 +607,9 @@ ENTRY(interrupt_entry)
pushq 5*8(%rdi) /* regs->eflags */
pushq 4*8(%rdi) /* regs->cs */
pushq 3*8(%rdi) /* regs->ip */
+ UNWIND_HINT_IRET_REGS
pushq 2*8(%rdi) /* regs->orig_ax */
pushq 8(%rdi) /* return address */
- UNWIND_HINT_FUNC
movq (%rdi), %rdi
jmp 2f
@@ -700,6 +700,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
*/
movq %rsp, %rdi
movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+ UNWIND_HINT_EMPTY
/* Copy the IRET frame to the trampoline stack. */
pushq 6*8(%rdi) /* SS */
@@ -1744,7 +1745,7 @@ ENTRY(rewind_stack_do_exit)
movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
leaq -PTREGS_SIZE(%rax), %rsp
- UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
+ UNWIND_HINT_REGS
call do_exit
END(rewind_stack_do_exit)
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 5b8b556dbb12..46caca4d9141 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -343,7 +343,7 @@ static void vgetcpu_cpu_init(void *arg)
#ifdef CONFIG_NUMA
node = cpu_to_node(cpu);
#endif
- if (static_cpu_has(X86_FEATURE_RDTSCP))
+ if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
write_rdtscp_aux((node << 12) | cpu);
/*
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 07bf5517d9d8..2410bd4bb48f 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -89,6 +89,7 @@ struct perf_ibs {
u64 max_period;
unsigned long offset_mask[1];
int offset_max;
+ unsigned int fetch_count_reset_broken : 1;
struct cpu_perf_ibs __percpu *pcpu;
struct attribute **format_attrs;
@@ -346,11 +347,15 @@ static u64 get_ibs_op_count(u64 config)
{
u64 count = 0;
+ /*
+ * If the internal 27-bit counter rolled over, the count is MaxCnt
+ * and the lower 7 bits of CurCnt are randomized.
+ * Otherwise CurCnt has the full 27-bit current counter value.
+ */
if (config & IBS_OP_VAL)
- count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */
-
- if (ibs_caps & IBS_CAPS_RDWROPCNT)
- count += (config & IBS_OP_CUR_CNT) >> 32;
+ count = (config & IBS_OP_MAX_CNT) << 4;
+ else if (ibs_caps & IBS_CAPS_RDWROPCNT)
+ count = (config & IBS_OP_CUR_CNT) >> 32;
return count;
}
@@ -375,7 +380,12 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
struct hw_perf_event *hwc, u64 config)
{
- wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
+ u64 tmp = hwc->config | config;
+
+ if (perf_ibs->fetch_count_reset_broken)
+ wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);
+
+ wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
}
/*
@@ -637,18 +647,24 @@ fail:
perf_ibs->offset_max,
offset + 1);
} while (offset < offset_max);
+ /*
+ * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately
+ * depending on their availability.
+ * Can't add to offset_max as they are staggered
+ */
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
- /*
- * Read IbsBrTarget and IbsOpData4 separately
- * depending on their availability.
- * Can't add to offset_max as they are staggered
- */
- if (ibs_caps & IBS_CAPS_BRNTRGT) {
- rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
- size++;
+ if (perf_ibs == &perf_ibs_op) {
+ if (ibs_caps & IBS_CAPS_BRNTRGT) {
+ rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
+ size++;
+ }
+ if (ibs_caps & IBS_CAPS_OPDATA4) {
+ rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
+ size++;
+ }
}
- if (ibs_caps & IBS_CAPS_OPDATA4) {
- rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
+ if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
+ rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
size++;
}
}
@@ -744,6 +760,13 @@ static __init void perf_event_ibs_init(void)
{
struct attribute **attr = ibs_op_format_attrs;
+ /*
+ * Some chips fail to reset the fetch count when it is written; instead
+ * they need a 0-1 transition of IbsFetchEn.
+ */
+ if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
+ perf_ibs_fetch.fetch_count_reset_broken = 1;
+
perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
if (ibs_caps & IBS_CAPS_OPCNT) {
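
The get_ibs_op_count() change above reads either MaxCnt (after the counter rolled over) or CurCnt (while it is still counting), never the sum of both. A minimal sketch of that selection, with placeholder parameters standing in for the real IBS_OP_* masks:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: the mask values are placeholders, not the IBS definitions. */
static uint64_t op_count(uint64_t config, uint64_t val_bit,
			 uint64_t max_cnt_mask, uint64_t cur_cnt_mask,
			 bool have_rdwropcnt)
{
	if (config & val_bit)			/* rolled over: MaxCnt is the count */
		return (config & max_cnt_mask) << 4;
	if (have_rdwropcnt)			/* otherwise CurCnt holds the live value */
		return (config & cur_cnt_mask) >> 32;
	return 0;
}
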
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index 3210fee27e7f..c08bcba5c3a9 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -84,12 +84,12 @@ static struct attribute_group amd_iommu_events_group = {
};
struct amd_iommu_event_desc {
- struct kobj_attribute attr;
+ struct device_attribute attr;
const char *event;
};
-static ssize_t _iommu_event_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t _iommu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct amd_iommu_event_desc *event =
container_of(attr, struct amd_iommu_event_desc, attr);
@@ -387,7 +387,7 @@ static __init int _init_events_attrs(void)
while (amd_iommu_v2_event_descs[i].attr.attr.name)
i++;
- attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
+ attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return -ENOMEM;
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 4a650eb3d94a..3b3cd12c0692 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -100,14 +100,14 @@
MODULE_LICENSE("GPL");
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \
-static ssize_t __cstate_##_var##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
+static ssize_t __cstate_##_var##_show(struct device *dev, \
+ struct device_attribute *attr, \
char *page) \
{ \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sprintf(page, _format "\n"); \
} \
-static struct kobj_attribute format_attr_##_var = \
+static struct device_attribute format_attr_##_var = \
__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
static ssize_t cstate_get_attr_cpumask(struct device *dev,
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 79caeba8b6f0..b3279feff458 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1557,7 +1557,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
*/
if (!pebs_status && cpuc->pebs_enabled &&
!(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
- pebs_status = cpuc->pebs_enabled;
+ pebs_status = p->status = cpuc->pebs_enabled;
bit = find_first_bit((unsigned long *)&pebs_status,
x86_pmu.max_pebs_events);
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 2413169ce362..bc348663da94 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -115,18 +115,6 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
* any other bit is reserved
*/
#define RAPL_EVENT_MASK 0xFFULL
-
-#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \
-static ssize_t __rapl_##_var##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
- char *page) \
-{ \
- BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
- return sprintf(page, _format "\n"); \
-} \
-static struct kobj_attribute format_attr_##_var = \
- __ATTR(_name, 0444, __rapl_##_var##_show, NULL)
-
#define RAPL_CNTR_WIDTH 32
#define RAPL_EVENT_ATTR_STR(_name, v, str) \
@@ -548,7 +536,7 @@ static struct attribute_group rapl_pmu_events_group = {
.attrs = NULL, /* patched at runtime */
};
-DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
+PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
&format_attr_event.attr,
NULL,
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 7098b9b05d56..2f4ed5aa08ba 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -90,8 +90,8 @@ end:
return map;
}
-ssize_t uncore_event_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ssize_t uncore_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct uncore_event_desc *event =
container_of(attr, struct uncore_event_desc, attr);
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 40e040ec31b5..0fc86ac73b51 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -133,7 +133,7 @@ struct intel_uncore_box {
#define UNCORE_BOX_FLAG_CTL_OFFS8 1 /* event config registers are 8-byte apart */
struct uncore_event_desc {
- struct kobj_attribute attr;
+ struct device_attribute attr;
const char *config;
};
@@ -153,8 +153,8 @@ struct pci2phy_map {
struct pci2phy_map *__find_pci2phy_map(int segment);
-ssize_t uncore_event_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf);
+ssize_t uncore_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
#define INTEL_UNCORE_EVENT_DESC(_name, _config) \
{ \
@@ -163,14 +163,14 @@ ssize_t uncore_event_show(struct kobject *kobj,
}
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
-static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
+static ssize_t __uncore_##_var##_show(struct device *dev, \
+ struct device_attribute *attr, \
char *page) \
{ \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sprintf(page, _format "\n"); \
} \
-static struct kobj_attribute format_attr_##_var = \
+static struct device_attribute format_attr_##_var = \
__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
static inline bool uncore_pmc_fixed(int idx)
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 8e4e8e423839..c06074b847fa 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1030,7 +1030,6 @@ enum {
SNBEP_PCI_QPI_PORT0_FILTER,
SNBEP_PCI_QPI_PORT1_FILTER,
BDX_PCI_QPI_PORT2_FILTER,
- HSWEP_PCI_PCU_3,
};
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@@ -2687,22 +2686,33 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
NULL,
};
-void hswep_uncore_cpu_init(void)
+#define HSWEP_PCU_DID 0x2fc0
+#define HSWEP_PCU_CAPID4_OFFET 0x94
+#define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
+
+static bool hswep_has_limit_sbox(unsigned int device)
{
- int pkg = boot_cpu_data.logical_proc_id;
+ struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+ u32 capid4;
+
+ if (!dev)
+ return false;
+
+ pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
+ if (!hswep_get_chop(capid4))
+ return true;
+ return false;
+}
+
+void hswep_uncore_cpu_init(void)
+{
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
/* Detect 6-8 core systems with only two SBOXes */
- if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
- u32 capid4;
-
- pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
- 0x94, &capid4);
- if (((capid4 >> 6) & 0x3) == 0)
- hswep_uncore_sbox.num_boxes = 2;
- }
+ if (hswep_has_limit_sbox(HSWEP_PCU_DID))
+ hswep_uncore_sbox.num_boxes = 2;
uncore_msr_uncores = hswep_msr_uncores;
}
@@ -2965,11 +2975,6 @@ static const struct pci_device_id hswep_uncore_pci_ids[] = {
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
SNBEP_PCI_QPI_PORT1_FILTER),
},
- { /* PCU.3 (for Capability registers) */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
- HSWEP_PCI_PCU_3),
- },
{ /* end: all zeroes */ }
};
@@ -3061,27 +3066,18 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
EVENT_CONSTRAINT_END
};
+#define BDX_PCU_DID 0x6fc0
+
void bdx_uncore_cpu_init(void)
{
- int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
-
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
- /* BDX-DE doesn't have SBOX */
- if (boot_cpu_data.x86_model == 86) {
- uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
/* Detect systems with no SBOXes */
- } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
- struct pci_dev *pdev;
- u32 capid4;
-
- pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
- pci_read_config_dword(pdev, 0x94, &capid4);
- if (((capid4 >> 6) & 0x3) == 0)
- bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
- }
+ if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
+ uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
+
hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
@@ -3302,11 +3298,6 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
BDX_PCI_QPI_PORT2_FILTER),
},
- { /* PCU.3 (for Capability registers) */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
- HSWEP_PCI_PCU_3),
- },
{ /* end: all zeroes */ }
};
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 8a9cff1f129d..1663ad84778b 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -30,6 +30,7 @@
#include <linux/clockchips.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
+#include <linux/kernel.h>
#include <linux/cpuhotplug.h>
#ifdef CONFIG_HYPERV_TSCPAGE
@@ -427,11 +428,14 @@ void hyperv_cleanup(void)
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);
-void hyperv_report_panic(struct pt_regs *regs, long err)
+void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
{
static bool panic_reported;
u64 guest_id;
+ if (in_die && !panic_on_oops)
+ return;
+
/*
* We prefer to report panic on 'die' chain as we have proper
* registers to report, but if we miss it (e.g. on BUG()) we need
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 2f34d5275352..e666f7eaf32d 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -66,11 +66,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
if (!hv_hypercall_pg)
goto do_native;
- if (cpumask_empty(cpus))
- return;
-
local_irq_save(flags);
+ /*
+ * Only check the mask _after_ interrupts have been disabled to avoid the
+ * mask changing under our feet.
+ */
+ if (cpumask_empty(cpus)) {
+ local_irq_restore(flags);
+ return;
+ }
+
flush_pcpu = (struct hv_tlb_flush **)
this_cpu_ptr(hyperv_pcpu_input_arg);
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 3c1e51ead072..b5354e216b07 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -190,16 +190,6 @@ static inline void lapic_assign_legacy_vector(unsigned int i, bool r) { }
#endif /* !CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_X2APIC
-/*
- * Make previous memory operations globally visible before
- * sending the IPI through x2apic wrmsr. We need a serializing instruction or
- * mfence for this.
- */
-static inline void x2apic_wrmsr_fence(void)
-{
- asm volatile("mfence" : : : "memory");
-}
-
static inline void native_apic_msr_write(u32 reg, u32 v)
{
if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
@@ -252,6 +242,7 @@ static inline u64 native_x2apic_icr_read(void)
extern int x2apic_mode;
extern int x2apic_phys;
+extern void __init x2apic_set_max_apicid(u32 apicid);
extern void __init check_x2apic(void);
extern void x2apic_setup(void);
static inline int x2apic_enabled(void)
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 84f848c2541a..dd22cffc6b3f 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -85,4 +85,22 @@ do { \
#include <asm-generic/barrier.h>
+/*
+ * Make previous memory operations globally visible before
+ * a WRMSR.
+ *
+ * MFENCE makes writes visible, but only affects load/store
+ * instructions. WRMSR is unfortunately not a load/store
+ * instruction and is unaffected by MFENCE. The LFENCE ensures
+ * that the WRMSR is not reordered.
+ *
+ * Most WRMSRs are full serializing instructions themselves and
+ * do not require this barrier. This is only required for the
+ * IA32_TSC_DEADLINE and X2APIC MSRs.
+ */
+static inline void weak_wrmsr_fence(void)
+{
+ asm volatile("mfence; lfence" : : : "memory");
+}
+
#endif /* _ASM_X86_BARRIER_H */
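
weak_wrmsr_fence() is meant to be called immediately before the affected MSR writes. A sketch of the intended call pattern, assuming the existing native_x2apic_icr_write() helper; the real call sites are in the APIC and TSC-deadline code and are not part of this hunk:

/* Sketch only, not part of this patch. */
static void example_x2apic_ipi(u32 low, u32 dest)
{
	weak_wrmsr_fence();			/* make prior stores globally visible */
	native_x2apic_icr_write(low, dest);	/* non-serializing WRMSR to the ICR */
}
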
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
index baeba0567126..884466592943 100644
--- a/arch/x86/include/asm/cpu_device_id.h
+++ b/arch/x86/include/asm/cpu_device_id.h
@@ -9,6 +9,33 @@
#include <linux/mod_devicetable.h>
+#define X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins)
+
+/**
+ * X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching
+ * @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ * The name is expanded to X86_VENDOR_@_vendor
+ * @_family: The family number or X86_FAMILY_ANY
+ * @_model: The model number, model constant or X86_MODEL_ANY
+ * @_steppings: Bitmask for steppings, stepping constant or X86_STEPPING_ANY
+ * @_feature: An X86_FEATURE bit or X86_FEATURE_ANY
+ * @_data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * Backport version to keep the SRBDS pile consistent. No shorter variants
+ * required for this.
+ */
+#define X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
+ _steppings, _feature, _data) { \
+ .vendor = X86_VENDOR_##_vendor, \
+ .family = _family, \
+ .model = _model, \
+ .steppings = _steppings, \
+ .feature = _feature, \
+ .driver_data = (unsigned long) _data \
+}
+
extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
#endif
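
A hypothetical match-table entry using the macro could look like the sketch below; the family, model and stepping values are made up for illustration, X86_STEPPINGS() is defined above, and X86_FEATURE_ANY is the usual wildcard feature value:

/* Illustrative only. */
static const struct x86_cpu_id example_ids[] = {
	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, 0x8e,
						     X86_STEPPINGS(0x9, 0xc),
						     X86_FEATURE_ANY, NULL),
	{}
};
/* ... and then: if (x86_match_cpu(example_ids)) { ... } */
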
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 8c13b99b9507..f7f9604b10cc 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -291,6 +291,7 @@
#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
@@ -347,6 +348,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
@@ -391,5 +393,6 @@
#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
+#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h
index 00f7cf45e699..8e95aa4b0d17 100644
--- a/arch/x86/include/asm/dma.h
+++ b/arch/x86/include/asm/dma.h
@@ -74,7 +74,7 @@
#define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT)
/* 4GB broken PCI/AGP hardware bus master zone */
-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
+#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
#ifdef CONFIG_X86_32
/* The maximum address that we can perform a DMA transfer to on this platform */
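The new MAX_DMA32_PFN form computes the same 4 GiB boundary in pages while staying within unsigned long arithmetic; with a 32-bit unsigned long, the old 4UL * 1024 * 1024 * 1024 product overflows to zero. A quick check of the arithmetic, assuming the usual PAGE_SHIFT of 12 for illustration:

#include <stdio.h>

#define PAGE_SHIFT      12      /* typical x86 value; illustrative */

int main(void)
{
        unsigned long new_form = 1UL << (32 - PAGE_SHIFT);
        /* The old form only works where unsigned long is 64-bit; with a
         * 32-bit unsigned long, 4UL * 1024 * 1024 * 1024 overflows to 0. */
        unsigned long long old_form = (4ULL * 1024 * 1024 * 1024) >> PAGE_SHIFT;

        printf("MAX_DMA32_PFN new form: 0x%lx\n", new_form);
        printf("equivalent value:       0x%llx\n", old_form);
        return 0;
}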
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index c2c01f84df75..3e0e18d376d2 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -208,6 +208,21 @@ static inline int insn_offset_immediate(struct insn *insn)
return insn_offset_displacement(insn) + insn->displacement.nbytes;
}
+/**
+ * for_each_insn_prefix() -- Iterate prefixes in the instruction
+ * @insn: Pointer to struct insn.
+ * @idx: Index storage.
+ * @prefix: Prefix byte.
+ *
+ * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
+ * and the index is stored in @idx (note that @idx is just a cursor;
+ * do not change it).
+ * Since prefixes.nbytes can be bigger than 4 if some prefixes
+ * are repeated, it cannot be used for looping over the prefixes.
+ */
+#define for_each_insn_prefix(insn, idx, prefix) \
+ for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
+
#define POP_SS_OPCODE 0x1f
#define MOV_SREG_OPCODE 0x8e
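for_each_insn_prefix() walks the fixed four prefix slots and stops at the first zero byte rather than trusting prefixes.nbytes, which can exceed four when prefixes repeat. A toy stand-in for struct insn (the real decoder types are kernel-internal) showing the same loop shape:

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

/* Hypothetical stand-in for the kernel's prefix storage in struct insn.
 * nbytes can exceed 4 when prefixes repeat, which is why the macro does
 * not use it for the loop bound. */
struct toy_insn {
        struct {
                unsigned char bytes[4];
                unsigned char nbytes;
        } prefixes;
};

#define for_each_insn_prefix(insn, idx, prefix) \
        for (idx = 0; idx < ARRAY_SIZE((insn)->prefixes.bytes) && \
                      (prefix = (insn)->prefixes.bytes[idx]) != 0; idx++)

int main(void)
{
        /* 0x66 (operand size) and 0xf2 (REPNE), as a decoder might record them. */
        struct toy_insn insn = {
                .prefixes = { .bytes = { 0x66, 0xf2, 0, 0 }, .nbytes = 2 },
        };
        unsigned int i;
        unsigned char p;

        for_each_insn_prefix(&insn, i, p)
                printf("prefix[%u] = 0x%02x\n", i, p);
        return 0;
}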
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 067288d4ef6e..98b74711e6b7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -622,10 +622,10 @@ struct kvm_vcpu_arch {
bool pvclock_set_guest_stopped_request;
struct {
+ u8 preempted;
u64 msr_val;
u64 last_steal;
- struct gfn_to_hva_cache stime;
- struct kvm_steal_time steal;
+ struct gfn_to_pfn_cache cache;
} st;
u64 tsc_offset;
@@ -1070,7 +1070,7 @@ struct kvm_x86_ops {
bool (*xsaves_supported)(void);
bool (*umip_emulated)(void);
- int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
+ int (*check_nested_events)(struct kvm_vcpu *vcpu);
void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
@@ -1099,7 +1099,7 @@ struct kvm_x86_ops {
void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t offset, unsigned long mask);
- int (*write_log_dirty)(struct kvm_vcpu *vcpu);
+ int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
/* pmu operations of sub-arch */
const struct kvm_pmu_ops *pmu_ops;
@@ -1465,12 +1465,14 @@ asmlinkage void __noreturn kvm_spurious_fault(void);
____kvm_handle_fault_on_reboot(insn, "")
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index 209492849566..5c524d4f71cd 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -41,7 +41,7 @@ struct microcode_amd {
unsigned int mpb[0];
};
-#define PATCH_MAX_SIZE PAGE_SIZE
+#define PATCH_MAX_SIZE (3 * PAGE_SIZE)
#ifdef CONFIG_MICROCODE_AMD
extern void __init load_ucode_amd_bsp(unsigned int family);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index f37704497d8f..5b58a6cf487f 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -338,7 +338,7 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
-void hyperv_report_panic(struct pt_regs *regs, long err);
+void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
void hyperv_cleanup(void);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index d2c25a13e1ce..892af8ab95d7 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -110,6 +110,10 @@
#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */
#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */
+/* SRBDS support */
+#define MSR_IA32_MCU_OPT_CTRL 0x00000123
+#define RNGDS_MITG_DIS BIT(0)
+
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
#define MSR_IA32_SYSENTER_EIP 0x00000176
@@ -373,6 +377,7 @@
#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
#define MSR_AMD64_IBSCTL 0xc001103a
#define MSR_AMD64_IBSBRTARGET 0xc001103b
+#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
#define MSR_AMD64_SEV 0xc0010131
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 04addd6e0a4a..2571e2017a8b 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -88,7 +88,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
* think of extending them - you will be slapped with a stinking trout or a frozen
* shark will reach you, wherever you are! You've been warned.
*/
-static inline unsigned long long notrace __rdmsr(unsigned int msr)
+static __always_inline unsigned long long __rdmsr(unsigned int msr)
{
DECLARE_ARGS(val, low, high);
@@ -100,7 +100,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr)
return EAX_EDX_VAL(val, low, high);
}
-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
+static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
{
asm volatile("1: wrmsr\n"
"2:\n"
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 09c7466c4880..62f9903544b5 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -232,6 +232,7 @@ enum spectre_v2_mitigation {
enum spectre_v2_user_mitigation {
SPECTRE_V2_USER_NONE,
SPECTRE_V2_USER_STRICT,
+ SPECTRE_V2_USER_STRICT_PREFERRED,
SPECTRE_V2_USER_PRCTL,
SPECTRE_V2_USER_SECCOMP,
};
@@ -329,7 +330,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
* combination with microcode which triggers a CPU buffer flush when the
* instruction is executed.
*/
-static inline void mds_clear_cpu_buffers(void)
+static __always_inline void mds_clear_cpu_buffers(void)
{
static const u16 ds = __KERNEL_DS;
@@ -350,7 +351,7 @@ static inline void mds_clear_cpu_buffers(void)
*
* Clear CPU buffers if the corresponding static key is enabled
*/
-static inline void mds_user_clear_cpu_buffers(void)
+static __always_inline void mds_user_clear_cpu_buffers(void)
{
if (static_branch_likely(&mds_user_clear))
mds_clear_cpu_buffers();
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 690c0307afed..2a9c12ffb5cb 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -237,6 +237,7 @@ static inline int pmd_large(pmd_t pte)
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_large */
static inline int pmd_trans_huge(pmd_t pmd)
{
return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
@@ -608,12 +609,15 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
return __pmd(val);
}
-/* mprotect needs to preserve PAT bits when updating vm_page_prot */
+/*
+ * mprotect needs to preserve PAT and encryption bits when updating
+ * vm_page_prot
+ */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
- pgprotval_t addbits = pgprot_val(newprot);
+ pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
return __pgprot(preservebits | addbits);
}
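With _PAGE_ENC added to _PAGE_CHG_MASK, the ~_PAGE_CHG_MASK masking of addbits is what stops a caller-supplied protection from flipping the encryption bit that pgprot_modify() is supposed to preserve. A toy sketch with made-up bit positions, not the real x86 PTE layout:

#include <stdio.h>

/* Illustrative bit values only. */
#define _PAGE_RW        (1UL << 1)
#define _PAGE_ENC       (1UL << 15)
#define _PAGE_CHG_MASK  (_PAGE_ENC)     /* bits mprotect must preserve */

static unsigned long pgprot_modify(unsigned long oldprot, unsigned long newprot)
{
        unsigned long preservebits = oldprot & _PAGE_CHG_MASK;
        unsigned long addbits = newprot & ~_PAGE_CHG_MASK;      /* the fix */

        return preservebits | addbits;
}

int main(void)
{
        unsigned long old = _PAGE_RW;                   /* shared/decrypted mapping   */
        unsigned long new = _PAGE_RW | _PAGE_ENC;       /* default prot carries ENC   */

        /* Without the masking, the result would wrongly gain _PAGE_ENC. */
        printf("result leaves _PAGE_ENC clear: %s\n",
               (pgprot_modify(old, new) & _PAGE_ENC) ? "no" : "yes");
        return 0;
}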
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 106b7d0e2dae..02806d95ad6e 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -124,7 +124,7 @@
*/
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
/*
@@ -148,6 +148,7 @@ enum page_cache_mode {
#endif
#define _PAGE_CACHE_MASK (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
+#define _PAGE_LARGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE)
#define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC))
#define _PAGE_CACHE_WP (cachemode2protval(_PAGE_CACHE_MODE_WP))
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
index 19b137f1b3be..2ff9b98812b7 100644
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -4,6 +4,11 @@
#define ARCH_DEFAULT_PKEY 0
+/*
+ * If more than 16 keys are ever supported, a thorough audit
+ * will be necessary to ensure that the types that store key
+ * numbers and masks have sufficient capacity.
+ */
#define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index efb44bd3a714..cc4bb218f1c6 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -336,7 +336,7 @@ struct x86_hw_tss {
#define INVALID_IO_BITMAP_OFFSET 0x8000
struct entry_stack {
- unsigned long words[64];
+ char stack[PAGE_SIZE];
};
struct entry_stack_page {
@@ -522,15 +522,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
}
/*
- * Thread-synchronous status.
- *
- * This is different from the flags in that nobody else
- * ever touches our thread-synchronous status, so we don't
- * have to worry about atomic accesses.
- */
-#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
-
-/*
* Set IOPL bits in EFLAGS from given mask
*/
static inline void native_set_iopl_mask(unsigned mask)
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 07a25753e85c..bc97b8c0fdf7 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -90,28 +90,35 @@ void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#ifdef CONFIG_X86_64
-static inline int set_mce_nospec(unsigned long pfn)
+/*
+ * Prevent speculative access to the page by either unmapping
+ * it (if we do not require access to any part of the page) or
+ * marking it uncacheable (if we want to try to retrieve data
+ * from non-poisoned lines in the page).
+ */
+static inline int set_mce_nospec(unsigned long pfn, bool unmap)
{
unsigned long decoy_addr;
int rc;
/*
- * Mark the linear address as UC to make sure we don't log more
- * errors because of speculative access to the page.
* We would like to just call:
- * set_memory_uc((unsigned long)pfn_to_kaddr(pfn), 1);
+ * set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
* but doing that would radically increase the odds of a
* speculative access to the poison page because we'd have
* the virtual address of the kernel 1:1 mapping sitting
* around in registers.
* Instead we get tricky. We create a non-canonical address
* that looks just like the one we want, but has bit 63 flipped.
- * This relies on set_memory_uc() properly sanitizing any __pa()
+ * This relies on set_memory_XX() properly sanitizing any __pa()
* results with __PHYSICAL_MASK or PTE_PFN_MASK.
*/
decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
- rc = set_memory_uc(decoy_addr, 1);
+ if (unmap)
+ rc = set_memory_np(decoy_addr, 1);
+ else
+ rc = set_memory_uc(decoy_addr, 1);
if (rc)
pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
return rc;
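The decoy address is simply the 1:1-map virtual address of the poisoned page with bit 63 flipped: it keeps the same low bits (so the set_memory_XX() internals recover the right pfn after masking) while being non-canonical, so the usable address never sits in a register. A small arithmetic sketch, with an illustrative PAGE_OFFSET:

#include <stdio.h>
#include <inttypes.h>

/* Illustrative constants; the real values depend on the kernel configuration. */
#define PAGE_SHIFT      12
#define PAGE_OFFSET     0xffff888000000000ULL   /* typical 4-level direct-map base */
#define BIT63           (1ULL << 63)

int main(void)
{
        uint64_t pfn   = 0x12345;
        uint64_t kaddr = PAGE_OFFSET + (pfn << PAGE_SHIFT);            /* address we avoid exposing   */
        uint64_t decoy = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT63);  /* what set_mce_nospec() passes */

        printf("kaddr = 0x%" PRIx64 "\n", kaddr);
        printf("decoy = 0x%" PRIx64 " (differs only in bit 63: %s)\n",
               decoy, (decoy ^ kaddr) == BIT63 ? "yes" : "no");
        return 0;
}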
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 8ec97a62c245..9c556ea2eaa7 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -55,8 +55,13 @@
/*
* Initialize the stackprotector canary value.
*
- * NOTE: this must only be called from functions that never return,
+ * NOTE: this must only be called from functions that never return
* and it must always be inlined.
+ *
+ * In addition, it should be called from a compilation unit for which
+ * stack protector is disabled. Alternatively, the caller should not end
+ * with a function call which gets tail-call optimized as that would
+ * lead to checking a modified canary value.
*/
static __always_inline void boot_init_stack_canary(void)
{
diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
index c67caafd3381..43b5e02a7b4b 100644
--- a/arch/x86/include/asm/sync_core.h
+++ b/arch/x86/include/asm/sync_core.h
@@ -16,12 +16,13 @@ static inline void sync_core_before_usermode(void)
/* With PTI, we unconditionally serialize before running user code. */
if (static_cpu_has(X86_FEATURE_PTI))
return;
+
/*
- * Return from interrupt and NMI is done through iret, which is core
- * serializing.
+ * Even if we're in an interrupt, we might reschedule before returning,
+ * in which case we could switch to a different thread in the same mm
+ * and return using SYSRET or SYSEXIT. Instead of trying to keep
+ * track of our need to sync the core, just sync right away.
*/
- if (in_irq() || in_nmi())
- return;
sync_core();
}
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 82b73b75d67c..b5e4c357523e 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -227,10 +227,31 @@ static inline int arch_within_stack_frames(const void * const stack,
#endif
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
+
+#ifndef __ASSEMBLY__
#ifdef CONFIG_COMPAT
#define TS_I386_REGS_POKED 0x0004 /* regs poked by 32-bit ptracer */
+#define TS_COMPAT_RESTART 0x0008
+
+#define arch_set_restart_data arch_set_restart_data
+
+static inline void arch_set_restart_data(struct restart_block *restart)
+{
+ struct thread_info *ti = current_thread_info();
+ if (ti->status & TS_COMPAT)
+ ti->status |= TS_COMPAT_RESTART;
+ else
+ ti->status &= ~TS_COMPAT_RESTART;
+}
#endif
-#ifndef __ASSEMBLY__
#ifdef CONFIG_X86_32
#define in_ia32_syscall() true
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 971830341061..82b0ff6cac97 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -711,7 +711,17 @@ extern struct movsl_mask {
* checking before using them, but you have to surround them with the
* user_access_begin/end() pair.
*/
-#define user_access_begin() __uaccess_begin()
+static __must_check inline bool user_access_begin(int type,
+ const void __user *ptr,
+ size_t len)
+{
+ if (unlikely(!access_ok(type, ptr, len)))
+ return 0;
+ __uaccess_begin_nospec();
+ return 1;
+}
+
+#define user_access_begin(a, b, c) user_access_begin(a, b, c)
#define user_access_end() __uaccess_end()
#define unsafe_put_user(x, ptr, err_label) \
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index 499578f7e6d7..70fc159ebe69 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -19,7 +19,7 @@ struct unwind_state {
#if defined(CONFIG_UNWINDER_ORC)
bool signal, full_regs;
unsigned long sp, bp, ip;
- struct pt_regs *regs;
+ struct pt_regs *regs, *prev_regs;
#elif defined(CONFIG_UNWINDER_FRAME_POINTER)
bool got_irq;
unsigned long *bp, *orig_sp, ip;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 3b20607d581b..b35c34cfbe52 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1565,10 +1565,18 @@ void __init acpi_boot_table_init(void)
/*
* Initialize the ACPI boot-time table parser.
*/
- if (acpi_table_init()) {
+ if (acpi_locate_initial_tables())
disable_acpi();
- return;
- }
+ else
+ acpi_reserve_initial_tables();
+}
+
+int __init early_acpi_boot_init(void)
+{
+ if (acpi_disabled)
+ return 1;
+
+ acpi_table_init_complete();
acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
@@ -1581,18 +1589,9 @@ void __init acpi_boot_table_init(void)
} else {
printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
disable_acpi();
- return;
+ return 1;
}
}
-}
-
-int __init early_acpi_boot_init(void)
-{
- /*
- * If acpi_disabled, bail out
- */
- if (acpi_disabled)
- return 1;
/*
* Process the Multiple APIC Description Table (MADT), if present
@@ -1752,7 +1751,7 @@ int __acpi_acquire_global_lock(unsigned int *lock)
new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
val = cmpxchg(lock, old, new);
} while (unlikely (val != old));
- return (new < 3) ? -1 : 0;
+ return ((new & 0x3) < 3) ? -1 : 0;
}
int __acpi_release_global_lock(unsigned int *lock)
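In the global-lock hunk above, only bits 0 (pending) and 1 (owned) of the FACS lock dword are defined; firmware may keep other bits set, so the "acquired" check has to look at new & 0x3 rather than the whole value. A toy model of the acquire step, with the cmpxchg retry loop elided:

#include <stdio.h>

/* Toy model: bit 0 = pending, bit 1 = owned; upper bits are firmware-owned. */
static int try_acquire(unsigned int *lock)
{
        unsigned int old = *lock;
        unsigned int new = ((old & ~0x3u) + 2) + ((old >> 1) & 0x1);

        *lock = new;                            /* cmpxchg loop elided          */
        /* With the unmasked "new < 3" test, any set upper bit would make
         * even a successful acquire look like a failure. */
        return ((new & 0x3) < 3) ? -1 : 0;      /* -1: acquired, 0: must wait   */
}

int main(void)
{
        unsigned int lock = 0x80000000;         /* free, but with an upper bit set */

        printf("first  acquire: %s (lock = 0x%x)\n",
               try_acquire(&lock) ? "acquired" : "pending", lock);
        printf("second acquire: %s (lock = 0x%x)\n",
               try_acquire(&lock) ? "acquired" : "pending", lock);
        return 0;
}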
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 158ad1483c43..92539a1c3e31 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -133,7 +133,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
/* Make sure we are running on right CPU */
- retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx);
+ retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx,
+ false);
if (retval == 0) {
/* Use the hint in CST */
percpu_entry->states[cx->index].eax = cx->address;
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index b481b95bd8f6..923b4bac9613 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -11,14 +11,17 @@
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
+#include <linux/pci_ids.h>
#include <asm/amd_nb.h>
#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
-#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
-#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
+#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
@@ -28,9 +31,11 @@ static u32 *flush_words;
static const struct pci_device_id amd_root_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
{}
};
+
#define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704
const struct pci_device_id amd_nb_misc_ids[] = {
@@ -44,7 +49,10 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
@@ -57,6 +65,9 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 1ca76ca944ba..da6b52c70964 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -41,6 +41,7 @@
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <linux/atomic.h>
+#include <asm/barrier.h>
#include <asm/mpspec.h>
#include <asm/i8259.h>
#include <asm/proto.h>
@@ -345,8 +346,6 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
* According to Intel, MFENCE can do the serialization here.
*/
asm volatile("mfence" : : : "memory");
-
- printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
return;
}
@@ -467,6 +466,9 @@ static int lapic_next_deadline(unsigned long delta,
{
u64 tsc;
+ /* This MSR is special and need a special fence: */
+ weak_wrmsr_fence();
+
tsc = rdtsc();
wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
return 0;
@@ -545,7 +547,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
#define DEADLINE_MODEL_MATCH_REV(model, rev) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)rev }
-static u32 hsx_deadline_rev(void)
+static __init u32 hsx_deadline_rev(void)
{
switch (boot_cpu_data.x86_stepping) {
case 0x02: return 0x3a; /* EP */
@@ -555,7 +557,7 @@ static u32 hsx_deadline_rev(void)
return ~0U;
}
-static u32 bdx_deadline_rev(void)
+static __init u32 bdx_deadline_rev(void)
{
switch (boot_cpu_data.x86_stepping) {
case 0x02: return 0x00000011;
@@ -567,7 +569,7 @@ static u32 bdx_deadline_rev(void)
return ~0U;
}
-static u32 skx_deadline_rev(void)
+static __init u32 skx_deadline_rev(void)
{
switch (boot_cpu_data.x86_stepping) {
case 0x03: return 0x01000136;
@@ -580,7 +582,7 @@ static u32 skx_deadline_rev(void)
return ~0U;
}
-static const struct x86_cpu_id deadline_match[] = {
+static const struct x86_cpu_id deadline_match[] __initconst = {
DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev),
DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020),
DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev),
@@ -602,18 +604,19 @@ static const struct x86_cpu_id deadline_match[] = {
{},
};
-static void apic_check_deadline_errata(void)
+static __init bool apic_validate_deadline_timer(void)
{
const struct x86_cpu_id *m;
u32 rev;
- if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
- boot_cpu_has(X86_FEATURE_HYPERVISOR))
- return;
+ if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+ return false;
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return true;
m = x86_match_cpu(deadline_match);
if (!m)
- return;
+ return true;
/*
* Function pointers will have the MSB set due to address layout,
@@ -625,11 +628,12 @@ static void apic_check_deadline_errata(void)
rev = (u32)m->driver_data;
if (boot_cpu_data.microcode >= rev)
- return;
+ return true;
setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; "
"please update microcode to version: 0x%x (or later)\n", rev);
+ return false;
}
/*
@@ -1813,20 +1817,22 @@ static __init void try_to_enable_x2apic(int remap_mode)
return;
if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
- /* IR is required if there is APIC ID > 255 even when running
- * under KVM
+ /*
+ * Using X2APIC without IR is not architecturally supported
+ * on bare metal but may be supported in guests.
*/
- if (max_physical_apicid > 255 ||
- !x86_init.hyper.x2apic_available()) {
+ if (!x86_init.hyper.x2apic_available()) {
pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
x2apic_disable();
return;
}
/*
- * without IR all CPUs can be addressed by IOAPIC/MSI
- * only in physical mode
+ * Without IR, all CPUs can be addressed by IOAPIC/MSI only
+ * in physical mode, and CPUs with an APIC ID that cannot
+ * be addressed must not be brought online.
*/
+ x2apic_set_max_apicid(255);
x2apic_phys = 1;
}
x2apic_enable();
@@ -2023,7 +2029,8 @@ void __init init_apic_mappings(void)
{
unsigned int new_apicid;
- apic_check_deadline_errata();
+ if (apic_validate_deadline_timer())
+ pr_info("TSC deadline timer available\n");
if (x2apic_mode) {
boot_cpu_physical_apicid = read_apic_id();
@@ -2272,6 +2279,11 @@ static int cpuid_to_apicid[] = {
[0 ... NR_CPUS - 1] = -1,
};
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpuid_to_apicid[cpu];
+}
+
#ifdef CONFIG_SMP
/**
* apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index fa3b85b222e3..a89dac380243 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1043,6 +1043,16 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {
irq = mp_irqs[idx].srcbusirq;
legacy = mp_is_legacy_irq(irq);
+ /*
+ * IRQ2 is unusable for historical reasons on systems which
+ * have a legacy PIC. See the comment vs. IRQ2 further down.
+ *
+ * If this gets removed at some point then the related code
+ * in lapic_assign_system_vectors() needs to be adjusted as
+ * well.
+ */
+ if (legacy && irq == PIC_CASCADE_IR)
+ return -EINVAL;
}
mutex_lock(&ioapic_mutex);
@@ -2250,6 +2260,7 @@ static inline void __init check_timer(void)
legacy_pic->init(0);
legacy_pic->make_irq(0);
apic_write(APIC_LVT0, APIC_DM_EXTINT);
+ legacy_pic->unmask(0);
unlock_ExtINT_logic();
@@ -2323,12 +2334,12 @@ static int mp_irqdomain_create(int ioapic)
ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
(void *)(long)ioapic);
- /* Release fw handle if it was allocated above */
- if (!cfg->dev)
- irq_domain_free_fwnode(fn);
-
- if (!ip->irqdomain)
+ if (!ip->irqdomain) {
+ /* Release fw handle if it was allocated above */
+ if (!cfg->dev)
+ irq_domain_free_fwnode(fn);
return -ENOMEM;
+ }
ip->irqdomain->parent = parent;
@@ -2342,8 +2353,13 @@ static int mp_irqdomain_create(int ioapic)
static void ioapic_destroy_irqdomain(int idx)
{
+ struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg;
+ struct fwnode_handle *fn = ioapics[idx].irqdomain->fwnode;
+
if (ioapics[idx].irqdomain) {
irq_domain_remove(ioapics[idx].irqdomain);
+ if (!cfg->dev)
+ irq_domain_free_fwnode(fn);
ioapics[idx].irqdomain = NULL;
}
}
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index 1f5df339e48f..fb26c956c442 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -265,12 +265,13 @@ void __init arch_init_msi_domain(struct irq_domain *parent)
msi_default_domain =
pci_msi_create_irq_domain(fn, &pci_msi_domain_info,
parent);
- irq_domain_free_fwnode(fn);
}
- if (!msi_default_domain)
+ if (!msi_default_domain) {
+ irq_domain_free_fwnode(fn);
pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n");
- else
+ } else {
msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
+ }
}
#ifdef CONFIG_IRQ_REMAP
@@ -303,7 +304,8 @@ struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent,
if (!fn)
return NULL;
d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent);
- irq_domain_free_fwnode(fn);
+ if (!d)
+ irq_domain_free_fwnode(fn);
return d;
}
#endif
@@ -366,7 +368,8 @@ static struct irq_domain *dmar_get_irq_domain(void)
if (fn) {
dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info,
x86_vector_domain);
- irq_domain_free_fwnode(fn);
+ if (!dmar_domain)
+ irq_domain_free_fwnode(fn);
}
out:
mutex_unlock(&dmar_lock);
@@ -491,7 +494,10 @@ struct irq_domain *hpet_create_irq_domain(int hpet_id)
}
d = msi_create_irq_domain(fn, domain_info, parent);
- irq_domain_free_fwnode(fn);
+ if (!d) {
+ irq_domain_free_fwnode(fn);
+ kfree(domain_info);
+ }
return d;
}
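The io_apic.c and msi.c hunks above (and the matching vector.c change below) all apply the same rule: the fwnode handle has to outlive the irq domain built from it, so it is now released only on the failure paths (and hpet_create_irq_domain() also unwinds domain_info). A generic, hypothetical sketch of that ownership rule:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins: a "fwnode" handle and a "domain" that keeps a
 * reference to it for its whole lifetime. */
struct fwnode { int id; };
struct domain { struct fwnode *fn; };

static struct domain *create_domain(struct fwnode *fn, int should_fail)
{
        struct domain *d;

        if (should_fail)
                return NULL;
        d = malloc(sizeof(*d));
        if (d)
                d->fn = fn;     /* ownership of fn effectively moves to the domain */
        return d;
}

int main(void)
{
        struct fwnode *fn = malloc(sizeof(*fn));
        struct domain *d = create_domain(fn, 0);

        if (!d) {
                free(fn);       /* only the failure path may free the handle */
                return 1;
        }
        /* Success: fn must NOT be freed here; it is released with the domain. */
        printf("domain created, fwnode %p retained\n", (void *)d->fn);
        free(d->fn);
        free(d);
        return 0;
}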
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index c352ca2e1456..f0d0535e8f34 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -274,20 +274,24 @@ static int assign_irq_vector_any_locked(struct irq_data *irqd)
const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
int node = irq_data_get_node(irqd);
- if (node == NUMA_NO_NODE)
- goto all;
- /* Try the intersection of @affmsk and node mask */
- cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
- if (!assign_vector_locked(irqd, vector_searchmask))
- return 0;
- /* Try the node mask */
- if (!assign_vector_locked(irqd, cpumask_of_node(node)))
- return 0;
-all:
+ if (node != NUMA_NO_NODE) {
+ /* Try the intersection of @affmsk and node mask */
+ cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
+ if (!assign_vector_locked(irqd, vector_searchmask))
+ return 0;
+ }
+
/* Try the full affinity mask */
cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
if (!assign_vector_locked(irqd, vector_searchmask))
return 0;
+
+ if (node != NUMA_NO_NODE) {
+ /* Try the node mask */
+ if (!assign_vector_locked(irqd, cpumask_of_node(node)))
+ return 0;
+ }
+
/* Try the full online mask */
return assign_vector_locked(irqd, cpu_online_mask);
}
@@ -448,12 +452,10 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
trace_vector_activate(irqd->irq, apicd->is_managed,
apicd->can_reserve, reserve);
- /* Nothing to do for fixed assigned vectors */
- if (!apicd->can_reserve && !apicd->is_managed)
- return 0;
-
raw_spin_lock_irqsave(&vector_lock, flags);
- if (reserve || irqd_is_managed_and_shutdown(irqd))
+ if (!apicd->can_reserve && !apicd->is_managed)
+ assign_irq_vector_any_locked(irqd);
+ else if (reserve || irqd_is_managed_and_shutdown(irqd))
vector_assign_managed_shutdown(irqd);
else if (apicd->is_managed)
ret = activate_managed(irqd);
@@ -558,6 +560,10 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
irqd->chip_data = apicd;
irqd->hwirq = virq + i;
irqd_set_single_target(irqd);
+
+ /* Don't invoke affinity setter on deactivated interrupts */
+ irqd_set_affinity_on_activate(irqd);
+
/*
* Legacy vectors are already assigned when the IOAPIC
* takes them over. They stay on the same vector. This is
@@ -705,7 +711,6 @@ int __init arch_early_irq_init(void)
x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
NULL);
BUG_ON(x86_vector_domain == NULL);
- irq_domain_free_fwnode(fn);
irq_set_default_host(x86_vector_domain);
arch_init_msi_domain(x86_vector_domain);
@@ -771,20 +776,10 @@ void lapic_offline(void)
static int apic_set_affinity(struct irq_data *irqd,
const struct cpumask *dest, bool force)
{
- struct apic_chip_data *apicd = apic_chip_data(irqd);
int err;
- /*
- * Core code can call here for inactive interrupts. For inactive
- * interrupts which use managed or reservation mode there is no
- * point in going through the vector assignment right now as the
- * activation will assign a vector which fits the destination
- * cpumask. Let the core code store the destination mask and be
- * done with it.
- */
- if (!irqd_is_activated(irqd) &&
- (apicd->is_managed || apicd->can_reserve))
- return IRQ_SET_MASK_OK;
+ if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
+ return -EIO;
raw_spin_lock(&vector_lock);
cpumask_and(vector_searchmask, dest, cpu_online_mask);
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 145517934171..b2c7e3bc55d2 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -31,7 +31,8 @@ static void x2apic_send_IPI(int cpu, int vector)
{
u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}
@@ -43,7 +44,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
unsigned long flags;
u32 dest;
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
local_irq_save(flags);
tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index b5cf9e7b3830..8e70c2ba21b3 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -13,6 +13,12 @@
int x2apic_phys;
static struct apic apic_x2apic_phys;
+static u32 x2apic_max_apicid __ro_after_init;
+
+void __init x2apic_set_max_apicid(u32 apicid)
+{
+ x2apic_max_apicid = apicid;
+}
static int __init set_x2apic_phys_mode(char *arg)
{
@@ -42,7 +48,8 @@ static void x2apic_send_IPI(int cpu, int vector)
{
u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
__x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
}
@@ -53,7 +60,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
unsigned long this_cpu;
unsigned long flags;
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
local_irq_save(flags);
@@ -103,6 +111,9 @@ static int x2apic_phys_probe(void)
/* Common x2apic functions, also used by x2apic_cluster */
int x2apic_apic_id_valid(u32 apicid)
{
+ if (x2apic_max_apicid && apicid > x2apic_max_apicid)
+ return 0;
+
return 1;
}
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 120769955687..de69090ca142 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1122,8 +1122,7 @@ static const int amd_erratum_383[] =
/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
- AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
-
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 2d23a448e72d..c524fa1f4c0e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -41,6 +41,7 @@ static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init mds_print_mitigation(void);
static void __init taa_select_mitigation(void);
+static void __init srbds_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
@@ -60,7 +61,7 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
-/* Control conditional STIPB in switch_to() */
+/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
@@ -108,6 +109,7 @@ void __init check_bugs(void)
l1tf_select_mitigation();
mds_select_mitigation();
taa_select_mitigation();
+ srbds_select_mitigation();
/*
* As MDS and TAA mitigations are inter-related, print MDS
@@ -391,6 +393,97 @@ static int __init tsx_async_abort_parse_cmdline(char *str)
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
#undef pr_fmt
+#define pr_fmt(fmt) "SRBDS: " fmt
+
+enum srbds_mitigations {
+ SRBDS_MITIGATION_OFF,
+ SRBDS_MITIGATION_UCODE_NEEDED,
+ SRBDS_MITIGATION_FULL,
+ SRBDS_MITIGATION_TSX_OFF,
+ SRBDS_MITIGATION_HYPERVISOR,
+};
+
+static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
+
+static const char * const srbds_strings[] = {
+ [SRBDS_MITIGATION_OFF] = "Vulnerable",
+ [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
+ [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
+ [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
+ [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
+};
+
+static bool srbds_off;
+
+void update_srbds_msr(void)
+{
+ u64 mcu_ctrl;
+
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ return;
+
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return;
+
+ if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
+ return;
+
+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+
+ switch (srbds_mitigation) {
+ case SRBDS_MITIGATION_OFF:
+ case SRBDS_MITIGATION_TSX_OFF:
+ mcu_ctrl |= RNGDS_MITG_DIS;
+ break;
+ case SRBDS_MITIGATION_FULL:
+ mcu_ctrl &= ~RNGDS_MITG_DIS;
+ break;
+ default:
+ break;
+ }
+
+ wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+}
+
+static void __init srbds_select_mitigation(void)
+{
+ u64 ia32_cap;
+
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ return;
+
+ /*
+ * Check to see if this is one of the MDS_NO systems supporting
+ * TSX that are only exposed to SRBDS when TSX is enabled.
+ */
+ ia32_cap = x86_read_arch_cap_msr();
+ if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
+ srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
+ else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
+ else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
+ srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
+ else if (cpu_mitigations_off() || srbds_off)
+ srbds_mitigation = SRBDS_MITIGATION_OFF;
+
+ update_srbds_msr();
+ pr_info("%s\n", srbds_strings[srbds_mitigation]);
+}
+
+static int __init srbds_parse_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ return 0;
+
+ srbds_off = !strcmp(str, "off");
+ return 0;
+}
+early_param("srbds", srbds_parse_cmdline);
+
+#undef pr_fmt
#define pr_fmt(fmt) "Spectre V1 : " fmt
enum spectre_v1_mitigation {
@@ -488,7 +581,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline);
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
SPECTRE_V2_NONE;
-static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
+ SPECTRE_V2_USER_NONE;
+static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
SPECTRE_V2_USER_NONE;
#ifdef CONFIG_RETPOLINE
@@ -540,10 +635,11 @@ enum spectre_v2_user_cmd {
};
static const char * const spectre_v2_user_strings[] = {
- [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
- [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
- [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
- [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
+ [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
+ [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
+ [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
+ [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
+ [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
};
static const struct {
@@ -637,11 +733,13 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
if (boot_cpu_has(X86_FEATURE_IBPB)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+ spectre_v2_user_ibpb = mode;
switch (cmd) {
case SPECTRE_V2_USER_CMD_FORCE:
case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
static_branch_enable(&switch_mm_always_ibpb);
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
break;
case SPECTRE_V2_USER_CMD_PRCTL:
case SPECTRE_V2_USER_CMD_AUTO:
@@ -657,21 +755,32 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
"always-on" : "conditional");
}
- /* If enhanced IBRS is enabled no STIPB required */
- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ /*
+ * If enhanced IBRS is enabled or SMT impossible, STIBP is not
+ * required.
+ */
+ if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return;
/*
- * If SMT is not possible or STIBP is not available clear the STIPB
- * mode.
+ * At this point, an STIBP mode other than "off" has been set.
+ * If STIBP support is not being forced, check if STIBP always-on
+ * is preferred.
*/
- if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+ if (mode != SPECTRE_V2_USER_STRICT &&
+ boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+ mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+
+ /*
+ * If STIBP is not available, clear the STIBP mode.
+ */
+ if (!boot_cpu_has(X86_FEATURE_STIBP))
mode = SPECTRE_V2_USER_NONE;
+
+ spectre_v2_user_stibp = mode;
+
set_mode:
- spectre_v2_user = mode;
- /* Only print the STIBP mode when SMT possible */
- if (smt_possible)
- pr_info("%s\n", spectre_v2_user_strings[mode]);
+ pr_info("%s\n", spectre_v2_user_strings[mode]);
}
static const char * const spectre_v2_strings[] = {
@@ -902,10 +1011,11 @@ void arch_smt_update(void)
{
mutex_lock(&spec_ctrl_mutex);
- switch (spectre_v2_user) {
+ switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
break;
case SPECTRE_V2_USER_STRICT:
+ case SPECTRE_V2_USER_STRICT_PREFERRED:
update_stibp_strict();
break;
case SPECTRE_V2_USER_PRCTL:
@@ -1130,18 +1240,41 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
return 0;
}
+static bool is_spec_ib_user_controlled(void)
+{
+ return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
+ spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
+}
+
static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
switch (ctrl) {
case PR_SPEC_ENABLE:
- if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return 0;
+
/*
- * Indirect branch speculation is always disabled in strict
- * mode.
+ * With strict mode for both IBPB and STIBP, the instruction
+ * code paths avoid checking this task flag and instead,
+ * unconditionally run the instruction. However, STIBP and IBPB
+ * are independent and either can be set to conditionally
+ * enabled regardless of the mode of the other.
+ *
+ * If either is set to conditional, allow the task flag to be
+ * updated, unless it was force-disabled by a previous prctl
+ * call. Currently, this is possible on an AMD CPU which has the
+ * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
+ * kernel is booted with 'spectre_v2_user=seccomp', then
+ * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
+ * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
*/
- if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
+ if (!is_spec_ib_user_controlled() ||
+ task_spec_ib_force_disable(task))
return -EPERM;
+
task_clear_spec_ib_disable(task);
task_update_spec_tif(task);
break;
@@ -1151,10 +1284,13 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
* Indirect branch speculation is always allowed when
* mitigation is force disabled.
*/
- if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return -EPERM;
- if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
+
+ if (!is_spec_ib_user_controlled())
return 0;
+
task_set_spec_ib_disable(task);
if (ctrl == PR_SPEC_FORCE_DISABLE)
task_set_spec_ib_force_disable(task);
@@ -1184,7 +1320,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
{
if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
- if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
@@ -1213,21 +1350,21 @@ static int ib_prctl_get(struct task_struct *task)
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
return PR_SPEC_NOT_AFFECTED;
- switch (spectre_v2_user) {
- case SPECTRE_V2_USER_NONE:
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return PR_SPEC_ENABLE;
- case SPECTRE_V2_USER_PRCTL:
- case SPECTRE_V2_USER_SECCOMP:
+ else if (is_spec_ib_user_controlled()) {
if (task_spec_ib_force_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
if (task_spec_ib_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
- case SPECTRE_V2_USER_STRICT:
+ } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
return PR_SPEC_DISABLE;
- default:
+ else
return PR_SPEC_NOT_AFFECTED;
- }
}
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
@@ -1466,11 +1603,13 @@ static char *stibp_state(void)
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return "";
- switch (spectre_v2_user) {
+ switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
return ", STIBP: disabled";
case SPECTRE_V2_USER_STRICT:
return ", STIBP: forced";
+ case SPECTRE_V2_USER_STRICT_PREFERRED:
+ return ", STIBP: always-on";
case SPECTRE_V2_USER_PRCTL:
case SPECTRE_V2_USER_SECCOMP:
if (static_key_enabled(&switch_to_cond_stibp))
@@ -1491,6 +1630,11 @@ static char *ibpb_state(void)
return "";
}
+static ssize_t srbds_show_state(char *buf)
+{
+ return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
+}
+
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
@@ -1535,6 +1679,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_ITLB_MULTIHIT:
return itlb_multihit_show_state(buf);
+ case X86_BUG_SRBDS:
+ return srbds_show_state(buf);
+
default:
break;
}
@@ -1581,4 +1728,9 @@ ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr
{
return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}
+
+ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
+}
#endif
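update_srbds_msr() earlier in this file is a read-modify-write of MSR_IA32_MCU_OPT_CTRL: the mitigation is in effect while RNGDS_MITG_DIS stays clear, and the "off" and "TSX off" modes opt out by setting it. A toy sketch with the MSR replaced by a plain variable:

#include <stdio.h>

#define RNGDS_MITG_DIS  (1ULL << 0)     /* from the msr-index.h hunk above */

enum srbds_mitigations_demo { SRBDS_DEMO_OFF, SRBDS_DEMO_FULL };

/* Toy stand-in: the real code uses rdmsrl()/wrmsrl() on MSR_IA32_MCU_OPT_CTRL. */
static unsigned long long mcu_opt_ctrl;

static void update_srbds_demo(enum srbds_mitigations_demo mode)
{
        unsigned long long v = mcu_opt_ctrl;    /* rdmsrl()                        */

        if (mode == SRBDS_DEMO_OFF)
                v |= RNGDS_MITG_DIS;            /* opt out of the mitigation       */
        else
                v &= ~RNGDS_MITG_DIS;           /* keep the mitigation active      */

        mcu_opt_ctrl = v;                       /* wrmsrl()                        */
}

int main(void)
{
        update_srbds_demo(SRBDS_DEMO_FULL);
        printf("full: RNGDS_MITG_DIS=%llu\n", mcu_opt_ctrl & RNGDS_MITG_DIS);
        update_srbds_demo(SRBDS_DEMO_OFF);
        printf("off:  RNGDS_MITG_DIS=%llu\n", mcu_opt_ctrl & RNGDS_MITG_DIS);
        return 0;
}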
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 7f43eba8d0c1..2058e8c0e61d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1013,9 +1013,30 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
{}
};
-static bool __init cpu_matches(unsigned long which)
+#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \
+ X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \
+ INTEL_FAM6_##model, steppings, \
+ X86_FEATURE_ANY, issues)
+
+#define SRBDS BIT(0)
+
+static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(HASWELL_CORE, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(HASWELL_ULT, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(HASWELL_GT3E, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(BROADWELL_GT3E, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(BROADWELL_CORE, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x0, 0xC), SRBDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP, X86_STEPPINGS(0x0, 0xD), SRBDS),
+ {}
+};
+
+static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
{
- const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
+ const struct x86_cpu_id *m = x86_match_cpu(table);
return m && !!(m->driver_data & which);
}
@@ -1035,29 +1056,32 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
u64 ia32_cap = x86_read_arch_cap_msr();
/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
- if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+ if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
+ !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
- if (cpu_matches(NO_SPECULATION))
+ if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
return;
setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
- if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
+ if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
+ !(ia32_cap & ARCH_CAP_SSB_NO) &&
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
if (ia32_cap & ARCH_CAP_IBRS_ALL)
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
- if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
+ if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
+ !(ia32_cap & ARCH_CAP_MDS_NO)) {
setup_force_cpu_bug(X86_BUG_MDS);
- if (cpu_matches(MSBDS_ONLY))
+ if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
}
- if (!cpu_matches(NO_SWAPGS))
+ if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
setup_force_cpu_bug(X86_BUG_SWAPGS);
/*
@@ -1075,7 +1099,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
(ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
setup_force_cpu_bug(X86_BUG_TAA);
- if (cpu_matches(NO_MELTDOWN))
+ /*
+ * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
+ * in the vulnerability blacklist.
+ */
+ if ((cpu_has(c, X86_FEATURE_RDRAND) ||
+ cpu_has(c, X86_FEATURE_RDSEED)) &&
+ cpu_matches(cpu_vuln_blacklist, SRBDS))
+ setup_force_cpu_bug(X86_BUG_SRBDS);
+
+ if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
/* Rogue Data Cache Load? No! */
@@ -1084,7 +1117,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
- if (cpu_matches(NO_L1TF))
+ if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
return;
setup_force_cpu_bug(X86_BUG_L1TF);
@@ -1519,6 +1552,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
mtrr_ap_init();
validate_apic_and_package_id(c);
x86_spec_ctrl_setup_ap();
+ update_srbds_msr();
}
static __init int setup_noclflush(char *arg)
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 236582c90d3f..e89602d2aff5 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -80,6 +80,7 @@ extern void detect_ht(struct cpuinfo_x86 *c);
unsigned int aperfmperf_get_khz(int cpu);
extern void x86_spec_ctrl_setup_ap(void);
+extern void update_srbds_msr(void);
extern u64 x86_read_arch_cap_msr(void);
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 1c92cd08c3b4..b32fa6bcf811 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -555,6 +555,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
d->id = id;
cpumask_set_cpu(cpu, &d->cpu_mask);
+ rdt_domain_reconfigure_cdp(r);
+
if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
kfree(d);
return;
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 3736f6dc9545..8412234eabd3 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -251,7 +251,6 @@ struct rftype {
* struct mbm_state - status for each MBM counter in each domain
* @chunks: Total data moved (multiply by rdt_group.mon_scale to get bytes)
* @prev_msr Value of IA32_QM_CTR for this RMID last time we read it
- * @chunks_bw Total local data moved. Used for bandwidth calculation
* @prev_bw_msr:Value of previous IA32_QM_CTR for bandwidth counting
* @prev_bw The most recent bandwidth in MBps
* @delta_bw Difference between the current and previous bandwidth
@@ -260,7 +259,6 @@ struct rftype {
struct mbm_state {
u64 chunks;
u64 prev_msr;
- u64 chunks_bw;
u64 prev_bw_msr;
u32 prev_bw;
u32 delta_bw;
@@ -567,5 +565,6 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms);
void cqm_handle_limbo(struct work_struct *work);
bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
void __check_limbo(struct rdt_domain *d, bool force_free);
+void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
#endif /* _ASM_X86_INTEL_RDT_H */
diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c
index 3d4ec80a6bb9..5dfa5ab9a5ae 100644
--- a/arch/x86/kernel/cpu/intel_rdt_monitor.c
+++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c
@@ -290,8 +290,6 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
return;
chunks = mbm_overflow_count(m->prev_bw_msr, tval);
- m->chunks_bw += chunks;
- m->chunks = m->chunks_bw;
cur_bw = (chunks * r->mon_scale) >> 20;
if (m->delta_comp)
@@ -461,15 +459,14 @@ static void mbm_update(struct rdt_domain *d, int rmid)
}
if (is_mbm_local_enabled()) {
rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
+ __mon_event_count(rmid, &rr);
/*
* Call the MBA software controller only for the
* control groups and when user has enabled
* the software controller explicitly.
*/
- if (!is_mba_sc(NULL))
- __mon_event_count(rmid, &rr);
- else
+ if (is_mba_sc(NULL))
mbm_bw_count(rmid, &rr);
}
}
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 11c5accfa4db..f406e3b85bdb 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -515,85 +515,88 @@ unlock:
return ret ?: nbytes;
}
-struct task_move_callback {
- struct callback_head work;
- struct rdtgroup *rdtgrp;
-};
-
-static void move_myself(struct callback_head *head)
+/**
+ * rdtgroup_remove - the helper to remove resource group safely
+ * @rdtgrp: resource group to remove
+ *
+ * On resource group creation via a mkdir, an extra kernfs_node reference is
+ * taken to ensure that the rdtgroup structure remains accessible for the
+ * rdtgroup_kn_unlock() calls where it is removed.
+ *
+ * Drop the extra reference here, then free the rdtgroup structure.
+ *
+ * Return: void
+ */
+static void rdtgroup_remove(struct rdtgroup *rdtgrp)
{
- struct task_move_callback *callback;
- struct rdtgroup *rdtgrp;
-
- callback = container_of(head, struct task_move_callback, work);
- rdtgrp = callback->rdtgrp;
+ kernfs_put(rdtgrp->kn);
+ kfree(rdtgrp);
+}
+static void _update_task_closid_rmid(void *task)
+{
/*
- * If resource group was deleted before this task work callback
- * was invoked, then assign the task to root group and free the
- * resource group.
+ * If the task is still current on this CPU, update PQR_ASSOC MSR.
+ * Otherwise, the MSR is updated when the task is scheduled in.
*/
- if (atomic_dec_and_test(&rdtgrp->waitcount) &&
- (rdtgrp->flags & RDT_DELETED)) {
- current->closid = 0;
- current->rmid = 0;
- kfree(rdtgrp);
- }
-
- preempt_disable();
- /* update PQR_ASSOC MSR to make resource group go into effect */
- intel_rdt_sched_in();
- preempt_enable();
+ if (task == current)
+ intel_rdt_sched_in();
+}
- kfree(callback);
+static void update_task_closid_rmid(struct task_struct *t)
+{
+ if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
+ smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
+ else
+ _update_task_closid_rmid(t);
}
static int __rdtgroup_move_task(struct task_struct *tsk,
struct rdtgroup *rdtgrp)
{
- struct task_move_callback *callback;
- int ret;
-
- callback = kzalloc(sizeof(*callback), GFP_KERNEL);
- if (!callback)
- return -ENOMEM;
- callback->work.func = move_myself;
- callback->rdtgrp = rdtgrp;
+ /* If the task is already in rdtgrp, no need to move the task. */
+ if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
+ tsk->rmid == rdtgrp->mon.rmid) ||
+ (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
+ tsk->closid == rdtgrp->mon.parent->closid))
+ return 0;
/*
- * Take a refcount, so rdtgrp cannot be freed before the
- * callback has been invoked.
+ * Set the task's closid/rmid before the PQR_ASSOC MSR can be
+ * updated with them.
+ *
+ * For ctrl_mon groups, move both closid and rmid.
+ * For monitor groups, tasks can only be moved from
+ * their parent CTRL group.
*/
- atomic_inc(&rdtgrp->waitcount);
- ret = task_work_add(tsk, &callback->work, true);
- if (ret) {
- /*
- * Task is exiting. Drop the refcount and free the callback.
- * No need to check the refcount as the group cannot be
- * deleted before the write function unlocks rdtgroup_mutex.
- */
- atomic_dec(&rdtgrp->waitcount);
- kfree(callback);
- rdt_last_cmd_puts("task exited\n");
- } else {
- /*
- * For ctrl_mon groups move both closid and rmid.
- * For monitor groups, can move the tasks only from
- * their parent CTRL group.
- */
- if (rdtgrp->type == RDTCTRL_GROUP) {
- tsk->closid = rdtgrp->closid;
+
+ if (rdtgrp->type == RDTCTRL_GROUP) {
+ tsk->closid = rdtgrp->closid;
+ tsk->rmid = rdtgrp->mon.rmid;
+ } else if (rdtgrp->type == RDTMON_GROUP) {
+ if (rdtgrp->mon.parent->closid == tsk->closid) {
tsk->rmid = rdtgrp->mon.rmid;
- } else if (rdtgrp->type == RDTMON_GROUP) {
- if (rdtgrp->mon.parent->closid == tsk->closid) {
- tsk->rmid = rdtgrp->mon.rmid;
- } else {
- rdt_last_cmd_puts("Can't move task to different control group\n");
- ret = -EINVAL;
- }
+ } else {
+ rdt_last_cmd_puts("Can't move task to different control group\n");
+ return -EINVAL;
}
}
- return ret;
+
+ /*
+ * Ensure the task's closid and rmid are written before determining if
+ * the task is current, which decides whether it will be interrupted.
+ */
+ barrier();
+
+ /*
+ * By now, the task's closid and rmid are set. If the task is current
+ * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
+ * group go into effect. If the task is not current, the MSR will be
+ * updated when the task is scheduled in.
+ */
+ update_task_closid_rmid(tsk);
+
+ return 0;
}
/**
@@ -1035,6 +1038,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
_r_cdp = NULL;
+ _d_cdp = NULL;
ret = -EINVAL;
}
@@ -1625,7 +1629,6 @@ static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
if (IS_ERR(kn_subdir))
return PTR_ERR(kn_subdir);
- kernfs_get(kn_subdir);
ret = rdtgroup_kn_set_ugid(kn_subdir);
if (ret)
return ret;
@@ -1648,7 +1651,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
if (IS_ERR(kn_info))
return PTR_ERR(kn_info);
- kernfs_get(kn_info);
ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
if (ret)
@@ -1669,12 +1671,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
goto out_destroy;
}
- /*
- * This extra ref will be put in kernfs_remove() and guarantees
- * that @rdtgrp->kn is always accessible.
- */
- kernfs_get(kn_info);
-
ret = rdtgroup_kn_set_ugid(kn_info);
if (ret)
goto out_destroy;
@@ -1703,12 +1699,6 @@ mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
if (dest_kn)
*dest_kn = kn;
- /*
- * This extra ref will be put in kernfs_remove() and guarantees
- * that @rdtgrp->kn is always accessible.
- */
- kernfs_get(kn);
-
ret = rdtgroup_kn_set_ugid(kn);
if (ret)
goto out_destroy;
@@ -1777,6 +1767,19 @@ static int set_cache_qos_cfg(int level, bool enable)
return 0;
}
+/* Restore the qos cfg state when a domain comes online */
+void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
+{
+ if (!r->alloc_capable)
+ return;
+
+ if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
+ l2_qos_cfg_update(&r->alloc_enabled);
+
+ if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
+ l3_qos_cfg_update(&r->alloc_enabled);
+}
+
/*
* Enable or disable the MBA software controller
* which helps user specify bandwidth in MBps.
@@ -1959,8 +1962,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
rdtgroup_pseudo_lock_remove(rdtgrp);
kernfs_unbreak_active_protection(kn);
- kernfs_put(rdtgrp->kn);
- kfree(rdtgrp);
+ rdtgroup_remove(rdtgrp);
} else {
kernfs_unbreak_active_protection(kn);
}
@@ -2011,7 +2013,6 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
dentry = ERR_PTR(ret);
goto out_info;
}
- kernfs_get(kn_mongrp);
ret = mkdir_mondata_all(rdtgroup_default.kn,
&rdtgroup_default, &kn_mondata);
@@ -2019,7 +2020,6 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
dentry = ERR_PTR(ret);
goto out_mongrp;
}
- kernfs_get(kn_mondata);
rdtgroup_default.mon.mon_data_kn = kn_mondata;
}
@@ -2171,7 +2171,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
if (atomic_read(&sentry->waitcount) != 0)
sentry->flags = RDT_DELETED;
else
- kfree(sentry);
+ rdtgroup_remove(sentry);
}
}
@@ -2213,7 +2213,7 @@ static void rmdir_all_sub(void)
if (atomic_read(&rdtgrp->waitcount) != 0)
rdtgrp->flags = RDT_DELETED;
else
- kfree(rdtgrp);
+ rdtgroup_remove(rdtgrp);
}
/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
update_closid_rmid(cpu_online_mask, &rdtgroup_default);
@@ -2312,11 +2312,6 @@ static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
if (IS_ERR(kn))
return PTR_ERR(kn);
- /*
- * This extra ref will be put in kernfs_remove() and guarantees
- * that kn is always accessible.
- */
- kernfs_get(kn);
ret = rdtgroup_kn_set_ugid(kn);
if (ret)
goto out_destroy;
@@ -2608,8 +2603,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
/*
* kernfs_remove() will drop the reference count on "kn" which
* will free it. But we still need it to stick around for the
- * rdtgroup_kn_unlock(kn} call below. Take one extra reference
- * here, which will be dropped inside rdtgroup_kn_unlock().
+ * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
+ * which will be dropped by kernfs_put() in rdtgroup_remove().
*/
kernfs_get(kn);
@@ -2650,6 +2645,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
out_idfree:
free_rmid(rdtgrp->mon.rmid);
out_destroy:
+ kernfs_put(rdtgrp->kn);
kernfs_remove(rdtgrp->kn);
out_free_rgrp:
kfree(rdtgrp);
@@ -2662,7 +2658,7 @@ static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
{
kernfs_remove(rgrp->kn);
free_rmid(rgrp->mon.rmid);
- kfree(rgrp);
+ rdtgroup_remove(rgrp);
}
/*
@@ -2824,11 +2820,6 @@ static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
list_del(&rdtgrp->mon.crdtgrp_list);
- /*
- * one extra hold on this, will drop when we kfree(rdtgrp)
- * in rdtgroup_kn_unlock()
- */
- kernfs_get(kn);
kernfs_remove(rdtgrp->kn);
return 0;
@@ -2840,11 +2831,6 @@ static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
rdtgrp->flags = RDT_DELETED;
list_del(&rdtgrp->rdtgroup_list);
- /*
- * one extra hold on this, will drop when we kfree(rdtgrp)
- * in rdtgroup_kn_unlock()
- */
- kernfs_get(kn);
kernfs_remove(rdtgrp->kn);
return 0;
}
@@ -2910,7 +2896,8 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
* If the rdtgroup is a mon group and parent directory
* is a valid "mon_groups" directory, remove the mon group.
*/
- if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) {
+ if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
+ rdtgrp != &rdtgroup_default) {
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
ret = rdtgroup_ctrl_remove(kn, rdtgrp);
diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
index 3fed38812eea..751e59057466 100644
--- a/arch/x86/kernel/cpu/match.c
+++ b/arch/x86/kernel/cpu/match.c
@@ -34,13 +34,18 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
const struct x86_cpu_id *m;
struct cpuinfo_x86 *c = &boot_cpu_data;
- for (m = match; m->vendor | m->family | m->model | m->feature; m++) {
+ for (m = match;
+ m->vendor | m->family | m->model | m->steppings | m->feature;
+ m++) {
if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor)
continue;
if (m->family != X86_FAMILY_ANY && c->x86 != m->family)
continue;
if (m->model != X86_MODEL_ANY && c->x86_model != m->model)
continue;
+ if (m->steppings != X86_STEPPING_ANY &&
+ !(BIT(c->x86_stepping) & m->steppings))
+ continue;
if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature))
continue;
return m;
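The steppings field added here is a bitmask, with bit N standing for stepping N, so one table entry can cover several steppings at once. A sketch of what such an entry could look like (the family/model/stepping values below are made up for illustration):

        static const struct x86_cpu_id example_ids[] = {
                {
                        .vendor    = X86_VENDOR_INTEL,
                        .family    = 6,
                        .model     = 0x8e,
                        .steppings = BIT(0x9) | BIT(0xa),  /* steppings 9 and 10 only */
                        .feature   = X86_FEATURE_ANY,
                },
                { }     /* all-zero entry terminates the table */
        };

        if (x86_match_cpu(example_ids))
                pr_info("CPU matches one of the listed steppings\n");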
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 1ceccc4a5472..9cc524be3c94 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -518,7 +518,7 @@ static void do_inject(void)
*/
if (inj_type == DFR_INT_INJ) {
i_mce.status |= MCI_STATUS_DEFERRED;
- i_mce.status |= (i_mce.status & ~MCI_STATUS_UC);
+ i_mce.status &= ~MCI_STATUS_UC;
}
/*
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 1f69b12d5bb8..2a13468f8773 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -535,6 +535,13 @@ bool mce_is_memory_error(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);
+static bool whole_page(struct mce *m)
+{
+ if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
+ return true;
+ return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
+}
+
bool mce_is_correctable(struct mce *m)
{
if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
@@ -600,7 +607,7 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
pfn = mce->addr >> PAGE_SHIFT;
if (!memory_failure(pfn, 0))
- set_mce_nospec(pfn);
+ set_mce_nospec(pfn, whole_page(mce));
}
return NOTIFY_OK;
@@ -1101,7 +1108,7 @@ static int do_memory_failure(struct mce *m)
if (ret)
pr_err("Memory error not recovered");
else
- set_mce_nospec(m->addr >> PAGE_SHIFT);
+ set_mce_nospec(m->addr >> PAGE_SHIFT, whole_page(m));
return ret;
}
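For context on whole_page(): the ADDR_LSB field of MCi_MISC records the least significant valid bit of the reported address, i.e. the log2 of the error's granularity, so comparing it with PAGE_SHIFT says whether at least one full page is affected. The same check written out long-hand (a sketch only):

        unsigned int lsb = MCI_MISC_ADDR_LSB(m->misc);  /* log2 of the error granularity */
        u64 span = 1ULL << lsb;                         /* bytes covered by the report */
        bool whole = span >= PAGE_SIZE;                 /* equivalent to lsb >= PAGE_SHIFT */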
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index a96091d44a45..eab4de387ce6 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -627,16 +627,16 @@ static ssize_t reload_store(struct device *dev,
if (val != 1)
return size;
- tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
- if (tmp_ret != UCODE_NEW)
- return size;
-
get_online_cpus();
ret = check_online_cpus();
if (ret)
goto put;
+ tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+ if (tmp_ret != UCODE_NEW)
+ goto put;
+
mutex_lock(&microcode_mutex);
ret = microcode_reload_late();
mutex_unlock(&microcode_mutex);
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 16936a24795c..3aa0e5a45303 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -103,53 +103,6 @@ static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev
return find_matching_signature(mc, csig, cpf);
}
-/*
- * Given CPU signature and a microcode patch, this function finds if the
- * microcode patch has matching family and model with the CPU.
- *
- * %true - if there's a match
- * %false - otherwise
- */
-static bool microcode_matches(struct microcode_header_intel *mc_header,
- unsigned long sig)
-{
- unsigned long total_size = get_totalsize(mc_header);
- unsigned long data_size = get_datasize(mc_header);
- struct extended_sigtable *ext_header;
- unsigned int fam_ucode, model_ucode;
- struct extended_signature *ext_sig;
- unsigned int fam, model;
- int ext_sigcount, i;
-
- fam = x86_family(sig);
- model = x86_model(sig);
-
- fam_ucode = x86_family(mc_header->sig);
- model_ucode = x86_model(mc_header->sig);
-
- if (fam == fam_ucode && model == model_ucode)
- return true;
-
- /* Look for ext. headers: */
- if (total_size <= data_size + MC_HEADER_SIZE)
- return false;
-
- ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
- ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
- ext_sigcount = ext_header->count;
-
- for (i = 0; i < ext_sigcount; i++) {
- fam_ucode = x86_family(ext_sig->sig);
- model_ucode = x86_model(ext_sig->sig);
-
- if (fam == fam_ucode && model == model_ucode)
- return true;
-
- ext_sig++;
- }
- return false;
-}
-
static struct ucode_patch *memdup_patch(void *data, unsigned int size)
{
struct ucode_patch *p;
@@ -167,7 +120,7 @@ static struct ucode_patch *memdup_patch(void *data, unsigned int size)
return p;
}
-static void save_microcode_patch(void *data, unsigned int size)
+static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
{
struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
struct ucode_patch *iter, *tmp, *p = NULL;
@@ -213,6 +166,9 @@ static void save_microcode_patch(void *data, unsigned int size)
if (!p)
return;
+ if (!find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
+ return;
+
/*
* Save for early loading. On 32-bit, that needs to be a physical
* address as the APs are running from physical addresses, before
@@ -347,13 +303,14 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
size -= mc_size;
- if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
+ if (!find_matching_signature(data, uci->cpu_sig.sig,
+ uci->cpu_sig.pf)) {
data += mc_size;
continue;
}
if (save) {
- save_microcode_patch(data, mc_size);
+ save_microcode_patch(uci, data, mc_size);
goto next;
}
@@ -486,14 +443,14 @@ static void show_saved_mc(void)
* Save this microcode patch. It will be loaded early when a CPU is
* hot-added or resumes.
*/
-static void save_mc_for_early(u8 *mc, unsigned int size)
+static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size)
{
/* Synchronization during CPU hotplug. */
static DEFINE_MUTEX(x86_cpu_microcode_mutex);
mutex_lock(&x86_cpu_microcode_mutex);
- save_microcode_patch(mc, size);
+ save_microcode_patch(uci, mc, size);
show_saved_mc();
mutex_unlock(&x86_cpu_microcode_mutex);
@@ -937,7 +894,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
* permanent memory. So it will be loaded early when a CPU is hot added
* or resumes.
*/
- save_mc_for_early(new_mc, new_mc_size);
+ save_mc_for_early(uci, new_mc, new_mc_size);
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 852e74e48890..f8b0fa2dbe37 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -214,8 +214,8 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
- pr_info("Hyper-V: features 0x%x, hints 0x%x\n",
- ms_hyperv.features, ms_hyperv.hints);
+ pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n",
+ ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features);
ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS);
@@ -250,6 +250,16 @@ static void __init ms_hyperv_init_platform(void)
cpuid_eax(HYPERV_CPUID_NESTED_FEATURES);
}
+ /*
+ * Hyper-V expects to get crash register data or kmsg when
+ * crash enlightenment is available and the system crashes. Set
+ * crash_kexec_post_notifiers to true to make sure the crash
+ * enlightenment interface is called before running the kdump
+ * kernel.
+ */
+ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE)
+ crash_kexec_post_notifiers = true;
+
#ifdef CONFIG_X86_LOCAL_APIC
if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index e12ee86906c6..9436f3452049 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -166,9 +166,6 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
*repeat = 0;
*uniform = 1;
- /* Make end inclusive instead of exclusive */
- end--;
-
prev_match = MTRR_TYPE_INVALID;
for (i = 0; i < num_var_ranges; ++i) {
unsigned short start_state, end_state, inclusive;
@@ -260,6 +257,9 @@ u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
int repeat;
u64 partial_end;
+ /* Make end inclusive instead of exclusive */
+ end--;
+
if (!mtrr_state_set)
return MTRR_TYPE_INVALID;
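Hoisting the end-- into mtrr_type_lookup() makes the exclusive-to-inclusive conversion apply uniformly, before the fixed-range check and the variable-range walk alike. For example:

        u8 uniform;

        /* The caller passes an exclusive end; the lookup now consistently
         * evaluates this as the inclusive range 0x1000 .. 0x1fff. */
        u8 type = mtrr_type_lookup(0x1000, 0x2000, &uniform);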
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index f631a3f15587..91b3483e5085 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -356,7 +356,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
struct crash_memmap_data cmd;
struct crash_mem *cmem;
- cmem = vzalloc(sizeof(struct crash_mem));
+ cmem = vzalloc(struct_size(cmem, ranges, 1));
if (!cmem)
return -ENOMEM;
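struct_size() (from <linux/overflow.h>) sizes a structure that ends in a flexible array while guarding against arithmetic overflow: struct_size(cmem, ranges, 1) is sizeof(*cmem) plus space for one ranges[] element, which is what the code needs when it goes on to fill in a single range. The general idiom, with p/elems/n as placeholder names:

        /* Equivalent to kzalloc(sizeof(*p) + n * sizeof(p->elems[0]), ...),
         * but with the multiplication and addition checked for overflow. */
        p = kzalloc(struct_size(p, elems, n), GFP_KERNEL);
        if (!p)
                return -ENOMEM;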
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 6abd83572b01..9692ccc583bb 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -249,9 +249,9 @@ static void __init fpu__init_system_ctx_switch(void)
*/
static void __init fpu__init_parse_early_param(void)
{
- char arg[32];
+ char arg[128];
char *argptr = arg;
- int bit;
+ int arglen, res, bit;
if (cmdline_find_option_bool(boot_command_line, "no387"))
setup_clear_cpu_cap(X86_FEATURE_FPU);
@@ -271,12 +271,26 @@ static void __init fpu__init_parse_early_param(void)
if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
setup_clear_cpu_cap(X86_FEATURE_XSAVES);
- if (cmdline_find_option(boot_command_line, "clearcpuid", arg,
- sizeof(arg)) &&
- get_option(&argptr, &bit) &&
- bit >= 0 &&
- bit < NCAPINTS * 32)
- setup_clear_cpu_cap(bit);
+ arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
+ if (arglen <= 0)
+ return;
+
+ pr_info("Clearing CPUID bits:");
+ do {
+ res = get_option(&argptr, &bit);
+ if (res == 0 || res == 3)
+ break;
+
+ /* If the argument was too long, the last bit may be cut off */
+ if (res == 1 && arglen >= sizeof(arg))
+ break;
+
+ if (bit >= 0 && bit < NCAPINTS * 32) {
+ pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
+ setup_clear_cpu_cap(bit);
+ }
+ } while (res == 2);
+ pr_cont("\n");
}
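With this rework clearcpuid= accepts a comma-separated list of feature-bit numbers instead of a single value, and the cleared bits are now logged. The loop is driven by get_option()'s return code; as a reminder of that contract (the bit numbers in the example are arbitrary):

        /*
         * get_option(&str, &val) returns:
         *   0 - no integer parsed              -> stop
         *   1 - integer, end of string         -> last value
         *   2 - integer followed by ','        -> keep looping
         *   3 - integer starting a '-' range   -> ranges are not supported
         *                                         here, so the loop stops
         *
         * So booting with "clearcpuid=154,146" clears two feature bits and
         * reports them via the pr_info()/pr_cont() above.
         */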
/*
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 87a57b7642d3..601a5da1d196 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -907,8 +907,6 @@ const void *get_xsave_field_ptr(int xsave_state)
#ifdef CONFIG_ARCH_HAS_PKEYS
-#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
-#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
/*
* This will go out and modify PKRU register to set the access
* rights for @pkey to @init_val.
@@ -927,6 +925,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
if (!boot_cpu_has(X86_FEATURE_OSPKE))
return -EINVAL;
+ /*
+ * This code should only be called with valid 'pkey'
+ * values originating from in-kernel users. Complain
+ * if a bad value is observed.
+ */
+ WARN_ON_ONCE(pkey >= arch_max_pkey());
+
/* Set the bits we need in PKRU: */
if (init_val & PKEY_DISABLE_ACCESS)
new_pkru_bits |= PKRU_AD_BIT;
@@ -964,18 +969,31 @@ static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
return true;
}
-/*
- * This is similar to user_regset_copyout(), but will not add offset to
- * the source data pointer or increment pos, count, kbuf, and ubuf.
- */
-static inline void
-__copy_xstate_to_kernel(void *kbuf, const void *data,
- unsigned int offset, unsigned int size, unsigned int size_total)
+static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count)
{
- if (offset < size_total) {
- unsigned int copy = min(size, size_total - offset);
+ if (*pos < to) {
+ unsigned size = to - *pos;
+
+ if (size > *count)
+ size = *count;
+ memcpy(*kbuf, (void *)&init_fpstate.xsave + *pos, size);
+ *kbuf += size;
+ *pos += size;
+ *count -= size;
+ }
+}
- memcpy(kbuf + offset, data, copy);
+static void copy_part(unsigned offset, unsigned size, void *from,
+ void **kbuf, unsigned *pos, unsigned *count)
+{
+ fill_gap(offset, kbuf, pos, count);
+ if (size > *count)
+ size = *count;
+ if (size) {
+ memcpy(*kbuf, from, size);
+ *kbuf += size;
+ *pos += size;
+ *count -= size;
}
}
@@ -988,8 +1006,9 @@ __copy_xstate_to_kernel(void *kbuf, const void *data,
*/
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
{
- unsigned int offset, size;
struct xstate_header header;
+ const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr);
+ unsigned count = size_total;
int i;
/*
@@ -1005,46 +1024,42 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
header.xfeatures = xsave->header.xfeatures;
header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
+ if (header.xfeatures & XFEATURE_MASK_FP)
+ copy_part(0, off_mxcsr,
+ &xsave->i387, &kbuf, &offset_start, &count);
+ if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM))
+ copy_part(off_mxcsr, MXCSR_AND_FLAGS_SIZE,
+ &xsave->i387.mxcsr, &kbuf, &offset_start, &count);
+ if (header.xfeatures & XFEATURE_MASK_FP)
+ copy_part(offsetof(struct fxregs_state, st_space), 128,
+ &xsave->i387.st_space, &kbuf, &offset_start, &count);
+ if (header.xfeatures & XFEATURE_MASK_SSE)
+ copy_part(xstate_offsets[XFEATURE_SSE], 256,
+ &xsave->i387.xmm_space, &kbuf, &offset_start, &count);
+ /*
+ * Fill xsave->i387.sw_reserved value for ptrace frame:
+ */
+ copy_part(offsetof(struct fxregs_state, sw_reserved), 48,
+ xstate_fx_sw_bytes, &kbuf, &offset_start, &count);
/*
* Copy xregs_state->header:
*/
- offset = offsetof(struct xregs_state, header);
- size = sizeof(header);
-
- __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total);
+ copy_part(offsetof(struct xregs_state, header), sizeof(header),
+ &header, &kbuf, &offset_start, &count);
- for (i = 0; i < XFEATURE_MAX; i++) {
+ for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
/*
* Copy only in-use xstates:
*/
if ((header.xfeatures >> i) & 1) {
void *src = __raw_xsave_addr(xsave, 1 << i);
- offset = xstate_offsets[i];
- size = xstate_sizes[i];
-
- /* The next component has to fit fully into the output buffer: */
- if (offset + size > size_total)
- break;
-
- __copy_xstate_to_kernel(kbuf, src, offset, size, size_total);
+ copy_part(xstate_offsets[i], xstate_sizes[i],
+ src, &kbuf, &offset_start, &count);
}
}
-
- if (xfeatures_mxcsr_quirk(header.xfeatures)) {
- offset = offsetof(struct fxregs_state, mxcsr);
- size = MXCSR_AND_FLAGS_SIZE;
- __copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total);
- }
-
- /*
- * Fill xsave->i387.sw_reserved value for ptrace frame:
- */
- offset = offsetof(struct fxregs_state, sw_reserved);
- size = sizeof(xstate_fx_sw_bytes);
-
- __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total);
+ fill_gap(size_total, &kbuf, &offset_start, &count);
return 0;
}
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 519649ddf100..fe522691ac71 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -207,7 +207,7 @@ spurious_8259A_irq:
* lets ACK and report it. [once per IRQ]
*/
if (!(spurious_irq_mask & irqmask)) {
- printk(KERN_DEBUG
+ printk_deferred(KERN_DEBUG
"spurious 8259A interrupt: IRQ%d.\n", irq);
spurious_irq_mask |= irqmask;
}
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index a7e0e975043f..e1f9355307b8 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -320,7 +320,11 @@ void __init idt_setup_apic_and_irq_gates(void)
#ifdef CONFIG_X86_LOCAL_APIC
for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
- set_bit(i, system_vectors);
+ /*
+ * Don't set the non-assigned system vectors in the
+ * system_vectors bitmap. Otherwise they show up in
+ * /proc/interrupts.
+ */
entry = spurious_entries_start + 8 * (i - FIRST_SYSTEM_VECTOR);
set_intr_gate(i, entry);
}
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 9490a2845f14..273687986a26 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -211,8 +211,7 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
params->hdr.hardware_subarch = boot_params.hdr.hardware_subarch;
/* Copying screen_info will do? */
- memcpy(&params->screen_info, &boot_params.screen_info,
- sizeof(struct screen_info));
+ memcpy(&params->screen_info, &screen_info, sizeof(struct screen_info));
/* Fill in memsize later */
params->screen_info.ext_mem_k = 0;
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 173e915e11d5..3334e1400345 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -170,6 +170,8 @@ NOKPROBE_SYMBOL(skip_prefixes);
int can_boost(struct insn *insn, void *addr)
{
kprobe_opcode_t opcode;
+ insn_byte_t prefix;
+ int i;
if (search_exception_tables((unsigned long)addr))
return 0; /* Page fault may occur on this address. */
@@ -182,9 +184,14 @@ int can_boost(struct insn *insn, void *addr)
if (insn->opcode.nbytes != 1)
return 0;
- /* Can't boost Address-size override prefix */
- if (unlikely(inat_is_address_size_prefix(insn->attr)))
- return 0;
+ for_each_insn_prefix(insn, i, prefix) {
+ insn_attr_t attr;
+
+ attr = inat_get_opcode_attribute(prefix);
+ /* Can't boost Address-size override or CS override prefixes */
+ if (prefix == 0x2e || inat_is_address_size_prefix(attr))
+ return 0;
+ }
opcode = insn->opcode.bytes[0];
@@ -209,8 +216,8 @@ int can_boost(struct insn *insn, void *addr)
/* clear and set flags are boostable */
return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
default:
- /* CS override prefix and call are not boostable */
- return (opcode != 0x2e && opcode != 0x9a);
+ /* call is not boostable */
+ return opcode != 0x9a;
}
}
@@ -765,16 +772,11 @@ asm(
NOKPROBE_SYMBOL(kretprobe_trampoline);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
-static struct kprobe kretprobe_kprobe = {
- .addr = (void *)kretprobe_trampoline,
-};
-
/*
* Called from kretprobe_trampoline
*/
__visible __used void *trampoline_handler(struct pt_regs *regs)
{
- struct kprobe_ctlblk *kcb;
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *tmp;
@@ -784,16 +786,12 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
void *frame_pointer;
bool skipped = false;
- preempt_disable();
-
/*
* Set a dummy kprobe for avoiding kretprobe recursion.
* Since kretprobe never run in kprobe handler, kprobe must not
* be running at this point.
*/
- kcb = get_kprobe_ctlblk();
- __this_cpu_write(current_kprobe, &kretprobe_kprobe);
- kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ kprobe_busy_begin();
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
@@ -872,7 +870,7 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
__this_cpu_write(current_kprobe, &ri->rp->kp);
ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
- __this_cpu_write(current_kprobe, &kretprobe_kprobe);
+ __this_cpu_write(current_kprobe, &kprobe_busy);
}
recycle_rp_inst(ri, &empty_rp);
@@ -888,8 +886,7 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
kretprobe_hash_unlock(current, &flags);
- __this_cpu_write(current_kprobe, NULL);
- preempt_enable();
+ kprobe_busy_end();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
@@ -1051,6 +1048,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* So clear it by resetting the current kprobe:
*/
regs->flags &= ~X86_EFLAGS_TF;
+ /*
+ * Since the single step (trap) has been cancelled,
+ * we need to restore BTF here.
+ */
+ restore_btf();
/*
* If the TF flag was set before the kprobe hit,
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 6645f123419c..9f0be2c7e346 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -126,6 +126,7 @@ int apply_relocate(Elf32_Shdr *sechdrs,
*location += sym->st_value;
break;
case R_386_PC32:
+ case R_386_PLT32:
/* Add the value, subtract its position */
*location += sym->st_value - (uint32_t)location;
break;
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 0f8b9b900b0e..996eb53f8eb7 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -104,7 +104,6 @@ fs_initcall(nmi_warning_debugfs);
static void nmi_check_duration(struct nmiaction *action, u64 duration)
{
- u64 whole_msecs = READ_ONCE(action->max_duration);
int remainder_ns, decimal_msecs;
if (duration < nmi_longest_ns || duration < action->max_duration)
@@ -112,12 +111,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
action->max_duration = duration;
- remainder_ns = do_div(whole_msecs, (1000 * 1000));
+ remainder_ns = do_div(duration, (1000 * 1000));
decimal_msecs = remainder_ns / 1000;
printk_ratelimited(KERN_INFO
"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
- action->handler, whole_msecs, decimal_msecs);
+ action->handler, duration, decimal_msecs);
}
static int nmi_handle(unsigned int type, struct pt_regs *regs)
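do_div() is the in-place 64-bit division helper: do_div(x, d) replaces x with the quotient and returns the remainder, which is why the message can now print duration directly as whole milliseconds. The same computation spelled out (a sketch):

        u64 msecs = duration;                    /* nanoseconds in, quotient out */
        u32 rem_ns = do_div(msecs, 1000 * 1000); /* msecs now holds whole msecs  */
        u32 thousandths = rem_ns / 1000;

        pr_info("took %llu.%03u msecs\n", msecs, thousandths);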
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index b8b08e61ac73..cd138bfd926c 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -413,28 +413,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
lockdep_assert_irqs_disabled();
- /*
- * If TIF_SSBD is different, select the proper mitigation
- * method. Note that if SSBD mitigation is disabled or permanentely
- * enabled this branch can't be taken because nothing can set
- * TIF_SSBD.
- */
- if (tif_diff & _TIF_SSBD) {
- if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+ /* Handle change of TIF_SSBD depending on the mitigation method. */
+ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+ if (tif_diff & _TIF_SSBD)
amd_set_ssb_virt_state(tifn);
- } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+ } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+ if (tif_diff & _TIF_SSBD)
amd_set_core_ssb_state(tifn);
- } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
- static_cpu_has(X86_FEATURE_AMD_SSBD)) {
- msr |= ssbd_tif_to_spec_ctrl(tifn);
- updmsr = true;
- }
+ } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ updmsr |= !!(tif_diff & _TIF_SSBD);
+ msr |= ssbd_tif_to_spec_ctrl(tifn);
}
- /*
- * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
- * otherwise avoid the MSR write.
- */
+ /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
if (IS_ENABLED(CONFIG_SMP) &&
static_branch_unlikely(&switch_to_cond_stibp)) {
updmsr |= !!(tif_diff & _TIF_SPEC_IB);
diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h
index 898e97cf6629..320ab978fb1f 100644
--- a/arch/x86/kernel/process.h
+++ b/arch/x86/kernel/process.h
@@ -19,7 +19,7 @@ static inline void switch_to_extra(struct task_struct *prev,
if (IS_ENABLED(CONFIG_SMP)) {
/*
* Avoid __switch_to_xtra() invocation when conditional
- * STIPB is disabled and the only different bit is
+ * STIBP is disabled and the only different bit is
* TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
* in the TIF_WORK_CTXSW masks.
*/
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 8d4d50645310..1401f86e4007 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -374,7 +374,7 @@ static unsigned long task_seg_base(struct task_struct *task,
*/
mutex_lock(&task->mm->context.lock);
ldt = task->mm->context.ldt;
- if (unlikely(idx >= ldt->nr_entries))
+ if (unlikely(!ldt || idx >= ldt->nr_entries))
base = 0;
else
base = get_desc_base(ldt->entries + idx);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 8fd3cedd9acc..6489cc19ed06 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
},
},
+ { /* Handle problems with rebooting on Apple MacBook6,1 */
+ .callback = set_pci_reboot,
+ .ident = "Apple MacBook6,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
+ },
+ },
{ /* Handle problems with rebooting on Apple MacBookPro5 */
.callback = set_pci_reboot,
.ident = "Apple MacBookPro5",
@@ -469,6 +477,15 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
},
},
+ { /* PCIe Wifi card isn't detected after reboot otherwise */
+ .callback = set_pci_reboot,
+ .ident = "Zotac ZBOX CI327 nano",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "NA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZBOX-CI327NANO-GS-01"),
+ },
+ },
+
/* Sony */
{ /* Handle problems with rebooting on Sony VGN-Z540N */
.callback = set_bios_reboot,
@@ -530,29 +547,20 @@ static void emergency_vmx_disable_all(void)
local_irq_disable();
/*
- * We need to disable VMX on all CPUs before rebooting, otherwise
- * we risk hanging up the machine, because the CPU ignore INIT
- * signals when VMX is enabled.
- *
- * We can't take any locks and we may be on an inconsistent
- * state, so we use NMIs as IPIs to tell the other CPUs to disable
- * VMX and halt.
+ * Disable VMX on all CPUs before rebooting, otherwise we risk hanging
+ * the machine, because the CPU blocks INIT when it's in VMX root.
*
- * For safety, we will avoid running the nmi_shootdown_cpus()
- * stuff unnecessarily, but we don't have a way to check
- * if other CPUs have VMX enabled. So we will call it only if the
- * CPU we are running on has VMX enabled.
+ * We can't take any locks and we may be on an inconsistent state, so
+ * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt.
*
- * We will miss cases where VMX is not enabled on all CPUs. This
- * shouldn't do much harm because KVM always enable VMX on all
- * CPUs anyway. But we can miss it on the small window where KVM
- * is still enabling VMX.
+ * Do the NMI shootdown even if VMX is off on _this_ CPU, as that
+ * doesn't prevent a different CPU from being in VMX root operation.
*/
- if (cpu_has_vmx() && cpu_vmx_enabled()) {
- /* Disable VMX on this CPU. */
- cpu_vmxoff();
+ if (cpu_has_vmx()) {
+ /* Safely force _this_ CPU out of VMX root operation. */
+ __cpu_emergency_vmxoff();
- /* Halt and disable VMX on the other CPUs */
+ /* Halt and exit VMX root operation on the other CPUs. */
nmi_shootdown_cpus(vmxoff_nmi);
}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 90ecc108bc8a..652a10a3219d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1176,6 +1176,8 @@ void __init setup_arch(char **cmdline_p)
reserve_initrd();
acpi_table_upgrade();
+ /* Look for ACPI tables and reserve memory occupied by them. */
+ acpi_boot_table_init();
vsmp_init();
@@ -1183,11 +1185,6 @@ void __init setup_arch(char **cmdline_p)
early_platform_quirks();
- /*
- * Parse the ACPI tables for possible boot-time SMP configuration.
- */
- acpi_boot_table_init();
-
early_acpi_boot_init();
initmem_init();
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 44e647a65de8..16e908000550 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -776,30 +776,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
{
- /*
- * This function is fundamentally broken as currently
- * implemented.
- *
- * The idea is that we want to trigger a call to the
- * restart_block() syscall and that we want in_ia32_syscall(),
- * in_x32_syscall(), etc. to match whatever they were in the
- * syscall being restarted. We assume that the syscall
- * instruction at (regs->ip - 2) matches whatever syscall
- * instruction we used to enter in the first place.
- *
- * The problem is that we can get here when ptrace pokes
- * syscall-like values into regs even if we're not in a syscall
- * at all.
- *
- * For now, we maintain historical behavior and guess based on
- * stored state. We could do better by saving the actual
- * syscall arch in restart_block or (with caveats on x32) by
- * checking if regs->ip points to 'int $0x80'. The current
- * behavior is incorrect if a tracer has a different bitness
- * than the tracee.
- */
#ifdef CONFIG_IA32_EMULATION
- if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED))
+ if (current_thread_info()->status & TS_COMPAT_RESTART)
return __NR_ia32_restart_syscall;
#endif
#ifdef CONFIG_X86_X32_ABI
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6489067b78a4..8783d065f927 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -269,6 +269,14 @@ static void notrace start_secondary(void *unused)
wmb();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+
+ /*
+ * Prevent tail call to cpu_startup_entry() because the stack protector
+ * guard has been changed a couple of function calls up, in
+ * boot_init_stack_canary(), and must not be checked before tail calling
+ * another function.
+ */
+ prevent_tail_call_optimization();
}
/**
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index fddaefc51fb6..0680a2e9e06b 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -24,10 +24,6 @@
#include <asm/hpet.h>
#include <asm/time.h>
-#ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
-#endif
-
unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 89be1be1790c..4f17c1c94949 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -131,9 +131,6 @@ static struct orc_entry *orc_find(unsigned long ip)
{
static struct orc_entry *orc;
- if (!orc_init)
- return NULL;
-
if (ip == 0)
return &null_orc_entry;
@@ -349,8 +346,8 @@ static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
return false;
- *ip = regs->ip;
- *sp = regs->sp;
+ *ip = READ_ONCE_NOCHECK(regs->ip);
+ *sp = READ_ONCE_NOCHECK(regs->sp);
return true;
}
@@ -362,14 +359,43 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
return false;
- *ip = regs->ip;
- *sp = regs->sp;
+ *ip = READ_ONCE_NOCHECK(regs->ip);
+ *sp = READ_ONCE_NOCHECK(regs->sp);
return true;
}
+/*
+ * If state->regs is non-NULL and points to a full pt_regs, just get the reg
+ * value from state->regs.
+ *
+ * Otherwise, if state->regs just points to IRET regs, and the previous frame
+ * had full regs, it's safe to get the value from the previous regs. This can
+ * happen when early/late IRQ entry code gets interrupted by an NMI.
+ */
+static bool get_reg(struct unwind_state *state, unsigned int reg_off,
+ unsigned long *val)
+{
+ unsigned int reg = reg_off/8;
+
+ if (!state->regs)
+ return false;
+
+ if (state->full_regs) {
+ *val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
+ return true;
+ }
+
+ if (state->prev_regs) {
+ *val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
+ return true;
+ }
+
+ return false;
+}
+
bool unwind_next_frame(struct unwind_state *state)
{
- unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
+ unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
enum stack_type prev_type = state->stack_info.type;
struct orc_entry *orc;
bool indirect = false;
@@ -387,8 +413,11 @@ bool unwind_next_frame(struct unwind_state *state)
/*
* Find the orc_entry associated with the text address.
*
- * Decrement call return addresses by one so they work for sibling
- * calls and calls to noreturn functions.
+ * For a call frame (as opposed to a signal frame), state->ip points to
+ * the instruction after the call. That instruction's stack layout
+ * could be different from the call instruction's layout, for example
+ * if the call was to a noreturn function. So get the ORC data for the
+ * call instruction itself.
*/
orc = orc_find(state->signal ? state->ip : state->ip - 1);
if (!orc)
@@ -423,39 +452,35 @@ bool unwind_next_frame(struct unwind_state *state)
break;
case ORC_REG_R10:
- if (!state->regs || !state->full_regs) {
+ if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
orc_warn("missing regs for base reg R10 at ip %pB\n",
(void *)state->ip);
goto err;
}
- sp = state->regs->r10;
break;
case ORC_REG_R13:
- if (!state->regs || !state->full_regs) {
+ if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
orc_warn("missing regs for base reg R13 at ip %pB\n",
(void *)state->ip);
goto err;
}
- sp = state->regs->r13;
break;
case ORC_REG_DI:
- if (!state->regs || !state->full_regs) {
+ if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
orc_warn("missing regs for base reg DI at ip %pB\n",
(void *)state->ip);
goto err;
}
- sp = state->regs->di;
break;
case ORC_REG_DX:
- if (!state->regs || !state->full_regs) {
+ if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
orc_warn("missing regs for base reg DX at ip %pB\n",
(void *)state->ip);
goto err;
}
- sp = state->regs->dx;
break;
default:
@@ -482,6 +507,7 @@ bool unwind_next_frame(struct unwind_state *state)
state->sp = sp;
state->regs = NULL;
+ state->prev_regs = NULL;
state->signal = false;
break;
@@ -493,6 +519,7 @@ bool unwind_next_frame(struct unwind_state *state)
}
state->regs = (struct pt_regs *)sp;
+ state->prev_regs = NULL;
state->full_regs = true;
state->signal = true;
break;
@@ -504,6 +531,8 @@ bool unwind_next_frame(struct unwind_state *state)
goto err;
}
+ if (state->full_regs)
+ state->prev_regs = state->regs;
state->regs = (void *)sp - IRET_FRAME_OFFSET;
state->full_regs = false;
state->signal = true;
@@ -512,14 +541,14 @@ bool unwind_next_frame(struct unwind_state *state)
default:
orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
orc->type, (void *)orig_ip);
- break;
+ goto err;
}
/* Find BP: */
switch (orc->bp_reg) {
case ORC_REG_UNDEFINED:
- if (state->regs && state->full_regs)
- state->bp = state->regs->bp;
+ if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
+ state->bp = tmp;
break;
case ORC_REG_PREV_SP:
@@ -566,17 +595,20 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
memset(state, 0, sizeof(*state));
state->task = task;
+ if (!orc_init)
+ goto err;
+
/*
* Refuse to unwind the stack of a task while it's executing on another
* CPU. This check is racy, but that's ok: the unwinder has other
* checks to prevent it from going off the rails.
*/
if (task_on_another_cpu(task))
- goto done;
+ goto err;
if (regs) {
if (user_mode(regs))
- goto done;
+ goto the_end;
state->ip = regs->ip;
state->sp = kernel_stack_pointer(regs);
@@ -595,9 +627,10 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
} else {
struct inactive_task_frame *frame = (void *)task->thread.sp;
- state->sp = task->thread.sp;
+ state->sp = task->thread.sp + sizeof(*frame);
state->bp = READ_ONCE_NOCHECK(frame->bp);
state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
+ state->signal = (void *)state->ip == ret_from_fork;
}
if (get_stack_info((unsigned long *)state->sp, state->task,
@@ -609,6 +642,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
* generate some kind of backtrace if this happens.
*/
void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
+ state->error = true;
if (get_stack_info(next_page, state->task, &state->stack_info,
&state->stack_mask))
return;
@@ -629,13 +663,14 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
/* Otherwise, skip ahead to the user-specified starting frame: */
while (!unwind_done(state) &&
(!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
- state->sp <= (unsigned long)first_frame))
+ state->sp < (unsigned long)first_frame))
unwind_next_frame(state);
return;
-done:
+err:
+ state->error = true;
+the_end:
state->stack_info.type = STACK_TYPE_UNKNOWN;
- return;
}
EXPORT_SYMBOL_GPL(__unwind_start);
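get_reg() works because pt_regs on x86-64 is laid out as consecutive 8-byte registers, so offsetof()/8 turns a field offset into an index that can be applied to either the current or the previous register frame. Written out for one register (assuming a full frame is available):

        unsigned long *frame = (unsigned long *)state->regs;
        unsigned long r10 = frame[offsetof(struct pt_regs, r10) / 8];
        /* ...the same value as state->regs->r10, but the register is chosen
         * at run time via the reg_off argument. */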
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 420aa7d3a2e6..ae9e806a11de 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -268,12 +268,13 @@ static volatile u32 good_2byte_insns[256 / 32] = {
static bool is_prefix_bad(struct insn *insn)
{
+ insn_byte_t p;
int i;
- for (i = 0; i < insn->prefixes.nbytes; i++) {
+ for_each_insn_prefix(insn, i, p) {
insn_attr_t attr;
- attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+ attr = inat_get_opcode_attribute(p);
switch (attr) {
case INAT_MAKE_PREFIX(INAT_PFX_ES):
case INAT_MAKE_PREFIX(INAT_PFX_CS):
@@ -728,6 +729,7 @@ static const struct uprobe_xol_ops push_xol_ops = {
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
u8 opc1 = OPCODE1(insn);
+ insn_byte_t p;
int i;
switch (opc1) {
@@ -758,8 +760,8 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
* Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix.
* No one uses these insns, reject any branch insns with such prefix.
*/
- for (i = 0; i < insn->prefixes.nbytes; i++) {
- if (insn->prefixes.bytes[i] == 0x66)
+ for_each_insn_prefix(insn, i, p) {
+ if (p == 0x66)
return -ENOTSUPP;
}
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 85e6d5620188..34c0652ca8b4 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -36,13 +36,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
-jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
-jiffies_64 = jiffies;
#endif
+jiffies = jiffies_64;
+
#if defined(CONFIG_X86_64)
/*
* On 64-bit, align RODATA to 2MB so we retain large page mappings for
@@ -372,6 +372,7 @@ SECTIONS
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
__bss_start = .;
*(.bss..page_aligned)
+ . = ALIGN(PAGE_SIZE);
*(BSS_MAIN)
BSS_DECRYPTED
. = ALIGN(PAGE_SIZE);
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 48c24d0e9e75..1fe9ccabc082 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -509,7 +509,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->edx |= F(SPEC_CTRL);
if (boot_cpu_has(X86_FEATURE_STIBP))
entry->edx |= F(INTEL_STIBP);
- if (boot_cpu_has(X86_FEATURE_SSBD))
+ if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ boot_cpu_has(X86_FEATURE_AMD_SSBD))
entry->edx |= F(SPEC_CTRL_SSBD);
/*
* We emulate ARCH_CAPABILITIES in software even
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index d78a61408243..7dec43b2c420 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -154,6 +154,20 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
return x86_stepping(best->eax);
}
+static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
+{
+ return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
+ guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
+ guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
+ guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
+}
+
+static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
+{
+ return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
+ guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
+}
+
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
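These helpers collect the CPUID bits that imply the speculation-control MSRs exist, so vendor code can gate MSR emulation on a single call. A hedged sketch of how a set-MSR path might use one of them (not the exact vendor code):

        if (!msr_info->host_initiated && !guest_has_spec_ctrl_msr(vcpu))
                return 1;       /* IA32_SPEC_CTRL not exposed to this guest */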
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 210eabd71ab2..3e182c7ae771 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3561,7 +3561,7 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
u64 tsc_aux = 0;
if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
- return emulate_gp(ctxt, 0);
+ return emulate_ud(ctxt);
ctxt->dst.val = tsc_aux;
return X86EMUL_CONTINUE;
}
@@ -3994,6 +3994,12 @@ static int em_clflush(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
+{
+ /* emulating clflushopt regardless of cpuid */
+ return X86EMUL_CONTINUE;
+}
+
static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = (s32) ctxt->src.val;
@@ -4507,7 +4513,7 @@ static const struct opcode group11[] = {
};
static const struct gprefix pfx_0f_ae_7 = {
- I(SrcMem | ByteOp, em_clflush), N, N, N,
+ I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
};
static const struct group_dual group15 = { {
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 007bc654f928..295ebadb8f2c 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -52,29 +52,10 @@ static int pending_userspace_extint(struct kvm_vcpu *v)
* check if there is pending interrupt from
* non-APIC source without intack.
*/
-static int kvm_cpu_has_extint(struct kvm_vcpu *v)
-{
- u8 accept = kvm_apic_accept_pic_intr(v);
-
- if (accept) {
- if (irqchip_split(v->kvm))
- return pending_userspace_extint(v);
- else
- return v->kvm->arch.vpic->output;
- } else
- return 0;
-}
-
-/*
- * check if there is injectable interrupt:
- * when virtual interrupt delivery enabled,
- * interrupt from apic will handled by hardware,
- * we don't need to check it here.
- */
-int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+int kvm_cpu_has_extint(struct kvm_vcpu *v)
{
/*
- * FIXME: interrupt.injected represents an interrupt that it's
+ * FIXME: interrupt.injected represents an interrupt whose
* side-effects have already been applied (e.g. bit from IRR
* already moved to ISR). Therefore, it is incorrect to rely
* on interrupt.injected to know if there is a pending
@@ -87,6 +68,23 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
if (!lapic_in_kernel(v))
return v->arch.interrupt.injected;
+ if (!kvm_apic_accept_pic_intr(v))
+ return 0;
+
+ if (irqchip_split(v->kvm))
+ return pending_userspace_extint(v);
+ else
+ return v->kvm->arch.vpic->output;
+}
+
+/*
+ * check if there is injectable interrupt:
+ * when virtual interrupt delivery enabled,
+ * interrupt from apic will handled by hardware,
+ * we don't need to check it here.
+ */
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+{
if (kvm_cpu_has_extint(v))
return 1;
@@ -102,20 +100,6 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
*/
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
- /*
- * FIXME: interrupt.injected represents an interrupt that it's
- * side-effects have already been applied (e.g. bit from IRR
- * already moved to ISR). Therefore, it is incorrect to rely
- * on interrupt.injected to know if there is a pending
- * interrupt in the user-mode LAPIC.
- * This leads to nVMX/nSVM not be able to distinguish
- * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
- * pending interrupt or should re-inject an injected
- * interrupt.
- */
- if (!lapic_in_kernel(v))
- return v->arch.interrupt.injected;
-
if (kvm_cpu_has_extint(v))
return 1;
@@ -129,16 +113,21 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
*/
static int kvm_cpu_get_extint(struct kvm_vcpu *v)
{
- if (kvm_cpu_has_extint(v)) {
- if (irqchip_split(v->kvm)) {
- int vector = v->arch.pending_external_vector;
-
- v->arch.pending_external_vector = -1;
- return vector;
- } else
- return kvm_pic_read_irq(v->kvm); /* PIC */
- } else
+ if (!kvm_cpu_has_extint(v)) {
+ WARN_ON(!lapic_in_kernel(v));
return -1;
+ }
+
+ if (!lapic_in_kernel(v))
+ return v->arch.interrupt.nr;
+
+ if (irqchip_split(v->kvm)) {
+ int vector = v->arch.pending_external_vector;
+
+ v->arch.pending_external_vector = -1;
+ return vector;
+ } else
+ return kvm_pic_read_irq(v->kvm); /* PIC */
}
/*
@@ -146,13 +135,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
*/
int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
{
- int vector;
-
- if (!lapic_in_kernel(v))
- return v->arch.interrupt.nr;
-
- vector = kvm_cpu_get_extint(v);
-
+ int vector = kvm_cpu_get_extint(v);
if (vector != -1)
return vector; /* PIC */
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 9619dcc2b325..7df600410d69 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -5,7 +5,7 @@
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS \
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
- | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
+ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 8c6392534d14..56a4b9762b0c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2034,7 +2034,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
- if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
+ if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic))
return;
@@ -2284,7 +2284,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic = vcpu->arch.apic;
u32 ppr;
- if (!kvm_apic_hw_enabled(apic))
+ if (!kvm_apic_present(vcpu))
return -1;
__apic_update_ppr(apic, &ppr);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 62f1e4663bc3..5faa49a95ac9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -281,6 +281,11 @@ static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
*/
static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+/*
+ * The number of non-reserved physical address bits irrespective of features
+ * that repurpose legal bits, e.g. MKTME.
+ */
+static u8 __read_mostly shadow_phys_bits;
static void mmu_spte_set(u64 *sptep, u64 spte);
static bool is_executable_pte(u64 spte);
@@ -294,11 +299,18 @@ kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
{
BUG_ON((mmio_mask & mmio_value) != mmio_value);
+ WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
+ WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
+static bool is_mmio_spte(u64 spte)
+{
+ return (spte & shadow_mmio_mask) == shadow_mmio_value;
+}
+
static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
{
return sp->role.ad_disabled;
@@ -306,7 +318,7 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
static inline bool spte_ad_enabled(u64 spte)
{
- MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
+ MMU_WARN_ON(is_mmio_spte(spte));
return !(spte & shadow_acc_track_value);
}
@@ -317,13 +329,13 @@ static bool is_nx_huge_page_enabled(void)
static inline u64 spte_shadow_accessed_mask(u64 spte)
{
- MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
+ MMU_WARN_ON(is_mmio_spte(spte));
return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}
static inline u64 spte_shadow_dirty_mask(u64 spte)
{
- MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
+ MMU_WARN_ON(is_mmio_spte(spte));
return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}
@@ -393,11 +405,6 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
mmu_spte_set(sptep, mask);
}
-static bool is_mmio_spte(u64 spte)
-{
- return (spte & shadow_mmio_mask) == shadow_mmio_value;
-}
-
static gfn_t get_mmio_spte_gfn(u64 spte)
{
u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
@@ -462,6 +469,21 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+static u8 kvm_get_shadow_phys_bits(void)
+{
+ /*
+ * boot_cpu_data.x86_phys_bits is reduced when MKTME is detected
+ * in CPU detection code, but MKTME treats those reduced bits as
+ * 'keyID' bits, so they are not reserved. Therefore, for MKTME
+ * we should still return the physical address bits reported by CPUID.
+ */
+ if (!boot_cpu_has(X86_FEATURE_TME) ||
+ WARN_ON_ONCE(boot_cpu_data.extended_cpuid_level < 0x80000008))
+ return boot_cpu_data.x86_phys_bits;
+
+ return cpuid_eax(0x80000008) & 0xff;
+}
+
static void kvm_mmu_reset_all_pte_masks(void)
{
u8 low_phys_bits;
@@ -475,20 +497,29 @@ static void kvm_mmu_reset_all_pte_masks(void)
shadow_present_mask = 0;
shadow_acc_track_mask = 0;
+ shadow_phys_bits = kvm_get_shadow_phys_bits();
+
/*
* If the CPU has 46 or less physical address bits, then set an
* appropriate mask to guard against L1TF attacks. Otherwise, it is
* assumed that the CPU is not vulnerable to L1TF.
+ *
+ * Some Intel CPUs address the L1 cache using more PA bits than are
+ * reported by CPUID. Use the PA width of the L1 cache when possible
+ * to achieve more effective mitigation, e.g. if system RAM overlaps
+ * the most significant bits of legal physical address space.
*/
+ shadow_nonpresent_or_rsvd_mask = 0;
low_phys_bits = boot_cpu_data.x86_phys_bits;
- if (boot_cpu_data.x86_phys_bits <
- 52 - shadow_nonpresent_or_rsvd_mask_len) {
+ if (boot_cpu_has_bug(X86_BUG_L1TF) &&
+ !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
+ 52 - shadow_nonpresent_or_rsvd_mask_len)) {
+ low_phys_bits = boot_cpu_data.x86_cache_bits
+ - shadow_nonpresent_or_rsvd_mask_len;
shadow_nonpresent_or_rsvd_mask =
- rsvd_bits(boot_cpu_data.x86_phys_bits -
- shadow_nonpresent_or_rsvd_mask_len,
- boot_cpu_data.x86_phys_bits - 1);
- low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+ rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
}
+
shadow_nonpresent_or_rsvd_lower_gfn_mask =
GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}
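
To make the arithmetic in the hunk above concrete: when the CPU is affected by L1TF, the reserved-bit mask now covers the top shadow_nonpresent_or_rsvd_mask_len bits of the L1 cache's address width, and everything below that (down to the page offset) becomes the lower GFN mask. A standalone sketch that reuses the kernel's GENMASK_ULL/rsvd_bits semantics with illustrative numbers (46 cache bits, 4 KiB pages); it only reproduces the mask arithmetic, not any KVM behaviour:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define MASK_LEN	5	/* shadow_nonpresent_or_rsvd_mask_len */

/* Same semantics as the kernel helpers used in the hunk above. */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static uint64_t rsvd_bits(int s, int e)
{
	if (e < s)
		return 0;
	return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
	int cache_bits = 46;			/* illustrative x86_cache_bits */
	int low_phys_bits = cache_bits - MASK_LEN;

	uint64_t rsvd_mask = rsvd_bits(low_phys_bits, cache_bits - 1);
	uint64_t lower_gfn = GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

	printf("nonpresent_or_rsvd mask:      %#018llx\n",
	       (unsigned long long)rsvd_mask);
	printf("nonpresent_or_rsvd lower gfn: %#018llx\n",
	       (unsigned long long)lower_gfn);
	return 0;
}
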
@@ -1701,10 +1732,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
* Emulate arch specific page modification logging for the
* nested hypervisor
*/
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
{
if (kvm_x86_ops->write_log_dirty)
- return kvm_x86_ops->write_log_dirty(vcpu);
+ return kvm_x86_ops->write_log_dirty(vcpu, l2_gpa);
return 0;
}
@@ -1925,7 +1956,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ bool blockable)
{
return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}
@@ -4443,7 +4475,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
- nonleaf_bit8_rsvd | gbpages_bit_rsvd |
+ gbpages_bit_rsvd |
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 51);
@@ -4535,7 +4567,7 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
*/
shadow_zero_check = &context->shadow_zero_check;
__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
- boot_cpu_data.x86_phys_bits,
+ shadow_phys_bits,
context->shadow_root_level, uses_nx,
guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
is_pse(vcpu), true);
@@ -4572,13 +4604,13 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
if (boot_cpu_is_amd())
__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
- boot_cpu_data.x86_phys_bits,
+ shadow_phys_bits,
context->shadow_root_level, false,
boot_cpu_has(X86_FEATURE_GBPAGES),
true, true);
else
__reset_rsvds_bits_mask_ept(shadow_zero_check,
- boot_cpu_data.x86_phys_bits,
+ shadow_phys_bits,
false);
if (!shadow_me_mask)
@@ -4599,7 +4631,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context, bool execonly)
{
__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
- boot_cpu_data.x86_phys_bits, execonly);
+ shadow_phys_bits, execonly);
}
#define BYTE_MASK(access) \
@@ -6041,6 +6073,25 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
return 0;
}
+static void kvm_set_mmio_spte_mask(void)
+{
+ u64 mask;
+
+ /*
+ * Set a reserved PA bit in MMIO SPTEs to generate page faults with
+ * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT
+ * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
+ * 52-bit physical addresses then there are no reserved PA bits in the
+ * PTEs and so the reserved PA approach must be disabled.
+ */
+ if (shadow_phys_bits < 52)
+ mask = BIT_ULL(51) | PT_PRESENT_MASK;
+ else
+ mask = 0;
+
+ kvm_mmu_set_mmio_spte_mask(mask, mask);
+}
+
int kvm_mmu_module_init(void)
{
int ret = -ENOMEM;
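
The new kvm_set_mmio_spte_mask() above picks physical address bit 51 plus the present bit whenever fewer than 52 PA bits are usable, so a walk through a "cached MMIO" SPTE faults with PFEC.RSVD=1. A simplified standalone sketch of the resulting mask/value test (it ignores SPTE_SPECIAL_MASK, uses a made-up GFN, and only mirrors what is_mmio_spte() checks earlier in this file):

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)	(1ULL << (n))
#define PT_PRESENT_MASK	BIT_ULL(0)	/* bit 0 of a 64-bit PTE */

int main(void)
{
	unsigned int shadow_phys_bits = 46;	/* illustrative value */
	uint64_t mask, value, spte;

	/*
	 * Same policy as kvm_set_mmio_spte_mask(): a guaranteed-reserved
	 * PA bit plus the present bit, or nothing at all with 52 PA bits.
	 */
	mask = value = (shadow_phys_bits < 52) ?
		       BIT_ULL(51) | PT_PRESENT_MASK : 0;

	/* A cached-MMIO entry matches the mask exactly ... */
	spte = value | 0x1234000;		/* hypothetical GFN bits */
	printf("mmio spte:    %d\n", (spte & mask) == value);

	/* ... while an ordinary present SPTE does not. */
	spte = PT_PRESENT_MASK | 0x1234000;
	printf("regular spte: %d\n", (spte & mask) == value);
	return 0;
}
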
@@ -6050,6 +6101,8 @@ int kvm_mmu_module_init(void)
kvm_mmu_reset_all_pte_masks();
+ kvm_set_mmio_spte_mask();
+
pte_list_desc_cache = kmem_cache_create("pte_list_desc",
sizeof(struct pte_list_desc),
0, SLAB_ACCOUNT, NULL);
@@ -6172,6 +6225,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
cond_resched_lock(&kvm->mmu_lock);
}
}
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, rcu_idx);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index f7b2de7b6382..05a02b8ace6b 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -53,7 +53,7 @@ static inline u64 rsvd_bits(int s, int e)
if (e < s)
return 0;
- return ((1ULL << (e - s + 1)) - 1) << s;
+ return ((2ULL << (e - s)) - 1) << s;
}
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);
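
The one-character change to rsvd_bits() is about shift semantics: for a full 64-bit range (s == 0, e == 63) the old expression shifts 1ULL by 64, which is undefined behaviour, while the new expression never shifts by more than 63 and relies on well-defined unsigned wrap-around to produce ~0ULL. A small sketch comparing the two forms (the helper names are local to the example):

#include <stdio.h>
#include <stdint.h>

/* Old form: the shift count reaches 64 for a full range, which is UB. */
static uint64_t rsvd_bits_old(int s, int e)
{
	if (e < s)
		return 0;
	return ((1ULL << (e - s + 1)) - 1) << s;
}

/* New form from the hunk above: the shift count is at most 63, and
 * "2ULL << 63" wraps to 0, so ((0 - 1) << 0) == ~0ULL as intended. */
static uint64_t rsvd_bits_new(int s, int e)
{
	if (e < s)
		return 0;
	return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
	/* Identical results for ordinary ranges ... */
	printf("old(12, 51) = %#llx\n", (unsigned long long)rsvd_bits_old(12, 51));
	printf("new(12, 51) = %#llx\n", (unsigned long long)rsvd_bits_new(12, 51));

	/* ... but only the new form is defined for the full range
	 * (build with -fsanitize=undefined to see the old one trip). */
	printf("new(0, 63)  = %#llx\n", (unsigned long long)rsvd_bits_new(0, 63));
	return 0;
}
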
@@ -215,7 +215,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn);
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index cb41b036eb26..7e0dc8c7da2c 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -339,7 +339,7 @@ TRACE_EVENT(
/* These depend on page entry type, so compute them now. */
__field(bool, r)
__field(bool, x)
- __field(u8, u)
+ __field(signed char, u)
),
TP_fast_assign(
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 61f10a4fd807..8220190b0605 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -202,7 +202,7 @@ static inline unsigned FNAME(gpte_access)(u64 gpte)
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu,
struct guest_walker *walker,
- int write_fault)
+ gpa_t addr, int write_fault)
{
unsigned level, index;
pt_element_t pte, orig_pte;
@@ -227,7 +227,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
!(pte & PT_GUEST_DIRTY_MASK)) {
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
- if (kvm_arch_write_log_dirty(vcpu))
+ if (kvm_arch_write_log_dirty(vcpu, addr))
return -EINVAL;
#endif
pte |= PT_GUEST_DIRTY_MASK;
@@ -424,7 +424,8 @@ retry_walk:
(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
if (unlikely(!accessed_dirty)) {
- ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
+ ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
+ addr, write_fault);
if (unlikely(ret < 0))
goto error;
else if (ret)
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index 2ab8c20c8bfa..611f9e60f815 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
- [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
+ [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};
/* mapping between fixed pmc index and intel_arch_events array */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index cc8f3b41a1b2..8cb9277aa6ff 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -892,6 +892,11 @@ static int has_svm(void)
return 0;
}
+ if (sev_active()) {
+ pr_info("KVM is unsupported when running as an SEV guest\n");
+ return 0;
+ }
+
return 1;
}
@@ -998,33 +1003,32 @@ static void svm_cpu_uninit(int cpu)
static int svm_cpu_init(int cpu)
{
struct svm_cpu_data *sd;
- int r;
sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
if (!sd)
return -ENOMEM;
sd->cpu = cpu;
- r = -ENOMEM;
sd->save_area = alloc_page(GFP_KERNEL);
if (!sd->save_area)
- goto err_1;
+ goto free_cpu_data;
if (svm_sev_enabled()) {
- r = -ENOMEM;
sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
sizeof(void *),
GFP_KERNEL);
if (!sd->sev_vmcbs)
- goto err_1;
+ goto free_save_area;
}
per_cpu(svm_data, cpu) = sd;
return 0;
-err_1:
+free_save_area:
+ __free_page(sd->save_area);
+free_cpu_data:
kfree(sd);
- return r;
+ return -ENOMEM;
}
@@ -1828,6 +1832,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
struct page **pages;
unsigned long first, last;
+ lockdep_assert_held(&kvm->lock);
+
if (ulen == 0 || uaddr + ulen < uaddr)
return NULL;
@@ -1917,6 +1923,10 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
static struct kvm *svm_vm_alloc(void)
{
struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm));
+
+ if (!kvm_svm)
+ return NULL;
+
return &kvm_svm->kvm;
}
@@ -3226,8 +3236,8 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
return NESTED_EXIT_HOST;
break;
case SVM_EXIT_EXCP_BASE + PF_VECTOR:
- /* When we're shadowing, trap PFs, but not async PF */
- if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
+ /* Trap async PF even if not shadowing */
+ if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason)
return NESTED_EXIT_HOST;
break;
default:
@@ -3316,7 +3326,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
dst->iopm_base_pa = from->iopm_base_pa;
dst->msrpm_base_pa = from->msrpm_base_pa;
dst->tsc_offset = from->tsc_offset;
- dst->asid = from->asid;
+ /* asid not copied, it is handled manually for svm->vmcb. */
dst->tlb_ctl = from->tlb_ctl;
dst->int_ctl = from->int_ctl;
dst->int_vector = from->int_vector;
@@ -3939,6 +3949,12 @@ static int iret_interception(struct vcpu_svm *svm)
return 1;
}
+static int invd_interception(struct vcpu_svm *svm)
+{
+ /* Treat an INVD instruction as a NOP and just skip it. */
+ return kvm_skip_emulated_instruction(&svm->vcpu);
+}
+
static int invlpg_interception(struct vcpu_svm *svm)
{
if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
@@ -4200,8 +4216,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+ !guest_has_spec_ctrl_msr(vcpu))
return 1;
msr_info->data = svm->spec_ctrl;
@@ -4303,8 +4318,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
break;
case MSR_IA32_SPEC_CTRL:
if (!msr->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+ !guest_has_spec_ctrl_msr(vcpu))
return 1;
/* The STIBP bit doesn't fault even if it's not advertised */
@@ -4331,12 +4345,11 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
break;
case MSR_IA32_PRED_CMD:
if (!msr->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
+ !guest_has_pred_cmd_msr(vcpu))
return 1;
if (data & ~PRED_CMD_IBPB)
return 1;
-
if (!data)
break;
@@ -4828,7 +4841,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_RDPMC] = rdpmc_interception,
[SVM_EXIT_CPUID] = cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
- [SVM_EXIT_INVD] = emulate_on_interception,
+ [SVM_EXIT_INVD] = invd_interception,
[SVM_EXIT_PAUSE] = pause_interception,
[SVM_EXIT_HLT] = halt_interception,
[SVM_EXIT_INVLPG] = invlpg_interception,
@@ -5371,6 +5384,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
* - Tell IOMMU to use legacy mode for this interrupt.
* - Retrieve ga_tag of prior interrupt remapping data.
*/
+ pi.prev_ga_tag = 0;
pi.is_guest_mode = false;
ret = irq_set_vcpu_affinity(host_irq, &pi);
@@ -7072,12 +7086,20 @@ static int svm_register_enc_region(struct kvm *kvm,
if (!region)
return -ENOMEM;
+ mutex_lock(&kvm->lock);
region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
if (!region->pages) {
ret = -ENOMEM;
+ mutex_unlock(&kvm->lock);
goto e_free;
}
+ region->uaddr = range->addr;
+ region->size = range->size;
+
+ list_add_tail(&region->list, &sev->regions_list);
+ mutex_unlock(&kvm->lock);
+
/*
* The guest may change the memory encryption attribute from C=0 -> C=1
* or vice versa for this memory range. Lets make sure caches are
@@ -7086,13 +7108,6 @@ static int svm_register_enc_region(struct kvm *kvm,
*/
sev_clflush_pages(region->pages, region->npages);
- region->uaddr = range->addr;
- region->size = range->size;
-
- mutex_lock(&kvm->lock);
- list_add_tail(&region->list, &sev->regions_list);
- mutex_unlock(&kvm->lock);
-
return ret;
e_free:
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a81d7d9ce9d6..77b9ed5223f3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2156,43 +2156,15 @@ static void vmcs_load(struct vmcs *vmcs)
}
#ifdef CONFIG_KEXEC_CORE
-/*
- * This bitmap is used to indicate whether the vmclear
- * operation is enabled on all cpus. All disabled by
- * default.
- */
-static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
-
-static inline void crash_enable_local_vmclear(int cpu)
-{
- cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-
-static inline void crash_disable_local_vmclear(int cpu)
-{
- cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-
-static inline int crash_local_vmclear_enabled(int cpu)
-{
- return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-
static void crash_vmclear_local_loaded_vmcss(void)
{
int cpu = raw_smp_processor_id();
struct loaded_vmcs *v;
- if (!crash_local_vmclear_enabled(cpu))
- return;
-
list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
loaded_vmcss_on_cpu_link)
vmcs_clear(v->vmcs);
}
-#else
-static inline void crash_enable_local_vmclear(int cpu) { }
-static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC_CORE */
static void __loaded_vmcs_clear(void *arg)
@@ -2204,19 +2176,24 @@ static void __loaded_vmcs_clear(void *arg)
return; /* vcpu migration can race with cpu offline */
if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
- crash_disable_local_vmclear(cpu);
+
+ vmcs_clear(loaded_vmcs->vmcs);
+ if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
+ vmcs_clear(loaded_vmcs->shadow_vmcs);
+
list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
/*
- * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link
- * is before setting loaded_vmcs->vcpu to -1 which is done in
- * loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 fist
- * then adds the vmcs into percpu list before it is deleted.
+ * Ensure all writes to loaded_vmcs, including deleting it from its
+ * current percpu list, complete before setting loaded_vmcs->vcpu to
+ * -1, otherwise a different cpu can see vcpu == -1 first and add
+ * loaded_vmcs to its percpu list before it's deleted from this cpu's
+ * list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
*/
smp_wmb();
- loaded_vmcs_init(loaded_vmcs);
- crash_enable_local_vmclear(cpu);
+ loaded_vmcs->cpu = -1;
+ loaded_vmcs->launched = 0;
}
static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
@@ -3067,18 +3044,17 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (!already_loaded) {
loaded_vmcs_clear(vmx->loaded_vmcs);
local_irq_disable();
- crash_disable_local_vmclear(cpu);
/*
- * Read loaded_vmcs->cpu should be before fetching
- * loaded_vmcs->loaded_vmcss_on_cpu_link.
- * See the comments in __loaded_vmcs_clear().
+ * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
+ * this cpu's percpu list, otherwise it may not yet be deleted
+ * from its previous cpu's percpu list. Pairs with the
+		 * smp_wmb() in __loaded_vmcs_clear().
*/
smp_rmb();
list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
&per_cpu(loaded_vmcss_on_cpu, cpu));
- crash_enable_local_vmclear(cpu);
local_irq_enable();
}
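
The rewritten comments in __loaded_vmcs_clear() and vmx_vcpu_load() describe a publish/consume ordering: every write to the loaded_vmcs, including unlinking it from the old per-cpu list, must be visible before cpu is set to -1, and the loading side must read cpu before adding the VMCS to its own list. A user-space analogue using C11 release/acquire atomics, purely illustrative (the kernel uses smp_wmb()/smp_rmb(); the struct and values below are invented):

#include <stdatomic.h>
#include <stdio.h>

struct vmcs_like {
	int payload;		/* stands in for the percpu list update */
	_Atomic int cpu;	/* -1 means "not loaded on any cpu" */
};

static void clearing_cpu(struct vmcs_like *v)
{
	v->payload = 42;	/* "delete from this cpu's list" */
	/* Publish: pairs with the acquire load below (smp_wmb() analogue). */
	atomic_store_explicit(&v->cpu, -1, memory_order_release);
}

static void loading_cpu(struct vmcs_like *v)
{
	/* Consume: read cpu before touching the list (smp_rmb() analogue). */
	if (atomic_load_explicit(&v->cpu, memory_order_acquire) == -1)
		printf("payload = %d\n", v->payload);	/* guaranteed 42 */
}

int main(void)
{
	struct vmcs_like v = { .payload = 0, .cpu = 0 };

	clearing_cpu(&v);	/* in the kernel these run on different CPUs */
	loading_cpu(&v);
	return 0;
}
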
@@ -4090,7 +4066,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return kvm_get_msr_common(vcpu, msr_info);
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ !guest_has_spec_ctrl_msr(vcpu))
return 1;
msr_info->data = to_vmx(vcpu)->spec_ctrl;
@@ -4204,7 +4180,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ !guest_has_spec_ctrl_msr(vcpu))
return 1;
/* The STIBP bit doesn't fault even if it's not advertised */
@@ -4234,7 +4210,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_PRED_CMD:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ !guest_has_pred_cmd_msr(vcpu))
return 1;
if (data & ~PRED_CMD_IBPB)
@@ -4422,21 +4398,6 @@ static int hardware_enable(void)
!hv_get_vp_assist_page(cpu))
return -EFAULT;
- INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
- INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
- spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
-
- /*
- * Now we can enable the vmclear operation in kdump
- * since the loaded_vmcss_on_cpu list on this cpu
- * has been initialized.
- *
- * Though the cpu is not in VMX operation now, there
- * is no problem to enable the vmclear operation
- * for the loaded_vmcss_on_cpu list is empty!
- */
- crash_enable_local_vmclear(cpu);
-
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
test_bits = FEATURE_CONTROL_LOCKED;
@@ -6374,6 +6335,8 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
{
+ BUILD_BUG_ON(KVM_CR4_GUEST_OWNED_BITS & ~KVM_POSSIBLE_CR4_GUEST_BITS);
+
vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
if (enable_ept)
vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
@@ -6954,8 +6917,13 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
- return (!to_vmx(vcpu)->nested.nested_run_pending &&
- vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+ if (to_vmx(vcpu)->nested.nested_run_pending)
+ return false;
+
+ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+ return true;
+
+ return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}
@@ -7049,7 +7017,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
*/
static void kvm_machine_check(void)
{
-#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
+#if defined(CONFIG_X86_MCE)
struct pt_regs regs = {
.cs = 3, /* Fake ring 3 no matter what the guest ran on */
.flags = X86_EFLAGS_IF,
@@ -9717,7 +9685,7 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
KVM_ISA_VMX);
- switch (exit_reason) {
+ switch ((u16)exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
if (is_nmi(intr_info))
return false;
@@ -10160,6 +10128,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
exit_reason != EXIT_REASON_EPT_VIOLATION &&
exit_reason != EXIT_REASON_PML_FULL &&
+ exit_reason != EXIT_REASON_APIC_ACCESS &&
exit_reason != EXIT_REASON_TASK_SWITCH)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
@@ -10805,14 +10774,14 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
else if (static_branch_unlikely(&mds_user_clear))
mds_clear_cpu_buffers();
- asm(
+ asm volatile (
/* Store host registers */
"push %%" _ASM_DX "; push %%" _ASM_BP ";"
"push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
"push %%" _ASM_CX " \n\t"
- "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
+ "cmp %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
"je 1f \n\t"
- "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
+ "mov %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
/* Avoid VMWRITE when Enlightened VMCS is in use */
"test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
"jz 2f \n\t"
@@ -10822,32 +10791,33 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
__ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
"1: \n\t"
/* Reload cr2 if changed */
- "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
+ "mov %c[cr2](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
"mov %%cr2, %%" _ASM_DX " \n\t"
"cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
"je 3f \n\t"
"mov %%" _ASM_AX", %%cr2 \n\t"
"3: \n\t"
		/* Check if vmlaunch or vmresume is needed */
- "cmpb $0, %c[launched](%0) \n\t"
+ "cmpb $0, %c[launched](%%" _ASM_CX ") \n\t"
/* Load guest registers. Don't clobber flags. */
- "mov %c[rax](%0), %%" _ASM_AX " \n\t"
- "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
- "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
- "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
- "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
- "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
+ "mov %c[rax](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
+ "mov %c[rbx](%%" _ASM_CX "), %%" _ASM_BX " \n\t"
+ "mov %c[rdx](%%" _ASM_CX "), %%" _ASM_DX " \n\t"
+ "mov %c[rsi](%%" _ASM_CX "), %%" _ASM_SI " \n\t"
+ "mov %c[rdi](%%" _ASM_CX "), %%" _ASM_DI " \n\t"
+ "mov %c[rbp](%%" _ASM_CX "), %%" _ASM_BP " \n\t"
#ifdef CONFIG_X86_64
- "mov %c[r8](%0), %%r8 \n\t"
- "mov %c[r9](%0), %%r9 \n\t"
- "mov %c[r10](%0), %%r10 \n\t"
- "mov %c[r11](%0), %%r11 \n\t"
- "mov %c[r12](%0), %%r12 \n\t"
- "mov %c[r13](%0), %%r13 \n\t"
- "mov %c[r14](%0), %%r14 \n\t"
- "mov %c[r15](%0), %%r15 \n\t"
+ "mov %c[r8](%%" _ASM_CX "), %%r8 \n\t"
+ "mov %c[r9](%%" _ASM_CX "), %%r9 \n\t"
+ "mov %c[r10](%%" _ASM_CX "), %%r10 \n\t"
+ "mov %c[r11](%%" _ASM_CX "), %%r11 \n\t"
+ "mov %c[r12](%%" _ASM_CX "), %%r12 \n\t"
+ "mov %c[r13](%%" _ASM_CX "), %%r13 \n\t"
+ "mov %c[r14](%%" _ASM_CX "), %%r14 \n\t"
+ "mov %c[r15](%%" _ASM_CX "), %%r15 \n\t"
#endif
- "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
+ /* Load guest RCX. This kills the vmx_vcpu pointer! */
+ "mov %c[rcx](%%" _ASM_CX "), %%" _ASM_CX " \n\t"
/* Enter guest mode */
"jne 1f \n\t"
@@ -10855,26 +10825,42 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"jmp 2f \n\t"
"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
"2: "
- /* Save guest registers, load host registers, keep flags */
- "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
- "pop %0 \n\t"
- "setbe %c[fail](%0)\n\t"
- "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
- "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
- __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
- "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
- "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
- "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
- "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
+
+ /* Save guest's RCX to the stack placeholder (see above) */
+ "mov %%" _ASM_CX ", %c[wordsize](%%" _ASM_SP ") \n\t"
+
+ /* Load host's RCX, i.e. the vmx_vcpu pointer */
+ "pop %%" _ASM_CX " \n\t"
+
+ /* Set vmx->fail based on EFLAGS.{CF,ZF} */
+ "setbe %c[fail](%%" _ASM_CX ")\n\t"
+
+ /* Save all guest registers, including RCX from the stack */
+ "mov %%" _ASM_AX ", %c[rax](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_BX ", %c[rbx](%%" _ASM_CX ") \n\t"
+ __ASM_SIZE(pop) " %c[rcx](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_DX ", %c[rdx](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_SI ", %c[rsi](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_DI ", %c[rdi](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_BP ", %c[rbp](%%" _ASM_CX ") \n\t"
#ifdef CONFIG_X86_64
- "mov %%r8, %c[r8](%0) \n\t"
- "mov %%r9, %c[r9](%0) \n\t"
- "mov %%r10, %c[r10](%0) \n\t"
- "mov %%r11, %c[r11](%0) \n\t"
- "mov %%r12, %c[r12](%0) \n\t"
- "mov %%r13, %c[r13](%0) \n\t"
- "mov %%r14, %c[r14](%0) \n\t"
- "mov %%r15, %c[r15](%0) \n\t"
+ "mov %%r8, %c[r8](%%" _ASM_CX ") \n\t"
+ "mov %%r9, %c[r9](%%" _ASM_CX ") \n\t"
+ "mov %%r10, %c[r10](%%" _ASM_CX ") \n\t"
+ "mov %%r11, %c[r11](%%" _ASM_CX ") \n\t"
+ "mov %%r12, %c[r12](%%" _ASM_CX ") \n\t"
+ "mov %%r13, %c[r13](%%" _ASM_CX ") \n\t"
+ "mov %%r14, %c[r14](%%" _ASM_CX ") \n\t"
+ "mov %%r15, %c[r15](%%" _ASM_CX ") \n\t"
+
+ /*
+ * Clear all general purpose registers (except RSP, which is loaded by
+ * the CPU during VM-Exit) to prevent speculative use of the guest's
+ * values, even those that are saved/loaded via the stack. In theory,
+ * an L1 cache miss when restoring registers could lead to speculative
+ * execution with the guest's values. Zeroing XORs are dirt cheap,
+ * i.e. the extra paranoia is essentially free.
+ */
"xor %%r8d, %%r8d \n\t"
"xor %%r9d, %%r9d \n\t"
"xor %%r10d, %%r10d \n\t"
@@ -10885,18 +10871,22 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"xor %%r15d, %%r15d \n\t"
#endif
"mov %%cr2, %%" _ASM_AX " \n\t"
- "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
+ "mov %%" _ASM_AX ", %c[cr2](%%" _ASM_CX ") \n\t"
"xor %%eax, %%eax \n\t"
"xor %%ebx, %%ebx \n\t"
+ "xor %%ecx, %%ecx \n\t"
+ "xor %%edx, %%edx \n\t"
"xor %%esi, %%esi \n\t"
"xor %%edi, %%edi \n\t"
+ "xor %%ebp, %%ebp \n\t"
"pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
".pushsection .rodata \n\t"
".global vmx_return \n\t"
"vmx_return: " _ASM_PTR " 2b \n\t"
".popsection"
- : : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp),
+ : "=c"((int){0}), "=d"((int){0}), "=S"((int){0})
+ : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp),
[launched]"i"(offsetof(struct vcpu_vmx, __launched)),
[fail]"i"(offsetof(struct vcpu_vmx, fail)),
[host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
@@ -11016,6 +11006,10 @@ STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
static struct kvm *vmx_vm_alloc(void)
{
struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
+
+ if (!kvm_vmx)
+ return NULL;
+
return &kvm_vmx->kvm;
}
@@ -12155,13 +12149,9 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
set_cr4_guest_host_mask(vmx);
- if (kvm_mpx_supported()) {
- if (vmx->nested.nested_run_pending &&
- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
- vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
- else
- vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
- }
+ if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+ vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
if (enable_vpid) {
if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
@@ -12225,6 +12215,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
}
+ if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
+ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
+ vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
if (vmx->nested.nested_run_pending) {
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
vmcs12->vm_entry_intr_info_field);
@@ -12990,7 +12983,7 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
}
}
-static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qual;
@@ -13028,8 +13021,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
return 0;
}
- if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
- nested_exit_on_intr(vcpu)) {
+ if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu)) {
if (block_nested_events)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
@@ -13607,17 +13599,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
if (likely(!vmx->fail)) {
- /*
- * TODO: SDM says that with acknowledge interrupt on
- * exit, bit 31 of the VM-exit interrupt information
- * (valid interrupt) is always set to 1 on
- * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
- * need kvm_cpu_has_interrupt(). See the commit
- * message for details.
- */
- if (nested_exit_intr_ack_set(vcpu) &&
- exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
- kvm_cpu_has_interrupt(vcpu)) {
+ if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
+ nested_exit_intr_ack_set(vcpu)) {
int irq = kvm_cpu_get_interrupt(vcpu);
WARN_ON(irq < 0);
vmcs12->vm_exit_intr_info = irq |
@@ -13865,11 +13848,10 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
kvm_flush_pml_buffers(kvm);
}
-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
+static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
{
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- gpa_t gpa;
struct page *page = NULL;
u64 *pml_address;
@@ -13890,7 +13872,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
return 1;
}
- gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+ gpa &= ~0xFFFull;
page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
if (is_error_page(page))
@@ -14590,7 +14572,7 @@ module_exit(vmx_exit);
static int __init vmx_init(void)
{
- int r;
+ int r, cpu;
#if IS_ENABLED(CONFIG_HYPERV)
/*
@@ -14641,6 +14623,12 @@ static int __init vmx_init(void)
}
}
+ for_each_possible_cpu(cpu) {
+ INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
+ INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
+ spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+ }
+
#ifdef CONFIG_KEXEC_CORE
rcu_assign_pointer(crash_vmclear_loaded_vmcss,
crash_vmclear_local_loaded_vmcss);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2cb379e261c0..435e74e1f2e5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -102,6 +102,7 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
+static void process_smi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
@@ -857,7 +858,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
- X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+ X86_CR4_SMEP;
+ unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;
if (kvm_valid_cr4(vcpu, cr4))
return 1;
@@ -865,6 +867,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE))
return 1;
+ if ((cr4 ^ old_cr4) & X86_CR4_LA57)
+ return 1;
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
&& ((cr4 ^ old_cr4) & pdptr_bits)
&& !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
@@ -883,7 +887,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (kvm_x86_ops->set_cr4(vcpu, cr4))
return 1;
- if (((cr4 ^ old_cr4) & pdptr_bits) ||
+ if (((cr4 ^ old_cr4) & mmu_role_bits) ||
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
@@ -2397,43 +2401,45 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
static void record_steal_time(struct kvm_vcpu *vcpu)
{
+ struct kvm_host_map map;
+ struct kvm_steal_time *st;
+
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
- if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
+ /* -EAGAIN is returned in atomic context so we can just return. */
+ if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
+ &map, &vcpu->arch.st.cache, false))
return;
+ st = map.hva +
+ offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
/*
* Doing a TLB flush here, on the guest's behalf, can avoid
* expensive IPIs.
*/
- if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
+ if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
kvm_vcpu_flush_tlb(vcpu, false);
- if (vcpu->arch.st.steal.version & 1)
- vcpu->arch.st.steal.version += 1; /* first time write, random junk */
+ vcpu->arch.st.preempted = 0;
- vcpu->arch.st.steal.version += 1;
+ if (st->version & 1)
+ st->version += 1; /* first time write, random junk */
- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+ st->version += 1;
smp_wmb();
- vcpu->arch.st.steal.steal += current->sched_info.run_delay -
+ st->steal += current->sched_info.run_delay -
vcpu->arch.st.last_steal;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
-
smp_wmb();
- vcpu->arch.st.steal.version += 1;
+ st->version += 1;
- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+ kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
}
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
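
record_steal_time() above still drives the even/odd version protocol of struct kvm_steal_time (version is bumped to an odd value while the fields are updated and back to even afterwards); the patch only changes how the shared page is reached. The guest-side consumer pairs with it like a seqcount reader. A simplified sketch with an invented struct layout and no explicit barriers, just to show the retry loop:

#include <stdint.h>

/* Simplified stand-in for the real struct kvm_steal_time layout. */
struct steal_time {
	uint32_t version;	/* odd while the host is mid-update */
	uint32_t preempted;
	uint64_t steal;		/* nanoseconds of stolen time */
};

/* Read a consistent snapshot of the steal counter, seqcount style. */
static uint64_t read_steal(volatile struct steal_time *st)
{
	uint32_t v;
	uint64_t steal;

	do {
		while ((v = st->version) & 1)
			;			/* host update in flight */
		/* A real guest issues read barriers around this load. */
		steal = st->steal;
	} while (st->version != v);		/* retry if it moved on */

	return steal;
}

int main(void)
{
	struct steal_time st = { .version = 2, .preempted = 0, .steal = 12345 };

	return read_steal(&st) == 12345 ? 0 : 1;
}
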
@@ -2497,7 +2503,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return kvm_mtrr_set_msr(vcpu, msr, data);
case MSR_IA32_APICBASE:
return kvm_set_apic_base(vcpu, msr_info);
- case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+ case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
@@ -2575,11 +2581,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (data & KVM_STEAL_RESERVED_MASK)
return 1;
- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
- data & KVM_STEAL_VALID_BITS,
- sizeof(struct kvm_steal_time)))
- return 1;
-
vcpu->arch.st.msr_val = data;
if (!(data & KVM_MSR_ENABLED))
@@ -2800,7 +2801,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_APICBASE:
msr_info->data = kvm_get_apic_base(vcpu);
break;
- case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+ case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
break;
case MSR_IA32_TSCDEADLINE:
@@ -3272,18 +3273,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
{
+ struct kvm_host_map map;
+ struct kvm_steal_time *st;
+
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
- if (vcpu->arch.st.steal.preempted)
+ if (vcpu->arch.st.preempted)
return;
- vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
+ if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
+ &vcpu->arch.st.cache, true))
+ return;
+
+ st = map.hva +
+ offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
- kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
- &vcpu->arch.st.steal.preempted,
- offsetof(struct kvm_steal_time, preempted),
- sizeof(vcpu->arch.st.steal.preempted));
+ st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+
+ kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -3344,21 +3352,23 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
{
+ /*
+ * We can accept userspace's request for interrupt injection
+ * as long as we have a place to store the interrupt number.
+ * The actual injection will happen when the CPU is able to
+ * deliver the interrupt.
+ */
+ if (kvm_cpu_has_extint(vcpu))
+ return false;
+
+ /* Acknowledging ExtINT does not happen if LINT0 is masked. */
return (!lapic_in_kernel(vcpu) ||
kvm_apic_accept_pic_intr(vcpu));
}
-/*
- * if userspace requested an interrupt window, check that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
{
return kvm_arch_interrupt_allowed(vcpu) &&
- !kvm_cpu_has_interrupt(vcpu) &&
- !kvm_event_needs_reinjection(vcpu) &&
kvm_cpu_accept_dm_intr(vcpu);
}
@@ -3419,7 +3429,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
unsigned bank_num = mcg_cap & 0xff, bank;
r = -EINVAL;
- if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
+ if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
goto out;
if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
goto out;
@@ -3490,6 +3500,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
process_nmi(vcpu);
+
+ if (kvm_check_request(KVM_REQ_SMI, vcpu))
+ process_smi(vcpu);
+
/*
* FIXME: pass injected and pending separately. This is only
* needed for nested virtualization, whose state cannot be
@@ -4662,10 +4676,13 @@ set_identity_unlock:
r = -EFAULT;
if (copy_from_user(&u.ps, argp, sizeof u.ps))
goto out;
+ mutex_lock(&kvm->lock);
r = -ENXIO;
if (!kvm->arch.vpit)
- goto out;
+ goto set_pit_out;
r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
+set_pit_out:
+ mutex_unlock(&kvm->lock);
break;
}
case KVM_GET_PIT2: {
@@ -4685,10 +4702,13 @@ set_identity_unlock:
r = -EFAULT;
if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
goto out;
+ mutex_lock(&kvm->lock);
r = -ENXIO;
if (!kvm->arch.vpit)
- goto out;
+ goto set_pit2_out;
r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
+set_pit2_out:
+ mutex_unlock(&kvm->lock);
break;
}
case KVM_REINJECT_CONTROL: {
@@ -6771,35 +6791,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
.get_guest_ip = kvm_get_guest_ip,
};
-static void kvm_set_mmio_spte_mask(void)
-{
- u64 mask;
- int maxphyaddr = boot_cpu_data.x86_phys_bits;
-
- /*
- * Set the reserved bits and the present bit of an paging-structure
- * entry to generate page fault with PFER.RSV = 1.
- */
-
- /*
- * Mask the uppermost physical address bit, which would be reserved as
- * long as the supported physical address width is less than 52.
- */
- mask = 1ull << 51;
-
- /* Set the present bit. */
- mask |= 1ull;
-
- /*
- * If reserved bit is not supported, clear the present bit to disable
- * mmio page fault.
- */
- if (maxphyaddr == 52)
- mask &= ~1ull;
-
- kvm_mmu_set_mmio_spte_mask(mask, mask);
-}
-
#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
@@ -6877,8 +6868,6 @@ int kvm_arch_init(void *opaque)
if (r)
goto out_free_percpu;
- kvm_set_mmio_spte_mask();
-
kvm_x86_ops = ops;
kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
@@ -6922,6 +6911,7 @@ void kvm_arch_exit(void)
cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
#ifdef CONFIG_X86_64
pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
+ cancel_work_sync(&pvclock_gtod_work);
#endif
kvm_x86_ops = NULL;
kvm_mmu_module_exit();
@@ -7124,7 +7114,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
}
-static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
+static int inject_pending_event(struct kvm_vcpu *vcpu)
{
int r;
@@ -7160,7 +7150,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
* from L2 to L1.
*/
if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
- r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+ r = kvm_x86_ops->check_nested_events(vcpu);
if (r != 0)
return r;
}
@@ -7210,7 +7200,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
* KVM_REQ_EVENT only on certain events and not unconditionally?
*/
if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
- r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+ r = kvm_x86_ops->check_nested_events(vcpu);
if (r != 0)
return r;
}
@@ -7521,9 +7511,8 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}
-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end,
- bool blockable)
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end)
{
unsigned long apic_address;
@@ -7534,8 +7523,6 @@ int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
if (start <= apic_address && apic_address < end)
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
-
- return 0;
}
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
@@ -7683,7 +7670,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto out;
}
- if (inject_pending_event(vcpu, req_int_win) != 0)
+ if (inject_pending_event(vcpu) != 0)
req_immediate_exit = true;
else {
/* Enable SMI/NMI/IRQ window open exits if needed.
@@ -7894,7 +7881,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
- kvm_x86_ops->check_nested_events(vcpu, false);
+ kvm_x86_ops->check_nested_events(vcpu);
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted);
@@ -8634,6 +8621,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+ struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
+
+ kvm_release_pfn(cache->pfn, cache->dirty, cache);
kvmclock_reset(vcpu);
@@ -9229,6 +9219,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
{
int i;
+ /*
+ * Clear out the previous array pointers for the KVM_MR_MOVE case. The
+ * old arrays will be freed by __kvm_set_memory_region() if installing
+ * the new memslot is successful.
+ */
+ memset(&slot->arch, 0, sizeof(slot->arch));
+
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
struct kvm_lpage_info *linfo;
unsigned long ugfn;
@@ -9291,11 +9288,18 @@ out_free:
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
+ struct kvm_vcpu *vcpu;
+ int i;
+
/*
* memslots->generation has been incremented.
* mmio generation may have reached its maximum value.
*/
kvm_mmu_invalidate_mmio_sptes(kvm, gen);
+
+ /* Force re-initialization of steal_time cache */
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_vcpu_kick(vcpu);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -9303,6 +9307,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
+ if (change == KVM_MR_MOVE)
+ return kvm_arch_create_memslot(kvm, memslot,
+ mem->memory_size >> PAGE_SHIFT);
+
return 0;
}
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 87dcba101e56..3172eaf4ec5e 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -70,14 +70,15 @@ static int get_seg_reg_override_idx(struct insn *insn)
{
int idx = INAT_SEG_REG_DEFAULT;
int num_overrides = 0, i;
+ insn_byte_t p;
insn_get_prefixes(insn);
/* Look for any segment override prefixes. */
- for (i = 0; i < insn->prefixes.nbytes; i++) {
+ for_each_insn_prefix(insn, i, p) {
insn_attr_t attr;
- attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+ attr = inat_get_opcode_attribute(p);
switch (attr) {
case INAT_MAKE_PREFIX(INAT_PFX_CS):
idx = INAT_SEG_REG_CS;
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9d05572370ed..84b0078272d1 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -14,8 +14,6 @@
* to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
*/
-.weak memcpy
-
/*
* memcpy - Copy a memory block.
*
@@ -28,7 +26,9 @@
* rax original destination
*/
ENTRY(__memcpy)
-ENTRY(memcpy)
+.weak memcpy
+.p2align 4, 0x90
+memcpy:
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memcpy_erms", X86_FEATURE_ERMS
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..e1cfc880f42d 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -25,8 +25,8 @@
* rax: dest
*/
.weak memmove
-
-ENTRY(memmove)
+.p2align 4, 0x90
+memmove:
ENTRY(__memmove)
/* Handle more 32 bytes in loop */
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..084189acdcd0 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -6,8 +6,6 @@
#include <asm/alternative-asm.h>
#include <asm/export.h>
-.weak memset
-
/*
* ISO C memset - set a memory block to a byte value. This function uses fast
* string to get better performance than the original function. The code is
@@ -19,7 +17,9 @@
*
* rax original destination
*/
-ENTRY(memset)
+.weak memset
+.p2align 4, 0x90
+memset:
ENTRY(__memset)
/*
* Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index fee8b9c0520c..9009393f44c7 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -253,7 +253,7 @@ static void __wrmsr_safe_regs_on_cpu(void *info)
rv->err = wrmsr_safe_regs(rv->regs);
}
-int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
int err;
struct msr_regs_info rv;
@@ -266,7 +266,7 @@ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
-int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
int err;
struct msr_regs_info rv;
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 9c5606d88f61..40dbbd8f1fe4 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -23,6 +23,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
asm volatile(
" testq %[size8],%[size8]\n"
" jz 4f\n"
+ " .align 16\n"
"0: movq $0,(%[dst])\n"
" addq $8,%[dst]\n"
" decl %%ecx ; jnz 0b\n"
@@ -138,7 +139,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
*/
if (size < 8) {
if (!IS_ALIGNED(dest, 4) || size != 4)
- clean_cache_range(dst, 1);
+ clean_cache_range(dst, size);
} else {
if (!IS_ALIGNED(dest, 8)) {
dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
diff --git a/arch/x86/math-emu/wm_sqrt.S b/arch/x86/math-emu/wm_sqrt.S
index f031c0e19356..515cdee90df7 100644
--- a/arch/x86/math-emu/wm_sqrt.S
+++ b/arch/x86/math-emu/wm_sqrt.S
@@ -209,7 +209,7 @@ sqrt_stage_2_finish:
#ifdef PARANOID
/* It should be possible to get here only if the arg is ffff....ffff */
- cmp $0xffffffff,FPU_fsqrt_arg_1
+ cmpl $0xffffffff,FPU_fsqrt_arg_1
jnz sqrt_stage_2_error
#endif /* PARANOID */
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index fe7a12599d8e..968d7005f4a7 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -62,6 +62,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
unsigned long addr, unsigned long end)
{
unsigned long next;
+ int result;
for (; addr < end; addr = next) {
p4d_t *p4d = p4d_page + p4d_index(addr);
@@ -73,13 +74,20 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
if (p4d_present(*p4d)) {
pud = pud_offset(p4d, 0);
- ident_pud_init(info, pud, addr, next);
+ result = ident_pud_init(info, pud, addr, next);
+ if (result)
+ return result;
+
continue;
}
pud = (pud_t *)info->alloc_pgt_page(info->context);
if (!pud)
return -ENOMEM;
- ident_pud_init(info, pud, addr, next);
+
+ result = ident_pud_init(info, pud, addr, next);
+ if (result)
+ return result;
+
set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fb5f29c60019..b1dba0987565 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -120,8 +120,6 @@ __ref void *alloc_low_pages(unsigned int num)
} else {
pfn = pgt_buf_end;
pgt_buf_end += num;
- printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
- pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
}
for (i = 0; i < num; i++) {
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 006f373f54ab..c0499c38962b 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -228,7 +228,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
if (pgprot_val(old_prot) == pgprot_val(new_prot))
return;
- pa = pfn << page_level_shift(level);
+ pa = pfn << PAGE_SHIFT;
size = page_level_size(level);
/*
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index c9faf34cbb62..1f25201de0af 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -47,8 +47,8 @@
#define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
#define PMD_FLAGS_DEC PMD_FLAGS_LARGE
-#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
- (_PAGE_PAT | _PAGE_PWT))
+#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
+ (_PAGE_PAT_LARGE | _PAGE_PWT))
#define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC)
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 2c1ecf4763c4..e32b003e064a 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -384,7 +384,7 @@ static void enter_uniprocessor(void)
int cpu;
int err;
- if (downed_cpus == NULL &&
+ if (!cpumask_available(downed_cpus) &&
!alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
pr_notice("Failed to allocate mask\n");
goto out;
@@ -414,7 +414,7 @@ static void leave_uniprocessor(void)
int cpu;
int err;
- if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0)
+ if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
return;
pr_notice("Re-enabling CPUs...\n");
for_each_cpu(cpu, downed_cpus) {
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index d71d72cf6c66..4686757a74d7 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -322,7 +322,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
u64 addr, u64 max_addr, u64 size)
{
return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
- 0, NULL, NUMA_NO_NODE);
+ 0, NULL, 0);
}
int __init setup_emu2phys_nid(int *dfl_phys_nid)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index a3c9ea29d7cc..324e26d0607b 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -1131,12 +1131,14 @@ static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
+ kfree(v);
++*pos;
return memtype_get_idx(*pos);
}
static void memtype_seq_stop(struct seq_file *seq, void *v)
{
+ kfree(v);
}
static int memtype_seq_show(struct seq_file *seq, void *v)
@@ -1145,7 +1147,6 @@ static int memtype_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
print_entry->start, print_entry->end);
- kfree(print_entry);
return 0;
}
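
The pat.c hunks move the kfree() of each iterator element out of ->show() and into ->next() and ->stop(), matching the seq_file contract: ->show() may be skipped or re-run for the same element, but ->next() and ->stop() always see an element exactly once when it is done with. A minimal sketch of that ownership pattern (the names and the fake four-entry allocator are invented, and the usual seq_open()/proc wiring is omitted):

#include <linux/seq_file.h>
#include <linux/slab.h>

/* Hypothetical per-entry snapshot, like the print_entry in pat.c. */
struct demo_entry {
	unsigned long start, end;
};

static struct demo_entry *demo_get_idx(loff_t pos)
{
	struct demo_entry *e;

	if (pos >= 4)			/* pretend there are four entries */
		return NULL;

	e = kmalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return NULL;
	e->start = pos * 0x1000;
	e->end = e->start + 0xfff;
	return e;
}

static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
{
	return demo_get_idx(*pos);
}

static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	kfree(v);			/* done with the previous entry */
	++*pos;
	return demo_get_idx(*pos);
}

static void demo_seq_stop(struct seq_file *seq, void *v)
{
	kfree(v);			/* v may be NULL; kfree(NULL) is fine */
}

static int demo_seq_show(struct seq_file *seq, void *v)
{
	struct demo_entry *e = v;

	/* No kfree() here: ->show() does not own the entry. */
	seq_printf(seq, "0x%lx-0x%lx\n", e->start, e->end);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_seq_start,
	.next	= demo_seq_next,
	.stop	= demo_seq_stop,
	.show	= demo_seq_show,
};
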
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index bf52106ab9c4..c0e9c00402ac 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -838,6 +838,8 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
}
free_page((unsigned long)pmd_sv);
+
+ pgtable_pmd_page_dtor(virt_to_page(pmd));
free_page((unsigned long)pmd);
return 1;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index b72296bd04a2..2f41a34c8f57 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -321,8 +321,14 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
/*
* The membarrier system call requires a full memory barrier and
* core serialization before returning to user-space, after
- * storing to rq->curr. Writing to CR3 provides that full
- * memory barrier and core serializing instruction.
+ * storing to rq->curr, when changing mm. This is because
+ * membarrier() sends IPIs to all CPUs that are in the target mm
+ * to make them issue memory barriers. However, if another CPU
+ * switches to/from the target mm concurrently with
+ * membarrier(), it can cause that CPU not to receive an IPI
+ * when it really should issue a memory barrier. Writing to CR3
+ * provides that full memory barrier and core serializing
+ * instruction.
*/
if (real_prev == next) {
VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
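
The expanded comment spells out why the CR3 write on a context switch doubles as the full memory barrier and core-serializing instruction that the membarrier() system call relies on. From user space the counterpart is simply the system call itself; a minimal sketch using the raw syscall number (glibc ships no wrapper) and MEMBARRIER_CMD_GLOBAL, assuming a kernel recent enough to offer that command:

#define _GNU_SOURCE
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* Ask the kernel which membarrier commands are available. */
	long cmds = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);

	if (cmds < 0 || !(cmds & MEMBARRIER_CMD_GLOBAL)) {
		fprintf(stderr, "membarrier() not supported here\n");
		return 1;
	}

	/*
	 * Issue a memory barrier on every CPU currently running a thread;
	 * the ordering guarantee documented in the hunk above (CR3 write
	 * as a full barrier after the rq->curr update) is part of what
	 * makes this command correct.
	 */
	if (syscall(__NR_membarrier, MEMBARRIER_CMD_GLOBAL, 0))
		perror("membarrier");

	return 0;
}
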
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a32fc3d99407..924ca27a6139 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -142,6 +142,19 @@ static bool is_ereg(u32 reg)
BIT(BPF_REG_AX));
}
+/*
+ * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
+ * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
+ * of encoding. al,cl,dl,bl have simpler encoding.
+ */
+static bool is_ereg_8l(u32 reg)
+{
+ return is_ereg(reg) ||
+ (1 << reg) & (BIT(BPF_REG_1) |
+ BIT(BPF_REG_2) |
+ BIT(BPF_REG_FP));
+}
+
static bool is_axreg(u32 reg)
{
return reg == BPF_REG_0;
@@ -751,9 +764,8 @@ st: if (is_imm8(insn->off))
/* STX: *(u8*)(dst_reg + off) = src_reg */
case BPF_STX | BPF_MEM | BPF_B:
/* Emit 'mov byte ptr [rax + off], al' */
- if (is_ereg(dst_reg) || is_ereg(src_reg) ||
- /* We have to add extra byte for x86 SIL, DIL regs */
- src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
+ if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
+ /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
else
EMIT1(0x88);
@@ -1007,7 +1019,16 @@ emit_jmp:
}
if (image) {
- if (unlikely(proglen + ilen > oldproglen)) {
+ /*
+ * When populating the image, assert that:
+ *
+ * i) We do not write beyond the allocated space, and
+ * ii) addrs[i] did not change from the prior run, in order
+ * to validate assumptions made for computing branch
+ * displacements.
+ */
+ if (unlikely(proglen + ilen > oldproglen ||
+ proglen + ilen != addrs[i])) {
pr_err("bpf_jit: fatal error\n");
return -EFAULT;
}
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 24d573bc550d..adee990abab1 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -1830,7 +1830,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
STACK_VAR(dst_hi));
EMIT(0x0, 4);
} else {
- EMIT3(0xC7, add_1reg(0xC0, dst_hi), 0);
+ /* xor dst_hi,dst_hi */
+ EMIT2(0x33,
+ add_2reg(0xC0, dst_hi, dst_hi));
}
break;
case BPF_DW:
@@ -1968,8 +1970,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
goto emit_cond_jmp_signed;
}
case BPF_JMP | BPF_JSET | BPF_X: {
- u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
- u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+ u8 dreg_lo = IA32_EAX;
+ u8 dreg_hi = IA32_EDX;
u8 sreg_lo = sstk ? IA32_ECX : src_lo;
u8 sreg_hi = sstk ? IA32_EBX : src_hi;
@@ -1978,6 +1980,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
STACK_VAR(dst_lo));
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(dst_hi));
+ } else {
+ /* mov dreg_lo,dst_lo */
+ EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
+ /* mov dreg_hi,dst_hi */
+ EMIT2(0x89,
+ add_2reg(0xC0, dreg_hi, dst_hi));
}
if (sstk) {
@@ -1996,8 +2004,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
}
case BPF_JMP | BPF_JSET | BPF_K: {
u32 hi;
- u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
- u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+ u8 dreg_lo = IA32_EAX;
+ u8 dreg_hi = IA32_EDX;
u8 sreg_lo = IA32_ECX;
u8 sreg_hi = IA32_EBX;
@@ -2006,6 +2014,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
STACK_VAR(dst_lo));
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
STACK_VAR(dst_hi));
+ } else {
+ /* mov dreg_lo,dst_lo */
+ EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
+ /* mov dreg_hi,dst_hi */
+ EMIT2(0x89,
+ add_2reg(0xC0, dreg_hi, dst_hi));
}
hi = imm32 & (1<<31) ? (u32)~0 : 0;
@@ -2187,7 +2201,16 @@ notyet:
}
if (image) {
- if (unlikely(proglen + ilen > oldproglen)) {
+ /*
+ * When populating the image, assert that:
+ *
+ * i) We do not write beyond the allocated space, and
+ * ii) addrs[i] did not change from the prior run, in order
+ * to validate assumptions made for computing branch
+ * displacements.
+ */
+ if (unlikely(proglen + ilen > oldproglen ||
+ proglen + ilen != addrs[i])) {
pr_err("bpf_jit: fatal error\n");
return -EFAULT;
}
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index e723559c386a..0c67a5a94de3 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -572,6 +572,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);
/*
* Device [1022:7808]
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 43867bc85368..eea5a0f3b959 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -33,6 +33,7 @@
#include <asm/hw_irq.h>
#include <asm/io_apic.h>
#include <asm/intel-mid.h>
+#include <asm/acpi.h>
#define PCIE_CAP_OFFSET 0x100
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 9112d1cb397b..22da9bfd8a45 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -25,6 +25,7 @@
#include <asm/xen/pci.h>
#include <asm/xen/cpuid.h>
#include <asm/apic.h>
+#include <asm/acpi.h>
#include <asm/i8259.h>
static int xen_pcifront_enable_irq(struct pci_dev *dev)
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 2a9a703ef4a0..77d05b56089a 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -214,28 +214,30 @@ int __init efi_alloc_page_tables(void)
gfp_mask = GFP_KERNEL | __GFP_ZERO;
efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
if (!efi_pgd)
- return -ENOMEM;
+ goto fail;
pgd = efi_pgd + pgd_index(EFI_VA_END);
p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
- if (!p4d) {
- free_page((unsigned long)efi_pgd);
- return -ENOMEM;
- }
+ if (!p4d)
+ goto free_pgd;
pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
- if (!pud) {
- if (pgtable_l5_enabled())
- free_page((unsigned long) pgd_page_vaddr(*pgd));
- free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
- return -ENOMEM;
- }
+ if (!pud)
+ goto free_p4d;
efi_mm.pgd = efi_pgd;
mm_init_cpumask(&efi_mm);
init_new_context(NULL, &efi_mm);
return 0;
+
+free_p4d:
+ if (pgtable_l5_enabled())
+ free_page((unsigned long)pgd_page_vaddr(*pgd));
+free_pgd:
+ free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
+fail:
+ return -ENOMEM;
}
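
    Editor's note: the rewrite above replaces per-branch cleanup with the usual
    kernel error-unwinding ladder — allocate in order, and on failure jump to a
    label that frees everything acquired so far in reverse order, so each resource
    is released exactly once. A minimal stand-alone sketch of the same shape
    (plain malloc/free and made-up names, not the EFI code):

    #include <errno.h>
    #include <stdlib.h>

    struct ctx {
        void *pgd;
        void *p4d;
        void *pud;
    };

    /* Acquire three resources; unwind the ones already held on any failure. */
    static int ctx_init(struct ctx *c)
    {
        c->pgd = malloc(64);
        if (!c->pgd)
            goto fail;

        c->p4d = malloc(64);
        if (!c->p4d)
            goto free_pgd;

        c->pud = malloc(64);
        if (!c->pud)
            goto free_p4d;

        return 0;

    free_p4d:
        free(c->p4d);
    free_pgd:
        free(c->pgd);
    fail:
        return -ENOMEM;
    }

    int main(void)
    {
        struct ctx c;

        if (ctx_init(&c))
            return 1;

        free(c.pud);
        free(c.p4d);
        free(c.pgd);
        return 0;
    }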
/*
@@ -833,7 +835,7 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
phys_vendor = virt_to_phys_or_null(vnd);
phys_data = virt_to_phys_or_null_size(data, data_size);
- if (!phys_name || !phys_data)
+ if (!phys_name || (data && !phys_data))
status = EFI_INVALID_PARAMETER;
else
status = efi_thunk(set_variable, phys_name, phys_vendor,
@@ -864,7 +866,7 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
phys_vendor = virt_to_phys_or_null(vnd);
phys_data = virt_to_phys_or_null_size(data, data_size);
- if (!phys_name || !phys_data)
+ if (!phys_name || (data && !phys_data))
status = EFI_INVALID_PARAMETER;
else
status = efi_thunk(set_variable, phys_name, phys_vendor,
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index fc13cbbb2dce..abb6075397f0 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -167,9 +167,10 @@ static struct irq_domain *uv_get_irq_domain(void)
goto out;
uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL);
- irq_domain_free_fwnode(fn);
if (uv_domain)
uv_domain->parent = x86_vector_domain;
+ else
+ irq_domain_free_fwnode(fn);
out:
mutex_unlock(&uv_lock);
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index b81b5172cf99..2cfa0caef133 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -15,7 +15,10 @@ $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
targets += purgatory.ro
+# Sanitizer, etc. runtimes are unavailable and cannot be linked here.
+GCOV_PROFILE := n
KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
# These are adjustments to the compiler flags used for objects that
@@ -23,7 +26,7 @@ KCOV_INSTRUMENT := n
PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
-PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN)
+PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
# in turn leaves some undefined symbols like __fentry__ in purgatory and not
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 3a6c8ebc8032..aa046d46ff8f 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -841,9 +841,11 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
case R_386_PC32:
case R_386_PC16:
case R_386_PC8:
+ case R_386_PLT32:
/*
- * NONE can be ignored and PC relative relocations don't
- * need to be adjusted.
+ * NONE can be ignored and PC relative relocations don't need
+ * to be adjusted. Because sym must be defined, R_386_PLT32 can
+ * be treated the same way as R_386_PC32.
*/
break;
@@ -884,9 +886,11 @@ static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
case R_386_PC32:
case R_386_PC16:
case R_386_PC8:
+ case R_386_PLT32:
/*
- * NONE can be ignored and PC relative relocations don't
- * need to be adjusted.
+ * NONE can be ignored and PC relative relocations don't need
+ * to be adjusted. Because sym must be defined, R_386_PLT32 can
+ * be treated the same way as R_386_PC32.
*/
break;
diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
index 66bcdeeee639..90d2e4ce7064 100644
--- a/arch/x86/xen/efi.c
+++ b/arch/x86/xen/efi.c
@@ -172,7 +172,7 @@ static enum efi_secureboot_mode xen_efi_get_secureboot(void)
return efi_secureboot_mode_unknown;
}
-void __init xen_efi_init(void)
+void __init xen_efi_init(struct boot_params *boot_params)
{
efi_system_table_t *efi_systab_xen;
@@ -181,12 +181,12 @@ void __init xen_efi_init(void)
if (efi_systab_xen == NULL)
return;
- strncpy((char *)&boot_params.efi_info.efi_loader_signature, "Xen",
- sizeof(boot_params.efi_info.efi_loader_signature));
- boot_params.efi_info.efi_systab = (__u32)__pa(efi_systab_xen);
- boot_params.efi_info.efi_systab_hi = (__u32)(__pa(efi_systab_xen) >> 32);
+ strncpy((char *)&boot_params->efi_info.efi_loader_signature, "Xen",
+ sizeof(boot_params->efi_info.efi_loader_signature));
+ boot_params->efi_info.efi_systab = (__u32)__pa(efi_systab_xen);
+ boot_params->efi_info.efi_systab_hi = (__u32)(__pa(efi_systab_xen) >> 32);
- boot_params.secure_boot = xen_efi_get_secureboot();
+ boot_params->secure_boot = xen_efi_get_secureboot();
set_bit(EFI_BOOT, &efi.flags);
set_bit(EFI_PARAVIRT, &efi.flags);
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 76864ea59160..1c3e9185934c 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1383,6 +1383,15 @@ asmlinkage __visible void __init xen_start_kernel(void)
x86_init.mpparse.get_smp_config = x86_init_uint_noop;
xen_boot_params_init_edd();
+
+#ifdef CONFIG_ACPI
+ /*
+ * Disable selecting "Firmware First mode" for correctable
+ * memory errors, as this decision is up to the hypervisor.
+ */
+ acpi_disable_cmcff = 1;
+#endif
}
if (!boot_params.screen_info.orig_video_isVGA)
@@ -1400,7 +1409,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
/* We need this for printk timestamps */
xen_setup_runstate_info(0);
- xen_efi_init();
+ xen_efi_init(&boot_params);
/* Start the world */
#ifdef CONFIG_X86_32
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index dab07827d25e..f04d22bcf08d 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -14,6 +14,8 @@
#include <xen/interface/memory.h>
#include <xen/interface/hvm/start_info.h>
+#include "xen-ops.h"
+
/*
* PVH variables.
*
@@ -79,6 +81,8 @@ static void __init init_pvh_bootparams(void)
pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */
x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
+
+ xen_efi_init(&pvh_bootparams);
}
/*
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 159a897151d6..82577eec6d0a 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -706,9 +706,12 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
for (i = 0; i < count; i++) {
unsigned long mfn, pfn;
+ struct gnttab_unmap_grant_ref unmap[2];
+ int rc;
/* Do not add to override if the map failed. */
- if (map_ops[i].status)
+ if (map_ops[i].status != GNTST_okay ||
+ (kmap_ops && kmap_ops[i].status != GNTST_okay))
continue;
if (map_ops[i].flags & GNTMAP_contains_pte) {
@@ -722,10 +725,46 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
- if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
- ret = -ENOMEM;
- goto out;
+ if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
+ continue;
+
+ /*
+ * Signal an error for this slot. This in turn requires
+ * immediate unmapping.
+ */
+ map_ops[i].status = GNTST_general_error;
+ unmap[0].host_addr = map_ops[i].host_addr,
+ unmap[0].handle = map_ops[i].handle;
+ map_ops[i].handle = ~0;
+ if (map_ops[i].flags & GNTMAP_device_map)
+ unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
+ else
+ unmap[0].dev_bus_addr = 0;
+
+ if (kmap_ops) {
+ kmap_ops[i].status = GNTST_general_error;
+ unmap[1].host_addr = kmap_ops[i].host_addr,
+ unmap[1].handle = kmap_ops[i].handle;
+ kmap_ops[i].handle = ~0;
+ if (kmap_ops[i].flags & GNTMAP_device_map)
+ unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
+ else
+ unmap[1].dev_bus_addr = 0;
}
+
+ /*
+ * Pre-populate both status fields so they are recognizable in
+ * the log message below.
+ */
+ unmap[0].status = 1;
+ unmap[1].status = 1;
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
+ unmap, 1 + !!kmap_ops);
+ if (rc || unmap[0].status != GNTST_okay ||
+ unmap[1].status != GNTST_okay)
+ pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n",
+ rc, unmap[0].status, unmap[1].status);
}
out:
@@ -746,17 +785,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
unsigned long pfn = page_to_pfn(pages[i]);
- if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
+ if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT))
+ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ else
ret = -EINVAL;
- goto out;
- }
-
- set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
if (kunmap_ops)
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
- kunmap_ops, count);
-out:
+ kunmap_ops, count) ?: ret;
+
return ret;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index e3b18ad49889..41fd4c123165 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -89,6 +89,7 @@ asmlinkage __visible void cpu_bringup_and_idle(void)
{
cpu_bringup();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+ prevent_tail_call_optimization();
}
void xen_smp_intr_free_pv(unsigned int cpu)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 717b4847b473..6fffb86a32ad 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -101,10 +101,20 @@ void xen_init_lock_cpu(int cpu)
void xen_uninit_lock_cpu(int cpu)
{
+ int irq;
+
if (!xen_pvspin)
return;
- unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
+ /*
+ * When booting the kernel with 'mitigations=auto,nosmt', the secondary
+ * CPUs are not activated, and lock_kicker_irq is not initialized.
+ */
+ irq = per_cpu(lock_kicker_irq, cpu);
+ if (irq == -1)
+ return;
+
+ unbind_from_irqhandler(irq, NULL);
per_cpu(lock_kicker_irq, cpu) = -1;
kfree(per_cpu(irq_name, cpu));
per_cpu(irq_name, cpu) = NULL;
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 0e60bd918695..2f111f47ba98 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -122,9 +122,9 @@ static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
void __init xen_init_apic(void);
#ifdef CONFIG_XEN_EFI
-extern void xen_efi_init(void);
+extern void xen_efi_init(struct boot_params *boot_params);
#else
-static inline void __init xen_efi_init(void)
+static inline void __init xen_efi_init(struct boot_params *boot_params)
{
}
#endif
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index f1158b4c629c..da4effe27007 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -291,7 +291,7 @@ strncpy_from_user(char *dst, const char *src, long count)
return -EFAULT;
}
#else
-long strncpy_from_user(char *dst, const char *src, long count);
+long strncpy_from_user(char *dst, const char __user *src, long count);
#endif
/*
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index ff1d81385ed7..768e1f7ab871 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -404,7 +404,7 @@ static struct pmu xtensa_pmu = {
.read = xtensa_pmu_read,
};
-static int xtensa_pmu_setup(int cpu)
+static int xtensa_pmu_setup(unsigned int cpu)
{
unsigned i;
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 15580e4fc766..6a0167ac803c 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -720,7 +720,8 @@ c_start(struct seq_file *f, loff_t *pos)
static void *
c_next(struct seq_file *f, void *v, loff_t *pos)
{
- return NULL;
+ ++*pos;
+ return c_start(f, pos);
}
static void
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 4092555828b1..24cf6972eace 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -87,13 +87,13 @@ void __xtensa_libgcc_window_spill(void)
}
EXPORT_SYMBOL(__xtensa_libgcc_window_spill);
-unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v)
+unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
{
BUG();
}
EXPORT_SYMBOL(__sync_fetch_and_and_4);
-unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
+unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v)
{
BUG();
}
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 9220dcde7520..d1ebe67c68d4 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -71,8 +71,10 @@ static inline void kmap_invalidate_coherent(struct page *page,
kvaddr = TLBTEMP_BASE_1 +
(page_to_phys(page) & DCACHE_ALIAS_MASK);
+ preempt_disable();
__invalidate_dcache_page_alias(kvaddr,
page_to_phys(page));
+ preempt_enable();
}
}
}
@@ -157,6 +159,7 @@ void flush_dcache_page(struct page *page)
if (!alias && !mapping)
return;
+ preempt_disable();
virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(virt, phys);
@@ -167,6 +170,7 @@ void flush_dcache_page(struct page *page)
if (mapping)
__invalidate_icache_page_alias(virt, phys);
+ preempt_enable();
}
/* There shouldn't be an entry in the cache for this page anymore. */
@@ -200,8 +204,10 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
unsigned long phys = page_to_phys(pfn_to_page(pfn));
unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
+ preempt_disable();
__flush_invalidate_dcache_page_alias(virt, phys);
__invalidate_icache_page_alias(virt, phys);
+ preempt_enable();
}
EXPORT_SYMBOL(local_flush_cache_page);
@@ -228,11 +234,13 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
unsigned long phys = page_to_phys(page);
unsigned long tmp;
+ preempt_disable();
tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(tmp, phys);
tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(tmp, phys);
__invalidate_icache_page_alias(tmp, phys);
+ preempt_enable();
clear_bit(PG_arch_1, &page->flags);
}
@@ -266,7 +274,9 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
if (alias) {
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ preempt_disable();
__flush_invalidate_dcache_page_alias(t, phys);
+ preempt_enable();
}
/* Copy data */
@@ -281,9 +291,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
if (alias) {
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ preempt_disable();
__flush_invalidate_dcache_range((unsigned long) dst, len);
if ((vma->vm_flags & VM_EXEC) != 0)
__invalidate_icache_page_alias(t, phys);
+ preempt_enable();
} else if ((vma->vm_flags & VM_EXEC) != 0) {
__flush_dcache_range((unsigned long)dst,len);
@@ -305,7 +317,9 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
if (alias) {
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ preempt_disable();
__flush_invalidate_dcache_page_alias(t, phys);
+ preempt_enable();
}
memcpy(dst, src, len);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 66b1ebc21ce4..d984592b0995 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2478,6 +2478,7 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
}
bfqd->in_service_queue = bfqq;
+ bfqd->in_serv_last_pos = 0;
}
/*
@@ -5156,20 +5157,28 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
return bfqq;
}
-static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+static void
+bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
- struct bfq_data *bfqd = bfqq->bfqd;
enum bfqq_expiration reason;
unsigned long flags;
spin_lock_irqsave(&bfqd->lock, flags);
- bfq_clear_bfqq_wait_request(bfqq);
+ /*
+ * Since bfqq may be racing with expiration, first check whether it
+ * is still the in-service queue before acting on it. If the racing
+ * bfqq is no longer in service, it has already been expired through
+ * __bfq_bfqq_expire() and its wait_request flag has been cleared in
+ * __bfq_bfqd_reset_in_service().
+ */
if (bfqq != bfqd->in_service_queue) {
spin_unlock_irqrestore(&bfqd->lock, flags);
return;
}
+ bfq_clear_bfqq_wait_request(bfqq);
+
if (bfq_bfqq_budget_timeout(bfqq))
/*
* Also here the queue can be safely expired
@@ -5214,7 +5223,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
* early.
*/
if (bfqq)
- bfq_idle_slice_timer_body(bfqq);
+ bfq_idle_slice_timer_body(bfqd, bfqq);
return HRTIMER_NORESTART;
}
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 5bd90cd4b51e..0b96220d0efd 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -38,6 +38,18 @@ void blk_flush_integrity(void)
flush_workqueue(kintegrityd_wq);
}
+void __bio_integrity_free(struct bio_set *bs, struct bio_integrity_payload *bip)
+{
+ if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
+ if (bip->bip_vec)
+ bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
+ bip->bip_slab);
+ mempool_free(bip, &bs->bio_integrity_pool);
+ } else {
+ kfree(bip);
+ }
+}
+
/**
* bio_integrity_alloc - Allocate integrity payload and attach it to bio
* @bio: bio to attach integrity metadata to
@@ -90,7 +102,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
return bip;
err:
- mempool_free(bip, &bs->bio_integrity_pool);
+ __bio_integrity_free(bs, bip);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_integrity_alloc);
@@ -111,14 +123,7 @@ static void bio_integrity_free(struct bio *bio)
kfree(page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset);
- if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
- bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, bip->bip_slab);
-
- mempool_free(bip, &bs->bio_integrity_pool);
- } else {
- kfree(bip);
- }
-
+ __bio_integrity_free(bs, bip);
bio->bi_integrity = NULL;
bio->bi_opf &= ~REQ_INTEGRITY;
}
@@ -293,7 +298,6 @@ bool bio_integrity_prep(struct bio *bio)
if (ret == 0) {
printk(KERN_ERR "could not attach integrity payload\n");
- kfree(buf);
status = BLK_STS_RESOURCE;
goto err_end_io;
}
diff --git a/block/bio.c b/block/bio.c
index 3d757055305f..fe749404ef93 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -314,7 +314,7 @@ static struct bio *__bio_chain_endio(struct bio *bio)
{
struct bio *parent = bio->bi_private;
- if (!parent->bi_status)
+ if (bio->bi_status && !parent->bi_status)
parent->bi_status = bio->bi_status;
bio_put(bio);
return parent;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index a06547fe6f6b..85bd46e0a745 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -876,13 +876,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
goto fail;
}
+ if (radix_tree_preload(GFP_KERNEL)) {
+ blkg_free(new_blkg);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
rcu_read_lock();
spin_lock_irq(q->queue_lock);
blkg = blkg_lookup_check(pos, pol, q);
if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg);
- goto fail_unlock;
+ blkg_free(new_blkg);
+ goto fail_preloaded;
}
if (blkg) {
@@ -891,10 +898,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
blkg = blkg_create(pos, q, new_blkg);
if (unlikely(IS_ERR(blkg))) {
ret = PTR_ERR(blkg);
- goto fail_unlock;
+ goto fail_preloaded;
}
}
+ radix_tree_preload_end();
+
if (pos == blkcg)
goto success;
}
@@ -904,6 +913,8 @@ success:
ctx->body = body;
return 0;
+fail_preloaded:
+ radix_tree_preload_end();
fail_unlock:
spin_unlock_irq(q->queue_lock);
rcu_read_unlock();
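
    Editor's note: the blk-cgroup hunk above follows the preload idiom — reserve
    memory with GFP_KERNEL before taking the queue lock (where sleeping allocations
    are forbidden), and end the preload on every exit path once the insertion can no
    longer need it. A rough user-space analogue of the pairing, with stub helpers
    standing in for radix_tree_preload()/radix_tree_preload_end() (all names here
    are illustrative, not the kernel API):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *reserved;
    static void *object;

    static int preload(void)      { reserved = malloc(128); return reserved ? 0 : -ENOMEM; }
    static void preload_end(void) { free(reserved); reserved = NULL; }

    static int configure(int fail_lookup)
    {
        int err = 0;

        if (preload())                     /* may block; done before locking */
            return -ENOMEM;

        pthread_mutex_lock(&lock);
        if (fail_lookup) {                 /* lookup error path */
            err = -ENOENT;
            goto out_preloaded;
        }
        if (!object) {                     /* creation consumes the reservation */
            object = reserved;
            reserved = NULL;
        }

    out_preloaded:
        preload_end();                     /* safe even if reservation was consumed */
        pthread_mutex_unlock(&lock);
        return err;
    }

    int main(void)
    {
        printf("%d %d\n", configure(1), configure(0));
        return 0;
    }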
diff --git a/block/blk-core.c b/block/blk-core.c
index ea33d6abdcfc..80f3e729fdd4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1036,6 +1036,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
q->backing_dev_info->ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
+ q->backing_dev_info->io_pages =
+ (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info->name = "block";
q->node = node_id;
@@ -1057,6 +1059,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
mutex_init(&q->blk_trace_mutex);
#endif
mutex_init(&q->sysfs_lock);
+ mutex_init(&q->sysfs_dir_lock);
spin_lock_init(&q->__queue_lock);
if (!q->mq_ops)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 01580f88fcb3..4c810969c3e2 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -87,6 +87,7 @@ static void ioc_destroy_icq(struct io_cq *icq)
* making it impossible to determine icq_cache. Record it in @icq.
*/
icq->__rcu_icq_cache = et->icq_cache;
+ icq->flags |= ICQ_DESTROYED;
call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
@@ -230,15 +231,21 @@ static void __ioc_clear_queue(struct list_head *icq_list)
{
unsigned long flags;
+ rcu_read_lock();
while (!list_empty(icq_list)) {
struct io_cq *icq = list_entry(icq_list->next,
struct io_cq, q_node);
struct io_context *ioc = icq->ioc;
spin_lock_irqsave(&ioc->lock, flags);
+ if (icq->flags & ICQ_DESTROYED) {
+ spin_unlock_irqrestore(&ioc->lock, flags);
+ continue;
+ }
ioc_destroy_icq(icq);
spin_unlock_irqrestore(&ioc->lock, flags);
}
+ rcu_read_unlock();
}
/**
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index da1de190a3b1..d89a757cbde0 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -69,6 +69,15 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
return;
clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+ /*
+ * Order clearing SCHED_RESTART against the check of
+ * list_empty_careful(&hctx->dispatch) in blk_mq_run_hw_queue(). Its
+ * pair is the barrier in blk_mq_dispatch_rq_list(). Without the
+ * pairing, the dispatch side could still see SCHED_RESTART set while
+ * this path misses a request newly added to hctx->dispatch, and the
+ * queue would never be rerun.
+ */
+ smp_mb();
+
blk_mq_run_hw_queue(hctx, true);
}
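
    Editor's note: the barrier added above and its partner in
    blk_mq_dispatch_rq_list() form a classic store/check pairing — one side stores a
    request onto the dispatch list and then tests SCHED_RESTART, the other clears
    SCHED_RESTART and then tests the list, and the paired full barriers guarantee
    that at least one side notices the other's update so the queue is always rerun.
    A small user-space analogue using C11 atomics and pthreads (atomic flags stand
    in for the list and the bit; this is not kernel code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Flags stand in for "request sits on hctx->dispatch" and SCHED_RESTART. */
    static atomic_int dispatch_nonempty;
    static atomic_int sched_restart = 1;
    static atomic_int reruns;

    static void *dispatch_side(void *arg)
    {
        (void)arg;
        atomic_store_explicit(&dispatch_nonempty, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);     /* pairs with the fence below */
        if (!atomic_load_explicit(&sched_restart, memory_order_relaxed))
            atomic_fetch_add(&reruns, 1);              /* flag already cleared: rerun here */
        return NULL;
    }

    static void *restart_side(void *arg)
    {
        (void)arg;
        atomic_store_explicit(&sched_restart, 0, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);     /* pairs with the fence above */
        if (atomic_load_explicit(&dispatch_nonempty, memory_order_relaxed))
            atomic_fetch_add(&reruns, 1);              /* list not empty: rerun here */
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, dispatch_side, NULL);
        pthread_create(&b, NULL, restart_side, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        /* With both fences in place, at least one side always reruns the queue. */
        printf("reruns = %d\n", atomic_load(&reruns));
        return 0;
    }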
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 5006a0d00990..5e4b7ed1e897 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -264,7 +264,7 @@ void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
int i;
- lockdep_assert_held(&q->sysfs_lock);
+ lockdep_assert_held(&q->sysfs_dir_lock);
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
@@ -312,7 +312,7 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
int ret, i;
WARN_ON_ONCE(!q->kobj.parent);
- lockdep_assert_held(&q->sysfs_lock);
+ lockdep_assert_held(&q->sysfs_dir_lock);
ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
if (ret < 0)
@@ -358,7 +358,7 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
int i;
- mutex_lock(&q->sysfs_lock);
+ mutex_lock(&q->sysfs_dir_lock);
if (!q->mq_sysfs_init_done)
goto unlock;
@@ -366,7 +366,7 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
blk_mq_unregister_hctx(hctx);
unlock:
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->sysfs_dir_lock);
}
int blk_mq_sysfs_register(struct request_queue *q)
@@ -374,7 +374,7 @@ int blk_mq_sysfs_register(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
int i, ret = 0;
- mutex_lock(&q->sysfs_lock);
+ mutex_lock(&q->sysfs_dir_lock);
if (!q->mq_sysfs_init_done)
goto unlock;
@@ -385,7 +385,7 @@ int blk_mq_sysfs_register(struct request_queue *q)
}
unlock:
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->sysfs_dir_lock);
return ret;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 684acaa96db7..ae70b4809bec 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1118,6 +1118,23 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
#define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
+static void blk_mq_handle_dev_resource(struct request *rq,
+ struct list_head *list)
+{
+ struct request *next =
+ list_first_entry_or_null(list, struct request, queuelist);
+
+ /*
+ * If an I/O scheduler has been configured and we got a driver tag for
+ * the next request already, free it.
+ */
+ if (next)
+ blk_mq_put_driver_tag(next);
+
+ list_add(&rq->queuelist, list);
+ __blk_mq_requeue_request(rq);
+}
+
/*
* Returns true if we did some work AND can potentially do more.
*/
@@ -1185,17 +1202,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
ret = q->mq_ops->queue_rq(hctx, &bd);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
- /*
- * If an I/O scheduler has been configured and we got a
- * driver tag for the next request already, free it
- * again.
- */
- if (!list_empty(list)) {
- nxt = list_first_entry(list, struct request, queuelist);
- blk_mq_put_driver_tag(nxt);
- }
- list_add(&rq->queuelist, list);
- __blk_mq_requeue_request(rq);
+ blk_mq_handle_dev_resource(rq, list);
break;
}
@@ -1222,6 +1229,15 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
spin_unlock(&hctx->lock);
/*
+ * Order adding requests to hctx->dispatch against the check of the
+ * SCHED_RESTART flag. The pair of this smp_mb() is the one in
+ * blk_mq_sched_restart(). It prevents the restart path from missing
+ * requests newly added to hctx->dispatch while SCHED_RESTART is still
+ * observed as set here.
+ */
+ smp_mb();
+
+ /*
* If SCHED_RESTART was set by the caller of this function and
* it is no longer set that means that it was cleared by another
* thread and hence that a queue rerun is needed.
@@ -2308,11 +2324,6 @@ static void blk_mq_map_swqueue(struct request_queue *q)
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
- /*
- * Avoid others reading imcomplete hctx->cpumask through sysfs
- */
- mutex_lock(&q->sysfs_lock);
-
queue_for_each_hw_ctx(q, hctx, i) {
cpumask_clear(hctx->cpumask);
hctx->nr_ctx = 0;
@@ -2346,8 +2357,6 @@ static void blk_mq_map_swqueue(struct request_queue *q)
hctx->ctxs[hctx->nr_ctx++] = ctx;
}
- mutex_unlock(&q->sysfs_lock);
-
queue_for_each_hw_ctx(q, hctx, i) {
/*
* If no software queues are mapped to this hardware queue,
@@ -2664,10 +2673,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
/* tags can _not_ be used after returning from blk_mq_exit_queue */
void blk_mq_exit_queue(struct request_queue *q)
{
- struct blk_mq_tag_set *set = q->tag_set;
+ struct blk_mq_tag_set *set = q->tag_set;
- blk_mq_del_queue_tag_set(q);
+ /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
+ /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
+ blk_mq_del_queue_tag_set(q);
}
/* Basically redo blk_mq_init_queue with queue frozen */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index be9b39caadbd..2c01b6f65110 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -513,6 +513,14 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
}
EXPORT_SYMBOL(blk_queue_io_opt);
+static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
+{
+ sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
+ if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
+ sectors = PAGE_SIZE >> SECTOR_SHIFT;
+ return sectors;
+}
+
/**
* blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
* @t: the stacking driver (top)
@@ -639,6 +647,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
ret = -1;
}
+ t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
+ t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
+ t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
+
/* Discard alignment and granularity */
if (b->discard_granularity) {
alignment = queue_limit_discard_alignment(b, start);
@@ -717,6 +729,9 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
top, bottom);
}
+
+ t->backing_dev_info->io_pages =
+ t->limits.max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(disk_stack_limits);
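
    Editor's note: the blk_round_down_sectors() helper added in this file rounds a
    sector limit down to a multiple of the logical block size (converted to
    512-byte sectors) and never lets it drop below one page. A quick stand-alone
    sketch of the arithmetic, assuming a 4 KiB page and using a plain modulo in
    place of the kernel's power-of-two round_down():

    #include <stdio.h>

    #define SECTOR_SHIFT 9
    #define PAGE_SIZE    4096u    /* assumption for this sketch */

    static unsigned int round_down_sectors(unsigned int sectors, unsigned int lbs)
    {
        unsigned int lbs_sectors = lbs >> SECTOR_SHIFT;  /* logical block in sectors */

        sectors -= sectors % lbs_sectors;                /* round down to a block multiple */
        if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
            sectors = PAGE_SIZE >> SECTOR_SHIFT;         /* never below one page */
        return sectors;
    }

    int main(void)
    {
        /* 2555 sectors with 4 KiB logical blocks -> 2552; tiny limits clamp to 8. */
        printf("%u %u\n", round_down_sectors(2555, 4096), round_down_sectors(3, 4096));
        return 0;
    }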
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8286640d4d66..07494deb1a26 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -892,14 +892,14 @@ int blk_register_queue(struct gendisk *disk)
int ret;
struct device *dev = disk_to_dev(disk);
struct request_queue *q = disk->queue;
+ bool has_elevator = false;
if (WARN_ON(!q))
return -ENXIO;
- WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
+ WARN_ONCE(blk_queue_registered(q),
"%s is registering an already registered queue\n",
kobject_name(&dev->kobj));
- queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);
/*
* SCSI probing may synchronously create and destroy a lot of
@@ -920,8 +920,7 @@ int blk_register_queue(struct gendisk *disk)
if (ret)
return ret;
- /* Prevent changes through sysfs until registration is completed. */
- mutex_lock(&q->sysfs_lock);
+ mutex_lock(&q->sysfs_dir_lock);
ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
if (ret < 0) {
@@ -934,26 +933,37 @@ int blk_register_queue(struct gendisk *disk)
blk_mq_debugfs_register(q);
}
- kobject_uevent(&q->kobj, KOBJ_ADD);
-
- wbt_enable_default(q);
-
- blk_throtl_register_queue(q);
-
+ mutex_lock(&q->sysfs_lock);
+ /*
+ * QUEUE_FLAG_REGISTERED is not set yet, so an elevator switch
+ * cannot happen at all.
+ */
if (q->request_fn || (q->mq_ops && q->elevator)) {
- ret = elv_register_queue(q);
+ ret = elv_register_queue(q, false);
if (ret) {
mutex_unlock(&q->sysfs_lock);
- kobject_uevent(&q->kobj, KOBJ_REMOVE);
+ mutex_unlock(&q->sysfs_dir_lock);
kobject_del(&q->kobj);
blk_trace_remove_sysfs(dev);
kobject_put(&dev->kobj);
return ret;
}
+ has_elevator = true;
}
+
+ blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
+ wbt_enable_default(q);
+ blk_throtl_register_queue(q);
+
+ /* Now that everything is ready, send out the KOBJ_ADD uevent */
+ kobject_uevent(&q->kobj, KOBJ_ADD);
+ if (has_elevator)
+ kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
+ mutex_unlock(&q->sysfs_lock);
+
ret = 0;
unlock:
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->sysfs_dir_lock);
return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);
@@ -973,7 +983,7 @@ void blk_unregister_queue(struct gendisk *disk)
return;
/* Return early if disk->queue was never registered. */
- if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
+ if (!blk_queue_registered(q))
return;
/*
@@ -982,25 +992,30 @@ void blk_unregister_queue(struct gendisk *disk)
* concurrent elv_iosched_store() calls.
*/
mutex_lock(&q->sysfs_lock);
-
blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
+ mutex_unlock(&q->sysfs_lock);
+ mutex_lock(&q->sysfs_dir_lock);
/*
* Remove the sysfs attributes before unregistering the queue data
* structures that can be modified through sysfs.
*/
if (q->mq_ops)
blk_mq_unregister_dev(disk_to_dev(disk), q);
- mutex_unlock(&q->sysfs_lock);
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
blk_trace_remove_sysfs(disk_to_dev(disk));
mutex_lock(&q->sysfs_lock);
- if (q->request_fn || (q->mq_ops && q->elevator))
+ /*
+ * q->kobj has been removed, so it is safe to check whether the
+ * elevator exists without holding q->sysfs_lock.
+ */
+ if (q->request_fn || q->elevator)
elv_unregister_queue(q);
mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->sysfs_dir_lock);
kobject_put(&disk_to_dev(disk)->kobj);
}
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index f1de8ba483a9..50f2abfa1a60 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -708,7 +708,7 @@ void wbt_enable_default(struct request_queue *q)
return;
/* Queue not registered? Maybe shutting down... */
- if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
+ if (!blk_queue_registered(q))
return;
if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) ||
diff --git a/block/blk.h b/block/blk.h
index 1a5b67b57e6b..ae87e2a5f2bd 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -244,7 +244,7 @@ int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e);
void elevator_exit(struct request_queue *, struct elevator_queue *);
-int elv_register_queue(struct request_queue *q);
+int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
diff --git a/block/elevator.c b/block/elevator.c
index fae58b2f906f..5b51bc5fad9f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -833,13 +833,16 @@ static struct kobj_type elv_ktype = {
.release = elevator_release,
};
-int elv_register_queue(struct request_queue *q)
+/*
+ * elv_register_queue is called from either blk_register_queue or
+ * elevator_switch; an elevator switch cannot happen on either path,
+ * so it is safe not to hold q->sysfs_lock.
+ */
+int elv_register_queue(struct request_queue *q, bool uevent)
{
struct elevator_queue *e = q->elevator;
int error;
- lockdep_assert_held(&q->sysfs_lock);
-
error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
if (!error) {
struct elv_fs_entry *attr = e->type->elevator_attrs;
@@ -850,7 +853,9 @@ int elv_register_queue(struct request_queue *q)
attr++;
}
}
- kobject_uevent(&e->kobj, KOBJ_ADD);
+ if (uevent)
+ kobject_uevent(&e->kobj, KOBJ_ADD);
+
e->registered = 1;
if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
e->type->ops.sq.elevator_registered_fn(q);
@@ -858,15 +863,19 @@ int elv_register_queue(struct request_queue *q)
return error;
}
+/*
+ * elv_unregister_queue is called from either blk_unregister_queue or
+ * elevator_switch; an elevator switch cannot happen on either path,
+ * so it is safe not to hold q->sysfs_lock.
+ */
void elv_unregister_queue(struct request_queue *q)
{
- lockdep_assert_held(&q->sysfs_lock);
-
if (q) {
struct elevator_queue *e = q->elevator;
kobject_uevent(&e->kobj, KOBJ_REMOVE);
kobject_del(&e->kobj);
+
e->registered = 0;
/* Re-enable throttling in case elevator disabled it */
wbt_enable_default(q);
@@ -942,6 +951,7 @@ int elevator_switch_mq(struct request_queue *q,
if (q->elevator) {
if (q->elevator->registered)
elv_unregister_queue(q);
+
ioc_clear_queue(q);
elevator_exit(q, q->elevator);
}
@@ -951,7 +961,7 @@ int elevator_switch_mq(struct request_queue *q,
goto out;
if (new_e) {
- ret = elv_register_queue(q);
+ ret = elv_register_queue(q, true);
if (ret) {
elevator_exit(q, q->elevator);
goto out;
@@ -980,23 +990,19 @@ int elevator_init_mq(struct request_queue *q)
if (q->nr_hw_queues != 1)
return 0;
- /*
- * q->sysfs_lock must be held to provide mutual exclusion between
- * elevator_switch() and here.
- */
- mutex_lock(&q->sysfs_lock);
+ WARN_ON_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags));
+
if (unlikely(q->elevator))
- goto out_unlock;
+ goto out;
e = elevator_get(q, "mq-deadline", false);
if (!e)
- goto out_unlock;
+ goto out;
err = blk_mq_init_sched(q, e);
if (err)
elevator_put(e);
-out_unlock:
- mutex_unlock(&q->sysfs_lock);
+out:
return err;
}
@@ -1051,7 +1057,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
if (err)
goto fail_init;
- err = elv_register_queue(q);
+ err = elv_register_queue(q, true);
if (err)
goto fail_register;
@@ -1071,7 +1077,7 @@ fail_init:
/* switch failed, restore and re-register old elevator */
if (old) {
q->elevator = old;
- elv_register_queue(q);
+ elv_register_queue(q, true);
blk_queue_bypass_end(q);
}
@@ -1087,7 +1093,7 @@ static int __elevator_change(struct request_queue *q, const char *name)
struct elevator_type *e;
/* Make sure queue is not in the middle of being removed */
- if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
+ if (!blk_queue_registered(q))
return -ENOENT;
/*
diff --git a/block/genhd.c b/block/genhd.c
index 2b2a936cf848..27a410d31087 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -208,14 +208,17 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
part = rcu_dereference(ptbl->part[piter->idx]);
if (!part)
continue;
+ get_device(part_to_dev(part));
+ piter->part = part;
if (!part_nr_sects_read(part) &&
!(piter->flags & DISK_PITER_INCL_EMPTY) &&
!(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
- piter->idx == 0))
+ piter->idx == 0)) {
+ put_device(part_to_dev(part));
+ piter->part = NULL;
continue;
+ }
- get_device(part_to_dev(part));
- piter->part = part;
piter->idx += inc;
break;
}
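
    Editor's note: the genhd change above takes the device reference before the
    empty-partition check and drops it again when the partition is skipped, so the
    partition cannot disappear between the check and the grab. The same
    take-then-maybe-release shape in a small stand-alone sketch (plain counters
    stand in for get_device()/put_device(); not the kernel iterator):

    #include <stdio.h>

    struct part { int refcount; int nr_sects; };

    static void get_part(struct part *p) { p->refcount++; }
    static void put_part(struct part *p) { p->refcount--; }

    /* Return the next non-empty partition with a reference held, or NULL. */
    static struct part *next_part(struct part *tbl, int n, int *idx)
    {
        for (; *idx < n; (*idx)++) {
            struct part *p = &tbl[*idx];

            get_part(p);                 /* pin before inspecting */
            if (!p->nr_sects) {
                put_part(p);             /* skipped: drop the pin again */
                continue;
            }
            (*idx)++;
            return p;                    /* caller owns one reference */
        }
        return NULL;
    }

    int main(void)
    {
        struct part tbl[3] = { { 0, 0 }, { 0, 8 }, { 0, 0 } };
        int idx = 0;
        struct part *p = next_part(tbl, 3, &idx);

        printf("found=%d refcount=%d\n", p ? (int)(p - tbl) : -1, p ? p->refcount : 0);
        if (p)
            put_part(p);
        return 0;
    }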
@@ -579,7 +582,8 @@ static int exact_lock(dev_t devt, void *data)
return 0;
}
-static void register_disk(struct device *parent, struct gendisk *disk)
+static void register_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups)
{
struct device *ddev = disk_to_dev(disk);
struct block_device *bdev;
@@ -594,6 +598,10 @@ static void register_disk(struct device *parent, struct gendisk *disk)
/* delay uevents, until we scanned partition table */
dev_set_uevent_suppress(ddev, 1);
+ if (groups) {
+ WARN_ON(ddev->groups);
+ ddev->groups = groups;
+ }
if (device_add(ddev))
return;
if (!sysfs_deprecated) {
@@ -615,10 +623,8 @@ static void register_disk(struct device *parent, struct gendisk *disk)
disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
- if (disk->flags & GENHD_FL_HIDDEN) {
- dev_set_uevent_suppress(ddev, 0);
+ if (disk->flags & GENHD_FL_HIDDEN)
return;
- }
/* No minors to use for partitions */
if (!disk_part_scan_enabled(disk))
@@ -649,16 +655,19 @@ exit:
kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
disk_part_iter_exit(&piter);
- err = sysfs_create_link(&ddev->kobj,
- &disk->queue->backing_dev_info->dev->kobj,
- "bdi");
- WARN_ON(err);
+ if (disk->queue->backing_dev_info->dev) {
+ err = sysfs_create_link(&ddev->kobj,
+ &disk->queue->backing_dev_info->dev->kobj,
+ "bdi");
+ WARN_ON(err);
+ }
}
/**
* __device_add_disk - add disk information to kernel list
* @parent: parent device for the disk
* @disk: per-device partitioning information
+ * @groups: Additional per-device sysfs groups
* @register_queue: register the queue if set to true
*
* This function registers the partitioning information in @disk
@@ -667,6 +676,7 @@ exit:
* FIXME: error handling
*/
static void __device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups,
bool register_queue)
{
dev_t devt;
@@ -710,7 +720,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
blk_register_region(disk_devt(disk), disk->minors, NULL,
exact_match, exact_lock, disk);
}
- register_disk(parent, disk);
+ register_disk(parent, disk, groups);
if (register_queue)
blk_register_queue(disk);
@@ -724,15 +734,17 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
blk_integrity_add(disk);
}
-void device_add_disk(struct device *parent, struct gendisk *disk)
+void device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups)
+
{
- __device_add_disk(parent, disk, true);
+ __device_add_disk(parent, disk, groups, true);
}
EXPORT_SYMBOL(device_add_disk);
void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
{
- __device_add_disk(parent, disk, false);
+ __device_add_disk(parent, disk, NULL, false);
}
EXPORT_SYMBOL(device_add_disk_no_queue_reg);
diff --git a/certs/blacklist.c b/certs/blacklist.c
index 3a507b9e2568..e9f3f81c51f9 100644
--- a/certs/blacklist.c
+++ b/certs/blacklist.c
@@ -157,7 +157,7 @@ static int __init blacklist_init(void)
KEY_USR_VIEW | KEY_USR_READ |
KEY_USR_SEARCH,
KEY_ALLOC_NOT_IN_QUOTA |
- KEY_FLAG_KEEP,
+ KEY_ALLOC_SET_KEEP,
NULL, NULL);
if (IS_ERR(blacklist_keyring))
panic("Can't allocate system blacklist keyring\n");
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 4fc8e6a7abb2..d0276a4ed987 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -133,21 +133,15 @@ EXPORT_SYMBOL_GPL(af_alg_release);
void af_alg_release_parent(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
- unsigned int nokey = ask->nokey_refcnt;
- bool last = nokey && !ask->refcnt;
+ unsigned int nokey = atomic_read(&ask->nokey_refcnt);
sk = ask->parent;
ask = alg_sk(sk);
- local_bh_disable();
- bh_lock_sock(sk);
- ask->nokey_refcnt -= nokey;
- if (!last)
- last = !--ask->refcnt;
- bh_unlock_sock(sk);
- local_bh_enable();
+ if (nokey)
+ atomic_dec(&ask->nokey_refcnt);
- if (last)
+ if (atomic_dec_and_test(&ask->refcnt))
sock_put(sk);
}
EXPORT_SYMBOL_GPL(af_alg_release_parent);
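
    Editor's note: the af_alg conversion above swaps a counter protected by
    bh_lock_sock() for an atomic_t, so releasing the parent socket becomes lockless
    and the final reference is detected with a decrement-and-test. A minimal
    stand-alone version of that release pattern (C11 atomics; free_parent() is a
    made-up stand-in for sock_put()):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct parent {
        atomic_int refcnt;
        atomic_int nokey_refcnt;
    };

    static void free_parent(struct parent *p)
    {
        printf("last reference dropped, freeing parent\n");
        free(p);
    }

    /* Lockless release: drop an optional nokey reference, then the real one. */
    static void release_parent(struct parent *p, int held_nokey)
    {
        if (held_nokey)
            atomic_fetch_sub(&p->nokey_refcnt, 1);

        if (atomic_fetch_sub(&p->refcnt, 1) == 1)   /* dec-and-test */
            free_parent(p);
    }

    int main(void)
    {
        struct parent *p = malloc(sizeof(*p));

        atomic_init(&p->refcnt, 2);                 /* two children hold it */
        atomic_init(&p->nokey_refcnt, 1);

        release_parent(p, 1);                       /* not the last put */
        release_parent(p, 0);                       /* last put frees it */
        return 0;
    }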
@@ -157,7 +151,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
- struct sockaddr_alg *sa = (void *)uaddr;
+ struct sockaddr_alg_new *sa = (void *)uaddr;
const struct af_alg_type *type;
void *private;
int err;
@@ -165,7 +159,11 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (sock->state == SS_CONNECTED)
return -EINVAL;
- if (addr_len < sizeof(*sa))
+ BUILD_BUG_ON(offsetof(struct sockaddr_alg_new, salg_name) !=
+ offsetof(struct sockaddr_alg, salg_name));
+ BUILD_BUG_ON(offsetof(struct sockaddr_alg, salg_name) != sizeof(*sa));
+
+ if (addr_len < sizeof(*sa) + 1)
return -EINVAL;
/* If caller uses non-allowed flag, return error. */
@@ -173,7 +171,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
return -EINVAL;
sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
- sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
+ sa->salg_name[addr_len - sizeof(*sa) - 1] = 0;
type = alg_get_type(sa->salg_type);
if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) {
@@ -192,7 +190,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
err = -EBUSY;
lock_sock(sk);
- if (ask->refcnt | ask->nokey_refcnt)
+ if (atomic_read(&ask->refcnt))
goto unlock;
swap(ask->type, type);
@@ -241,7 +239,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
int err = -EBUSY;
lock_sock(sk);
- if (ask->refcnt)
+ if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt))
goto unlock;
type = ask->type;
@@ -308,12 +306,14 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
sk2->sk_family = PF_ALG;
- if (nokey || !ask->refcnt++)
+ if (atomic_inc_return_relaxed(&ask->refcnt) == 1)
sock_hold(sk);
- ask->nokey_refcnt += nokey;
+ if (nokey) {
+ atomic_inc(&ask->nokey_refcnt);
+ atomic_set(&alg_sk(sk2)->nokey_refcnt, 1);
+ }
alg_sk(sk2)->parent = sk;
alg_sk(sk2)->type = type;
- alg_sk(sk2)->nokey_refcnt = nokey;
newsock->ops = type->ops;
newsock->state = SS_CONNECTED;
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 5e6df2a087fa..445da4a0618f 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -193,8 +193,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
if (IS_ERR(thread))
goto err_put_larval;
- wait_for_completion_interruptible(&larval->completion);
-
return NOTIFY_STOP;
err_put_larval:
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index c40a8c7ee8ae..182783801ffa 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -82,7 +82,7 @@ static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
skcipher_request_set_tfm(skreq, null_tfm);
- skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
skcipher_request_set_crypt(skreq, src, dst, len, NULL);
@@ -295,19 +295,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
areq->outlen = outlen;
aead_request_set_callback(&areq->cra_u.aead_req,
- CRYPTO_TFM_REQ_MAY_BACKLOG,
+ CRYPTO_TFM_REQ_MAY_SLEEP,
af_alg_async_cb, areq);
err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req);
/* AIO operation in progress */
- if (err == -EINPROGRESS || err == -EBUSY)
+ if (err == -EINPROGRESS)
return -EIOCBQUEUED;
sock_put(sk);
} else {
/* Synchronous operation */
aead_request_set_callback(&areq->cra_u.aead_req,
+ CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
err = crypto_wait_req(ctx->enc ?
@@ -388,7 +389,7 @@ static int aead_check_key(struct socket *sock)
struct alg_sock *ask = alg_sk(sk);
lock_sock(sk);
- if (ask->refcnt)
+ if (!atomic_read(&ask->nokey_refcnt))
goto unlock_child;
psk = ask->parent;
@@ -400,11 +401,8 @@ static int aead_check_key(struct socket *sock)
if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
goto unlock;
- if (!pask->refcnt++)
- sock_hold(psk);
-
- ask->refcnt = 1;
- sock_put(psk);
+ atomic_dec(&pask->nokey_refcnt);
+ atomic_set(&ask->nokey_refcnt, 0);
err = 0;
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index bfcf595fd8f9..42521d0d7bfe 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -306,7 +306,7 @@ static int hash_check_key(struct socket *sock)
struct alg_sock *ask = alg_sk(sk);
lock_sock(sk);
- if (ask->refcnt)
+ if (!atomic_read(&ask->nokey_refcnt))
goto unlock_child;
psk = ask->parent;
@@ -318,11 +318,8 @@ static int hash_check_key(struct socket *sock)
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
- if (!pask->refcnt++)
- sock_hold(psk);
-
- ask->refcnt = 1;
- sock_put(psk);
+ atomic_dec(&pask->nokey_refcnt);
+ atomic_set(&ask->nokey_refcnt, 0);
err = 0;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index cfdaab2b7d76..9d2e9783c0d4 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -78,14 +78,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
return PTR_ERR(areq);
/* convert iovecs of output buffers into RX SGL */
- err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len);
+ err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len);
if (err)
goto free;
- /* Process only as much RX buffers for which we have TX data */
- if (len > ctx->used)
- len = ctx->used;
-
/*
* If more buffers are to be expected to be processed, process only
* full block size buffers.
@@ -131,7 +127,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
/* AIO operation in progress */
- if (err == -EINPROGRESS || err == -EBUSY)
+ if (err == -EINPROGRESS)
return -EIOCBQUEUED;
sock_put(sk);
@@ -219,7 +215,7 @@ static int skcipher_check_key(struct socket *sock)
struct alg_sock *ask = alg_sk(sk);
lock_sock(sk);
- if (ask->refcnt)
+ if (!atomic_read(&ask->nokey_refcnt))
goto unlock_child;
psk = ask->parent;
@@ -231,11 +227,8 @@ static int skcipher_check_key(struct socket *sock)
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
- if (!pask->refcnt++)
- sock_hold(psk);
-
- ask->refcnt = 1;
- sock_put(psk);
+ atomic_dec(&pask->nokey_refcnt);
+ atomic_set(&ask->nokey_refcnt, 0);
err = 0;
diff --git a/crypto/api.c b/crypto/api.c
index 1909195b2c70..5efd4d6e6312 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -571,7 +571,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
struct crypto_alg *alg;
- if (unlikely(!mem))
+ if (IS_ERR_OR_NULL(mem))
return;
alg = tfm->__crt_alg;
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index bf6300175b9c..34605509b41a 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -43,7 +43,8 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
struct ecdh params;
unsigned int ndigits;
- if (crypto_ecdh_decode_key(buf, len, &params) < 0)
+ if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
+ params.key_size > sizeof(ctx->private_key))
return -EINVAL;
ndigits = ecdh_supported_curve(params.curve_id);
@@ -57,12 +58,13 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
ctx->private_key);
- if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
- (const u64 *)params.key, params.key_size) < 0)
- return -EINVAL;
-
memcpy(ctx->private_key, params.key, params.key_size);
+ if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
+ ctx->private_key, params.key_size) < 0) {
+ memzero_explicit(ctx->private_key, params.key_size);
+ return -EINVAL;
+ }
return 0;
}
diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
index d3af8e8b0b5e..25711de44584 100644
--- a/crypto/ecdh_helper.c
+++ b/crypto/ecdh_helper.c
@@ -71,6 +71,9 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len,
if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH)
return -EINVAL;
+ if (unlikely(len < secret.len))
+ return -EINVAL;
+
ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
if (secret.len != crypto_ecdh_key_len(params))
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 5504d1325a56..d55c9ee18817 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -453,7 +453,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
crypto_free_skcipher(ctx->child);
}
-static void free(struct skcipher_instance *inst)
+static void free_inst(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
@@ -565,7 +565,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.encrypt = encrypt;
inst->alg.decrypt = decrypt;
- inst->free = free;
+ inst->free = free_inst;
err = skcipher_register_instance(tmpl, inst);
if (err)
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d332988eb8de..bf797c613ba2 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -202,8 +202,8 @@ static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc,
goto out;
}
- pr_cont("%d operations in %d seconds (%ld bytes)\n",
- bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ pr_cont("%d operations in %d seconds (%llu bytes)\n",
+ bcount * num_mb, secs, (u64)bcount * blen * num_mb);
out:
kfree(rc);
@@ -472,8 +472,8 @@ static int test_aead_jiffies(struct aead_request *req, int enc,
return ret;
}
- printk("%d operations in %d seconds (%ld bytes)\n",
- bcount, secs, (long)bcount * blen);
+ pr_cont("%d operations in %d seconds (%llu bytes)\n",
+ bcount, secs, (u64)bcount * blen);
return 0;
}
@@ -763,8 +763,8 @@ static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
goto out;
}
- pr_cont("%d operations in %d seconds (%ld bytes)\n",
- bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ pr_cont("%d operations in %d seconds (%llu bytes)\n",
+ bcount * num_mb, secs, (u64)bcount * blen * num_mb);
out:
kfree(rc);
@@ -1200,8 +1200,8 @@ static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc,
goto out;
}
- pr_cont("%d operations in %d seconds (%ld bytes)\n",
- bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ pr_cont("%d operations in %d seconds (%llu bytes)\n",
+ bcount * num_mb, secs, (u64)bcount * blen * num_mb);
out:
kfree(rc);
@@ -1438,8 +1438,8 @@ static int test_acipher_jiffies(struct skcipher_request *req, int enc,
return ret;
}
- pr_cont("%d operations in %d seconds (%ld bytes)\n",
- bcount, secs, (long)bcount * blen);
+ pr_cont("%d operations in %d seconds (%llu bytes)\n",
+ bcount, secs, (u64)bcount * blen);
return 0;
}
diff --git a/crypto/xts.c b/crypto/xts.c
index ccf55fbb8bc2..5542dd10aedf 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -393,7 +393,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
crypto_free_cipher(ctx->tweak);
}
-static void free(struct skcipher_instance *inst)
+static void free_inst(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
@@ -504,7 +504,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.encrypt = encrypt;
inst->alg.decrypt = decrypt;
- inst->free = free;
+ inst->free = free_inst;
err = skcipher_register_instance(tmpl, inst);
if (err)
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index b58850389094..c0325556a897 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -269,7 +269,12 @@ static int __init acpi_configfs_init(void)
acpi_table_group = configfs_register_default_group(root, "table",
&acpi_tables_type);
- return PTR_ERR_OR_ZERO(acpi_table_group);
+ if (IS_ERR(acpi_table_group)) {
+ configfs_unregister_subsystem(&acpi_configfs);
+ return PTR_ERR(acpi_table_group);
+ }
+
+ return 0;
}
module_init(acpi_configfs_init);
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index f21c99ec46ee..ed19d9822bc0 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -757,6 +757,9 @@ int __init acpi_aml_init(void)
goto err_exit;
}
+ if (acpi_disabled)
+ return -ENODEV;
+
/* Initialize AML IO interface */
mutex_init(&acpi_aml_io.lock);
init_waitqueue_head(&acpi_aml_io.wait);
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index 560fdae8cc59..943b1dc2d0b3 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -224,9 +224,9 @@ static int __init extlog_init(void)
u64 cap;
int rc;
- rdmsrl(MSR_IA32_MCG_CAP, cap);
-
- if (!(cap & MCG_ELOG_P) || !extlog_get_l1addr())
+ if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) ||
+ !(cap & MCG_ELOG_P) ||
+ !extlog_get_l1addr())
return -ENODEV;
if (edac_get_report_status() == EDAC_REPORTING_FORCE) {
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 67d97c0090a2..5d72baf60ac8 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -320,6 +320,9 @@ static bool matching_id(const char *idstr, const char *list_id)
{
int i;
+ if (strlen(idstr) != strlen(list_id))
+ return false;
+
if (memcmp(idstr, list_id, 3))
return false;
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 738f3c732363..228feeea555f 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -473,10 +473,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
(u8)access_byte_width;
}
}
- /* An additional reference for the container */
-
- acpi_ut_add_reference(obj_desc->field.region_obj);
-
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
obj_desc->field.start_field_bit_offset,
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 8cc4392c61f3..0dc8dea81582 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -563,11 +563,6 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
next_object = object->buffer_field.buffer_obj;
break;
- case ACPI_TYPE_LOCAL_REGION_FIELD:
-
- next_object = object->field.region_obj;
- break;
-
case ACPI_TYPE_LOCAL_BANK_FIELD:
next_object = object->bank_field.bank_obj;
@@ -608,6 +603,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
}
break;
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_REGION:
default:
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 92f9edf9d11e..c39b36c558d6 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -332,7 +332,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
int index)
{
struct platform_device *pdev;
- int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
+ int irq;
/*
* According to SBSA specification the size of refresh and control
@@ -341,7 +341,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
struct resource res[] = {
DEFINE_RES_MEM(wd->control_frame_address, SZ_4K),
DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K),
- DEFINE_RES_IRQ(irq),
+ {},
};
int nr_res = ARRAY_SIZE(res);
@@ -351,10 +351,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
if (!(wd->refresh_frame_address && wd->control_frame_address)) {
pr_err(FW_BUG "failed to get the Watchdog base address.\n");
- acpi_unregister_gsi(wd->timer_interrupt);
return -EINVAL;
}
+ irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
+ res[2] = (struct resource)DEFINE_RES_IRQ(irq);
if (irq <= 0) {
pr_warn("failed to map the Watchdog interrupt.\n");
nr_res--;
@@ -367,7 +368,8 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
*/
pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res);
if (IS_ERR(pdev)) {
- acpi_unregister_gsi(wd->timer_interrupt);
+ if (irq > 0)
+ acpi_unregister_gsi(wd->timer_interrupt);
return PTR_ERR(pdev);
}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index d5c19e25ddf5..abf101451c92 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -98,7 +98,18 @@ static const struct dmi_system_id lid_blacklst[] = {
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
- DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E2215T"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
+ {
+ /*
+ * Medion Akoya E2228T: the LID device only sends a notification
+ * on close, not on open, and _LID always returns closed.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E2228T"),
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
@@ -149,6 +160,7 @@ struct acpi_button {
int last_state;
ktime_t last_time;
bool suspended;
+ bool lid_state_initialized;
};
static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
@@ -404,6 +416,8 @@ static int acpi_lid_update_state(struct acpi_device *device,
static void acpi_lid_initialize_state(struct acpi_device *device)
{
+ struct acpi_button *button = acpi_driver_data(device);
+
switch (lid_init_state) {
case ACPI_BUTTON_LID_INIT_OPEN:
(void)acpi_lid_notify_state(device, 1);
@@ -415,13 +429,14 @@ static void acpi_lid_initialize_state(struct acpi_device *device)
default:
break;
}
+
+ button->lid_state_initialized = true;
}
static void acpi_button_notify(struct acpi_device *device, u32 event)
{
struct acpi_button *button = acpi_driver_data(device);
struct input_dev *input;
- int users;
switch (event) {
case ACPI_FIXED_HARDWARE_EVENT:
@@ -430,10 +445,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
case ACPI_BUTTON_NOTIFY_STATUS:
input = button->input;
if (button->type == ACPI_BUTTON_TYPE_LID) {
- mutex_lock(&button->input->mutex);
- users = button->input->users;
- mutex_unlock(&button->input->mutex);
- if (users)
+ if (button->lid_state_initialized)
acpi_lid_update_state(device, true);
} else {
int keycode;
@@ -478,7 +490,7 @@ static int acpi_button_resume(struct device *dev)
struct acpi_button *button = acpi_driver_data(device);
button->suspended = false;
- if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users) {
+ if (button->type == ACPI_BUTTON_TYPE_LID) {
button->last_state = !!acpi_lid_evaluate_state(device);
button->last_time = ktime_get();
acpi_lid_initialize_state(device);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 41228e545e82..1b43f8ebfabe 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -122,23 +122,15 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
*/
#define NUM_RETRIES 500ULL
-struct cppc_attr {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj,
- struct attribute *attr, char *buf);
- ssize_t (*store)(struct kobject *kobj,
- struct attribute *attr, const char *c, ssize_t count);
-};
-
#define define_one_cppc_ro(_name) \
-static struct cppc_attr _name = \
+static struct kobj_attribute _name = \
__ATTR(_name, 0444, show_##_name, NULL)
#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
#define show_cppc_data(access_fn, struct_name, member_name) \
static ssize_t show_##member_name(struct kobject *kobj, \
- struct attribute *attr, char *buf) \
+ struct kobj_attribute *attr, char *buf) \
{ \
struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \
struct struct_name st_name = {0}; \
@@ -164,7 +156,7 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
static ssize_t show_feedback_ctrs(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
struct cppc_perf_fb_ctrs fb_ctrs = {0};
@@ -869,6 +861,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
"acpi_cppc");
if (ret) {
per_cpu(cpc_desc_ptr, pr->id) = NULL;
+ kobject_put(&cpc_ptr->kobj);
goto out_free;
}
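
For reference, the cppc_acpi.c hunk above drops the driver-private struct cppc_attr in favour of the generic struct kobj_attribute, whose callbacks receive the attribute as a struct kobj_attribute pointer. A minimal read-only attribute following the same shape might look like the sketch below; example_value and its show routine are hypothetical names and not part of the patch.

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

/* Hypothetical read-only sysfs attribute built on struct kobj_attribute. */
static ssize_t example_value_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	/* A real driver would format device state here. */
	return sprintf(buf, "%d\n", 42);
}

static struct kobj_attribute example_value_attr =
	__ATTR(example_value, 0444, example_value_show, NULL);
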
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index 222ea3f12f41..613041870872 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -37,6 +37,8 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
sizeof(struct acpi_table_header)))
return -EFAULT;
uncopied_bytes = max_size = table.length;
+ /* make sure the buf is not allocated */
+ kfree(buf);
buf = kzalloc(max_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -50,6 +52,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
(*ppos + count < count) ||
(count > uncopied_bytes)) {
kfree(buf);
+ buf = NULL;
return -EINVAL;
}
@@ -71,7 +74,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
- kfree(buf);
return count;
}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 54b6547d32b2..9617e5883271 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -172,7 +172,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
* possibly drop references to the power resources in use.
*/
state = ACPI_STATE_D3_HOT;
- /* If _PR3 is not available, use D3hot as the target state. */
+ /* If D3cold is not supported, use D3hot as the target state. */
if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid)
target_state = state;
} else if (!device->power.states[state].flags.valid) {
@@ -227,13 +227,13 @@ int acpi_device_set_power(struct acpi_device *device, int state)
end:
if (result) {
dev_warn(&device->dev, "Failed to change power state to %s\n",
- acpi_power_state_string(state));
+ acpi_power_state_string(target_state));
} else {
device->power.state = target_state;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Device [%s] transitioned to %s\n",
device->pnp.bus_id,
- acpi_power_state_string(state)));
+ acpi_power_state_string(target_state)));
}
return result;
@@ -702,7 +702,7 @@ static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context)
static DEFINE_MUTEX(acpi_wakeup_lock);
static int __acpi_device_wakeup_enable(struct acpi_device *adev,
- u32 target_state, int max_count)
+ u32 target_state)
{
struct acpi_device_wakeup *wakeup = &adev->wakeup;
acpi_status status;
@@ -710,9 +710,10 @@ static int __acpi_device_wakeup_enable(struct acpi_device *adev,
mutex_lock(&acpi_wakeup_lock);
- if (wakeup->enable_count >= max_count)
+ if (wakeup->enable_count >= INT_MAX) {
+ acpi_handle_info(adev->handle, "Wakeup enable count out of bounds!\n");
goto out;
-
+ }
if (wakeup->enable_count > 0)
goto inc;
@@ -749,7 +750,7 @@ out:
*/
static int acpi_device_wakeup_enable(struct acpi_device *adev, u32 target_state)
{
- return __acpi_device_wakeup_enable(adev, target_state, 1);
+ return __acpi_device_wakeup_enable(adev, target_state);
}
/**
@@ -779,8 +780,12 @@ out:
mutex_unlock(&acpi_wakeup_lock);
}
-static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable,
- int max_count)
+/**
+ * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
+ * @dev: Device to enable/disable to generate wakeup events.
+ * @enable: Whether to enable or disable the wakeup functionality.
+ */
+int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
{
struct acpi_device *adev;
int error;
@@ -800,37 +805,15 @@ static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable,
return 0;
}
- error = __acpi_device_wakeup_enable(adev, acpi_target_system_state(),
- max_count);
+ error = __acpi_device_wakeup_enable(adev, acpi_target_system_state());
if (!error)
dev_dbg(dev, "Wakeup enabled by ACPI\n");
return error;
}
-
-/**
- * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
- * @dev: Device to enable/disable to generate wakeup events.
- * @enable: Whether to enable or disable the wakeup functionality.
- */
-int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
-{
- return __acpi_pm_set_device_wakeup(dev, enable, 1);
-}
EXPORT_SYMBOL_GPL(acpi_pm_set_device_wakeup);
/**
- * acpi_pm_set_bridge_wakeup - Enable/disable remote wakeup for given bridge.
- * @dev: Bridge device to enable/disable to generate wakeup events.
- * @enable: Whether to enable or disable the wakeup functionality.
- */
-int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
-{
- return __acpi_pm_set_device_wakeup(dev, enable, INT_MAX);
-}
-EXPORT_SYMBOL_GPL(acpi_pm_set_bridge_wakeup);
-
-/**
* acpi_dev_pm_low_power - Put ACPI device into a low-power state.
* @dev: Device to put into a low-power state.
* @adev: ACPI device node corresponding to @dev.
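
The device_pm.c change above folds the former bridge variant into a single shared enable count capped at INT_MAX, so only the first enable and the last disable touch the wakeup hardware. A generic sketch of that counting, with hypothetical names and callbacks, could look like this:

#include <linux/kernel.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(wake_lock);
static int wake_count;

static int example_wake_enable(int (*arm)(void))
{
	int ret = 0;

	mutex_lock(&wake_lock);
	if (wake_count == INT_MAX)		/* refuse to wrap the counter */
		goto out;
	if (wake_count++ == 0)			/* first user arms the source */
		ret = arm();
	if (ret)
		wake_count--;
out:
	mutex_unlock(&wake_lock);
	return ret;
}

static void example_wake_disable(void (*disarm)(void))
{
	mutex_lock(&wake_lock);
	if (wake_count > 0 && --wake_count == 0)	/* last user disarms */
		disarm();
	mutex_unlock(&wake_lock);
}
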
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 8940054d6250..b3b92c54cba8 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -259,20 +259,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
- len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
- if (len < 0)
- return len;
-
- env->buflen += len;
- if (!adev->data.of_compatible)
- return 0;
-
- if (len > 0 && add_uevent_var(env, "MODALIAS="))
- return -ENOMEM;
-
- len = create_of_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
+ if (adev->data.of_compatible)
+ len = create_of_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ else
+ len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 49e16f009095..9415a0041aaf 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1081,28 +1081,20 @@ void acpi_ec_dispatch_gpe(void)
Event Management
-------------------------------------------------------------------------- */
static struct acpi_ec_query_handler *
-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
-{
- if (handler)
- kref_get(&handler->kref);
- return handler;
-}
-
-static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
struct acpi_ec_query_handler *handler;
- bool found = false;
mutex_lock(&ec->mutex);
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
- found = true;
- break;
+ kref_get(&handler->kref);
+ mutex_unlock(&ec->mutex);
+ return handler;
}
}
mutex_unlock(&ec->mutex);
- return found ? acpi_ec_get_query_handler(handler) : NULL;
+ return NULL;
}
static void acpi_ec_query_handler_release(struct kref *kref)
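
The ec.c hunk above takes the kref while the list mutex is still held, so the handler cannot be released between the lookup and the reference acquisition. The same pattern in generic form is sketched below; struct item, item_list and item_lock are hypothetical names.

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct item {
	struct kref kref;
	int id;
	struct list_head node;
};

static LIST_HEAD(item_list);
static DEFINE_MUTEX(item_lock);

static struct item *item_get_by_id(int id)
{
	struct item *it;

	mutex_lock(&item_lock);
	list_for_each_entry(it, &item_list, node) {
		if (it->id == id) {
			kref_get(&it->kref);	/* reference taken under the lock */
			mutex_unlock(&item_lock);
			return it;
		}
	}
	mutex_unlock(&item_lock);
	return NULL;
}
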
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index f13ba2c07667..9f4b405a5c20 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -88,6 +88,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
struct resource r;
struct acpi_resource_irq *p = &ares->data.irq;
struct acpi_resource_extended_irq *pext = &ares->data.extended_irq;
+ char ev_name[5];
+ u8 trigger;
if (ares->type == ACPI_RESOURCE_TYPE_END_TAG)
return AE_OK;
@@ -96,14 +98,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
dev_err(dev, "unable to parse IRQ resource\n");
return AE_ERROR;
}
- if (ares->type == ACPI_RESOURCE_TYPE_IRQ)
+ if (ares->type == ACPI_RESOURCE_TYPE_IRQ) {
gsi = p->interrupts[0];
- else
+ trigger = p->triggering;
+ } else {
gsi = pext->interrupts[0];
+ trigger = pext->triggering;
+ }
irq = r.start;
- if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) {
+ switch (gsi) {
+ case 0 ... 255:
+ sprintf(ev_name, "_%c%02X",
+ trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);
+
+ if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
+ break;
+ /* fall through */
+ default:
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle)))
+ break;
+
dev_err(dev, "cannot locate _EVT method\n");
return AE_ERROR;
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index f59d0b9e2683..913613cf5c53 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -18,6 +18,8 @@
#ifndef _ACPI_INTERNAL_H_
#define _ACPI_INTERNAL_H_
+#include <linux/idr.h>
+
#define PREFIX "ACPI: "
int early_acpi_osi_init(void);
@@ -97,9 +99,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
extern struct list_head acpi_bus_id_list;
+#define ACPI_MAX_DEVICE_INSTANCES 4096
+
struct acpi_device_bus_id {
- char bus_id[15];
- unsigned int instance_no;
+ const char *bus_id;
+ struct ida instance_ida;
struct list_head node;
};
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 8340c81b258b..cb88f3b43a94 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1535,7 +1535,7 @@ static ssize_t format1_show(struct device *dev,
le16_to_cpu(nfit_dcr->dcr->code));
break;
}
- if (rc != ENXIO)
+ if (rc != -ENXIO)
break;
}
mutex_unlock(&acpi_desc->init_mutex);
@@ -1773,9 +1773,17 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
dev_set_drvdata(&adev_dimm->dev, nfit_mem);
/*
- * Until standardization materializes we need to consider 4
- * different command sets. Note, that checking for function0 (bit0)
- * tells us if any commands are reachable through this GUID.
+ * There are 4 "legacy" NVDIMM command sets
+ * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before
+ * an EFI working group was established to constrain this
+ * proliferation. The nfit driver probes for the supported command
+ * set by GUID. Note, if you're a platform developer looking to add
+ * a new command set to this probe, consider using an existing set,
+ * or otherwise seek approval to publish the command set at
+ * http://www.uefi.org/RFIC_LIST.
+ *
+ * Note, that checking for function0 (bit0) tells us if any commands
+ * are reachable through this GUID.
*/
for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
@@ -1798,6 +1806,8 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
dsm_mask &= ~(1 << 8);
} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
dsm_mask = 0xffffffff;
+ } else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) {
+ dsm_mask = 0x1f;
} else {
dev_dbg(dev, "unknown dimm command family\n");
nfit_mem->family = -1;
@@ -3622,6 +3632,7 @@ static __init int nfit_init(void)
guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
+ guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
nfit_wq = create_singlethread_workqueue("nfit");
if (!nfit_wq)
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 68848fc4b7c9..cc2ec62951de 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -34,11 +34,14 @@
/* https://msdn.microsoft.com/library/windows/hardware/mt604741 */
#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05"
+/* http://www.uefi.org/RFIC_LIST (see "Virtual NVDIMM 0x1901") */
+#define UUID_NFIT_DIMM_N_HYPERV "5746c5f2-a9a2-4264-ad0e-e4ddc9e09e80"
+
#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \
| ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
| ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
-#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_MSFT
+#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
#define NVDIMM_STANDARD_CMDMASK \
(1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \
@@ -75,6 +78,7 @@ enum nfit_uuids {
NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1,
NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT,
+ NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV,
NFIT_SPA_VOLATILE,
NFIT_SPA_PM,
NFIT_SPA_DCR,
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 0da58f0bf7e5..a28ff3cfbc29 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -46,7 +46,7 @@ int acpi_numa __initdata;
int pxm_to_node(int pxm)
{
- if (pxm < 0)
+ if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
return NUMA_NO_NODE;
return pxm_to_node_map[pxm];
}
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index fbc936cf2025..62c0fe9ef412 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -910,13 +910,6 @@ static long __acpi_processor_get_throttling(void *data)
return pr->throttling.acpi_processor_get_throttling(pr);
}
-static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
-{
- if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
- return fn(arg);
- return work_on_cpu(cpu, fn, arg);
-}
-
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
if (!pr)
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 288673cff85e..27db1a968241 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -720,9 +720,6 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
const union acpi_object *obj;
int ret;
- if (!val)
- return -EINVAL;
-
if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) {
ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj);
if (ret)
@@ -732,28 +729,43 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
case DEV_PROP_U8:
if (obj->integer.value > U8_MAX)
return -EOVERFLOW;
- *(u8 *)val = obj->integer.value;
+
+ if (val)
+ *(u8 *)val = obj->integer.value;
+
break;
case DEV_PROP_U16:
if (obj->integer.value > U16_MAX)
return -EOVERFLOW;
- *(u16 *)val = obj->integer.value;
+
+ if (val)
+ *(u16 *)val = obj->integer.value;
+
break;
case DEV_PROP_U32:
if (obj->integer.value > U32_MAX)
return -EOVERFLOW;
- *(u32 *)val = obj->integer.value;
+
+ if (val)
+ *(u32 *)val = obj->integer.value;
+
break;
default:
- *(u64 *)val = obj->integer.value;
+ if (val)
+ *(u64 *)val = obj->integer.value;
+
break;
}
+
+ if (!val)
+ return 1;
} else if (proptype == DEV_PROP_STRING) {
ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj);
if (ret)
return ret;
- *(char **)val = obj->string.pointer;
+ if (val)
+ *(char **)val = obj->string.pointer;
return 1;
} else {
@@ -767,7 +779,7 @@ int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
{
int ret;
- if (!adev)
+ if (!adev || !val)
return -EINVAL;
ret = acpi_data_prop_read_single(&adev->data, propname, proptype, val);
@@ -861,10 +873,20 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
const union acpi_object *items;
int ret;
- if (val && nval == 1) {
+ if (nval == 1 || !val) {
ret = acpi_data_prop_read_single(data, propname, proptype, val);
- if (ret >= 0)
+ /*
+ * The overflow error means that the property is there and it is
+ * single-value, but its type does not match, so return.
+ */
+ if (ret >= 0 || ret == -EOVERFLOW)
return ret;
+
+ /*
+ * Reading this property as a single-value one failed, but its
+ * value may still be represented as one-element array, so
+ * continue.
+ */
}
ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 316a0fc785e3..d3f9a320e880 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -549,7 +549,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
ret = c->preproc(ares, c->preproc_data);
if (ret < 0) {
c->error = ret;
- return AE_CTRL_TERMINATE;
+ return AE_ABORT_METHOD;
} else if (ret > 0) {
return AE_OK;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 1dcc48b9d33c..1e7e2c438acf 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -482,10 +482,10 @@ static void acpi_device_del(struct acpi_device *device)
list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
if (!strcmp(acpi_device_bus_id->bus_id,
acpi_device_hid(device))) {
- if (acpi_device_bus_id->instance_no > 0)
- acpi_device_bus_id->instance_no--;
- else {
+ ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no);
+ if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
list_del(&acpi_device_bus_id->node);
+ kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
}
break;
@@ -585,6 +585,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
if (!device)
return -EINVAL;
+ *device = NULL;
+
status = acpi_get_data_full(handle, acpi_scan_drop_device,
(void **)device, callback);
if (ACPI_FAILURE(status) || !*device) {
@@ -620,12 +622,38 @@ void acpi_bus_put_acpi_device(struct acpi_device *adev)
put_device(&adev->dev);
}
+static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
+{
+ struct acpi_device_bus_id *acpi_device_bus_id;
+
+ /* Find suitable bus_id and instance number in acpi_bus_id_list. */
+ list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
+ if (!strcmp(acpi_device_bus_id->bus_id, dev_id))
+ return acpi_device_bus_id;
+ }
+ return NULL;
+}
+
+static int acpi_device_set_name(struct acpi_device *device,
+ struct acpi_device_bus_id *acpi_device_bus_id)
+{
+ struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
+ int result;
+
+ result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
+ if (result < 0)
+ return result;
+
+ device->pnp.instance_no = result;
+ dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result);
+ return 0;
+}
+
int acpi_device_add(struct acpi_device *device,
void (*release)(struct device *))
{
+ struct acpi_device_bus_id *acpi_device_bus_id;
int result;
- struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
- int found = 0;
if (device->handle) {
acpi_status status;
@@ -651,34 +679,39 @@ int acpi_device_add(struct acpi_device *device,
INIT_LIST_HEAD(&device->del_list);
mutex_init(&device->physical_node_lock);
- new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
- if (!new_bus_id) {
- pr_err(PREFIX "Memory allocation error\n");
- result = -ENOMEM;
- goto err_detach;
- }
-
mutex_lock(&acpi_device_lock);
- /*
- * Find suitable bus_id and instance number in acpi_bus_id_list
- * If failed, create one and link it into acpi_bus_id_list
- */
- list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
- if (!strcmp(acpi_device_bus_id->bus_id,
- acpi_device_hid(device))) {
- acpi_device_bus_id->instance_no++;
- found = 1;
- kfree(new_bus_id);
- break;
+
+ acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
+ if (acpi_device_bus_id) {
+ result = acpi_device_set_name(device, acpi_device_bus_id);
+ if (result)
+ goto err_unlock;
+ } else {
+ acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
+ GFP_KERNEL);
+ if (!acpi_device_bus_id) {
+ result = -ENOMEM;
+ goto err_unlock;
}
- }
- if (!found) {
- acpi_device_bus_id = new_bus_id;
- strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
- acpi_device_bus_id->instance_no = 0;
+ acpi_device_bus_id->bus_id =
+ kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
+ if (!acpi_device_bus_id->bus_id) {
+ kfree(acpi_device_bus_id);
+ result = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ida_init(&acpi_device_bus_id->instance_ida);
+
+ result = acpi_device_set_name(device, acpi_device_bus_id);
+ if (result) {
+ kfree_const(acpi_device_bus_id->bus_id);
+ kfree(acpi_device_bus_id);
+ goto err_unlock;
+ }
+
list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
}
- dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
if (device->parent)
list_add_tail(&device->node, &device->parent->children);
@@ -709,9 +742,10 @@ int acpi_device_add(struct acpi_device *device,
if (device->parent)
list_del(&device->node);
list_del(&device->wakeup_list);
+
+ err_unlock:
mutex_unlock(&acpi_device_lock);
- err_detach:
acpi_detach_data(device->handle, acpi_scan_drop_device);
return result;
}
@@ -921,12 +955,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
if (buffer.length && package
&& package->type == ACPI_TYPE_PACKAGE
- && package->package.count) {
- int err = acpi_extract_power_resources(package, 0,
- &ps->resources);
- if (!err)
- device->power.flags.power_resources = 1;
- }
+ && package->package.count)
+ acpi_extract_power_resources(package, 0, &ps->resources);
+
ACPI_FREE(buffer.pointer);
}
@@ -973,14 +1004,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
acpi_bus_init_power_state(device, i);
INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
- if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
- device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
- /* Set defaults for D0 and D3hot states (always valid) */
+ /* Set the defaults for D0 and D3hot (always supported). */
device->power.states[ACPI_STATE_D0].flags.valid = 1;
device->power.states[ACPI_STATE_D0].power = 100;
device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1;
+ /*
+ * Use power resources only if the D0 list of them is populated, because
+ * some platforms may provide _PR3 only to indicate D3cold support and
+ * in those cases the power resources list returned by it may be bogus.
+ */
+ if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) {
+ device->power.flags.power_resources = 1;
+ /*
+ * D3cold is supported if the D3hot list of power resources is
+ * not empty.
+ */
+ if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
+ device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
+ }
+
if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
}
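
The scan.c rework above hands out per-HID instance numbers from a struct ida instead of a monotonic counter, which keeps the numbers unique even when devices come and go out of order. A minimal sketch of the IDA calls it relies on, with hypothetical names and the same 4096-instance bound:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_alloc_instance(void)
{
	/* First free number in [0, 4096); returns a negative errno on failure. */
	return ida_simple_get(&example_ida, 0, 4096, GFP_KERNEL);
}

static void example_free_instance(int instance)
{
	/* Hand the number back so it can be reused by the next device. */
	ida_simple_remove(&example_ida, instance);
}
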
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index f8150c7bfe88..39ee0ca636aa 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -935,13 +935,13 @@ static void __exit interrupt_stats_exit(void)
}
static ssize_t
-acpi_show_profile(struct device *dev, struct device_attribute *attr,
+acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}
-static const struct device_attribute pm_profile_attr =
+static const struct kobj_attribute pm_profile_attr =
__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
static ssize_t hotplug_enabled_show(struct kobject *kobj,
@@ -990,8 +990,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
error = kobject_init_and_add(&hotplug->kobj,
&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
- if (error)
+ if (error) {
+ kobject_put(&hotplug->kobj);
goto err_out;
+ }
kobject_uevent(&hotplug->kobj, KOBJ_ADD);
return;
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index a3d012b08fc5..041ee2381927 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -732,7 +732,7 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
}
/*
- * acpi_table_init()
+ * acpi_locate_initial_tables()
*
* find RSDP, find and checksum SDT/XSDT.
* checksum all tables, print SDT/XSDT
@@ -740,7 +740,7 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
* result: sdt_entry[] is initialized
*/
-int __init acpi_table_init(void)
+int __init acpi_locate_initial_tables(void)
{
acpi_status status;
@@ -755,9 +755,45 @@ int __init acpi_table_init(void)
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
if (ACPI_FAILURE(status))
return -EINVAL;
- acpi_table_initrd_scan();
+ return 0;
+}
+
+void __init acpi_reserve_initial_tables(void)
+{
+ int i;
+
+ for (i = 0; i < ACPI_MAX_TABLES; i++) {
+ struct acpi_table_desc *table_desc = &initial_tables[i];
+ u64 start = table_desc->address;
+ u64 size = table_desc->length;
+
+ if (!start || !size)
+ break;
+
+ pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n",
+ table_desc->signature.ascii, start, start + size - 1);
+
+ memblock_reserve(start, size);
+ }
+}
+
+void __init acpi_table_init_complete(void)
+{
+ acpi_table_initrd_scan();
check_multiple_madt();
+}
+
+int __init acpi_table_init(void)
+{
+ int ret;
+
+ ret = acpi_locate_initial_tables();
+ if (ret)
+ return ret;
+
+ acpi_table_init_complete();
+
return 0;
}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 551b71a24b85..3bdab6eb33bf 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -188,6 +188,8 @@ struct acpi_thermal {
int tz_enabled;
int kelvin_offset;
struct work_struct thermal_check_work;
+ struct mutex thermal_check_lock;
+ refcount_t thermal_check_count;
};
/* --------------------------------------------------------------------------
@@ -513,17 +515,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
return 0;
}
-static void acpi_thermal_check(void *data)
-{
- struct acpi_thermal *tz = data;
-
- if (!tz->tz_enabled)
- return;
-
- thermal_zone_device_update(tz->thermal_zone,
- THERMAL_EVENT_UNSPECIFIED);
-}
-
/* sys I/F for generic thermal sysfs support */
static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
@@ -557,6 +548,8 @@ static int thermal_get_mode(struct thermal_zone_device *thermal,
return 0;
}
+static void acpi_thermal_check_fn(struct work_struct *work);
+
static int thermal_set_mode(struct thermal_zone_device *thermal,
enum thermal_device_mode mode)
{
@@ -582,7 +575,7 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"%s kernel ACPI thermal control\n",
tz->tz_enabled ? "Enable" : "Disable"));
- acpi_thermal_check(tz);
+ acpi_thermal_check_fn(&tz->thermal_check_work);
}
return 0;
}
@@ -951,6 +944,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
Driver Interface
-------------------------------------------------------------------------- */
+static void acpi_queue_thermal_check(struct acpi_thermal *tz)
+{
+ if (!work_pending(&tz->thermal_check_work))
+ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+}
+
static void acpi_thermal_notify(struct acpi_device *device, u32 event)
{
struct acpi_thermal *tz = acpi_driver_data(device);
@@ -961,17 +960,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
switch (event) {
case ACPI_THERMAL_NOTIFY_TEMPERATURE:
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
break;
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
@@ -1071,7 +1070,27 @@ static void acpi_thermal_check_fn(struct work_struct *work)
{
struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
thermal_check_work);
- acpi_thermal_check(tz);
+
+ if (!tz->tz_enabled)
+ return;
+ /*
+ * In general, it is not sufficient to check the pending bit, because
+ * subsequent instances of this function may be queued after one of them
+ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just
+ * one of them is running, though, because it may have done the actual
+ * check some time ago, so allow at least one of them to block on the
+ * mutex while another one is running the update.
+ */
+ if (!refcount_dec_not_one(&tz->thermal_check_count))
+ return;
+
+ mutex_lock(&tz->thermal_check_lock);
+
+ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
+
+ refcount_inc(&tz->thermal_check_count);
+
+ mutex_unlock(&tz->thermal_check_lock);
}
static int acpi_thermal_add(struct acpi_device *device)
@@ -1103,6 +1122,8 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
+ refcount_set(&tz->thermal_check_count, 3);
+ mutex_init(&tz->thermal_check_lock);
INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
@@ -1168,7 +1189,7 @@ static int acpi_thermal_resume(struct device *dev)
tz->state.active |= tz->trips.active[i].flags.enabled;
}
- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+ acpi_queue_thermal_check(tz);
return AE_OK;
}
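
The thermal.c comment above describes an "at most one update running, at most one waiter" scheme built from a refcount_t and a mutex. The same pattern in generic form (all names hypothetical): the counter starts at 3, so one caller may run the update, a second may block on the mutex, and everybody else bails out early.

#include <linux/mutex.h>
#include <linux/refcount.h>

struct checker {
	refcount_t slots;
	struct mutex lock;
};

static void checker_init(struct checker *c)
{
	refcount_set(&c->slots, 3);	/* 1 running + 1 waiter + the floor of 1 */
	mutex_init(&c->lock);
}

static void checker_run(struct checker *c, void (*update)(void))
{
	/* Fails once only the floor value is left, i.e. two callers are in. */
	if (!refcount_dec_not_one(&c->slots))
		return;

	mutex_lock(&c->lock);
	update();			/* the potentially slow re-evaluation */
	refcount_inc(&c->slots);	/* free the slot we consumed */
	mutex_unlock(&c->lock);
}
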
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 214c4e2e8ade..86ffb4af4afc 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -274,6 +274,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "530U4E/540U4E"),
},
},
+ /* https://bugs.launchpad.net/bugs/1894667 */
+ {
+ .callback = video_detect_force_video,
+ .ident = "HP 635 Notebook",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP 635 Notebook PC"),
+ },
+ },
/* Non win8 machines which need native backlight nevertheless */
{
@@ -328,6 +337,25 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
+ {
+ .callback = video_detect_force_native,
+ .ident = "Acer Aspire 5738z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
+ DMI_MATCH(DMI_BOARD_NAME, "JV50"),
+ },
+ },
+ {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=207835 */
+ .callback = video_detect_force_native,
+ .ident = "Acer TravelMate 5735Z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5735Z"),
+ DMI_MATCH(DMI_BOARD_NAME, "BA51_MV"),
+ },
+ },
/*
* Desktops which falsely report a backlight and which our heuristics
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 41b706403ef7..2380ebd9b7fd 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -284,10 +284,11 @@ static int amba_remove(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *drv = to_amba_driver(dev->driver);
- int ret;
+ int ret = 0;
pm_runtime_get_sync(dev);
- ret = drv->remove(pcdev);
+ if (drv->remove)
+ ret = drv->remove(pcdev);
pm_runtime_put_noidle(dev);
/* Undo the runtime PM settings in amba_probe() */
@@ -304,7 +305,9 @@ static int amba_remove(struct device *dev)
static void amba_shutdown(struct device *dev)
{
struct amba_driver *drv = to_amba_driver(dev->driver);
- drv->shutdown(to_amba_device(dev));
+
+ if (drv->shutdown)
+ drv->shutdown(to_amba_device(dev));
}
/**
@@ -317,12 +320,13 @@ static void amba_shutdown(struct device *dev)
*/
int amba_driver_register(struct amba_driver *drv)
{
- drv->drv.bus = &amba_bustype;
+ if (!drv->probe)
+ return -EINVAL;
-#define SETFN(fn) if (drv->fn) drv->drv.fn = amba_##fn
- SETFN(probe);
- SETFN(remove);
- SETFN(shutdown);
+ drv->drv.bus = &amba_bustype;
+ drv->drv.probe = amba_probe;
+ drv->drv.remove = amba_remove;
+ drv->drv.shutdown = amba_shutdown;
return driver_register(&drv->drv);
}
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index cf4367135a00..cda4f7eb58ea 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -285,7 +285,7 @@ struct binder_device {
struct binder_work {
struct list_head entry;
- enum {
+ enum binder_work_type {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
BINDER_WORK_RETURN_ERROR,
@@ -895,27 +895,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked(
return w;
}
-/**
- * binder_dequeue_work_head() - Dequeues the item at head of list
- * @proc: binder_proc associated with list
- * @list: list to dequeue head
- *
- * Removes the head of the list if there are items on the list
- *
- * Return: pointer dequeued binder_work, NULL if list was empty
- */
-static struct binder_work *binder_dequeue_work_head(
- struct binder_proc *proc,
- struct list_head *list)
-{
- struct binder_work *w;
-
- binder_inner_proc_lock(proc);
- w = binder_dequeue_work_head_ilocked(list);
- binder_inner_proc_unlock(proc);
- return w;
-}
-
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
@@ -2862,6 +2841,12 @@ static void binder_transaction(struct binder_proc *proc,
goto err_dead_binder;
}
e->to_node = target_node->debug_id;
+ if (WARN_ON(proc == target_proc)) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_invalid_target_handle;
+ }
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
@@ -3366,10 +3351,17 @@ static int binder_thread_write(struct binder_proc *proc,
struct binder_node *ctx_mgr_node;
mutex_lock(&context->context_mgr_node_lock);
ctx_mgr_node = context->binder_context_mgr_node;
- if (ctx_mgr_node)
+ if (ctx_mgr_node) {
+ if (ctx_mgr_node->proc == proc) {
+ binder_user_error("%d:%d context manager tried to acquire desc 0\n",
+ proc->pid, thread->pid);
+ mutex_unlock(&context->context_mgr_node_lock);
+ return -EINVAL;
+ }
ret = binder_inc_ref_for_node(
proc, ctx_mgr_node,
strong, NULL, &rdata);
+ }
mutex_unlock(&context->context_mgr_node_lock);
}
if (ret)
@@ -4229,13 +4221,17 @@ static void binder_release_work(struct binder_proc *proc,
struct list_head *list)
{
struct binder_work *w;
+ enum binder_work_type wtype;
while (1) {
- w = binder_dequeue_work_head(proc, list);
+ binder_inner_proc_lock(proc);
+ w = binder_dequeue_work_head_ilocked(list);
+ wtype = w ? w->type : 0;
+ binder_inner_proc_unlock(proc);
if (!w)
return;
- switch (w->type) {
+ switch (wtype) {
case BINDER_WORK_TRANSACTION: {
struct binder_transaction *t;
@@ -4269,9 +4265,11 @@ static void binder_release_work(struct binder_proc *proc,
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
} break;
+ case BINDER_WORK_NODE:
+ break;
default:
pr_err("unexpected work type, %d, not freed\n",
- w->type);
+ wtype);
break;
}
}
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index c3fdd79c4082..3371b986e3b4 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -970,8 +970,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mm = alloc->vma_vm_mm;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!down_write_trylock(&mm->mmap_sem))
- goto err_down_write_mmap_sem_failed;
+ if (!down_read_trylock(&mm->mmap_sem))
+ goto err_down_read_mmap_sem_failed;
vma = binder_alloc_get_vma(alloc);
list_lru_isolate(lru, item);
@@ -986,8 +986,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- up_write(&mm->mmap_sem);
- mmput(mm);
+ up_read(&mm->mmap_sem);
+ mmput_async(mm);
trace_binder_unmap_kernel_start(alloc, index);
@@ -1001,7 +1001,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mutex_unlock(&alloc->mutex);
return LRU_REMOVED_RETRY;
-err_down_write_mmap_sem_failed:
+err_down_read_mmap_sem_failed:
mmput_async(mm);
err_mmget:
err_page_already_freed:
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 583e366be7e2..505f8c316818 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -72,7 +72,7 @@ struct acard_sg {
__le32 size; /* bit 31 (EOT) max==0x10000 (64k) */
};
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int acard_ahci_port_start(struct ata_port *ap);
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -257,7 +257,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
return si;
}
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
@@ -295,6 +295,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+ return AC_ERR_OK;
}
static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 0192cab1b862..e58b336d1324 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -370,6 +370,10 @@ static int brcm_ahci_resume(struct device *dev)
if (ret)
return ret;
+ ret = ahci_platform_enable_regulators(hpriv);
+ if (ret)
+ goto out_disable_clks;
+
brcm_sata_init(priv);
brcm_sata_phys_enable(priv);
brcm_sata_alpm_init(hpriv);
@@ -399,6 +403,8 @@ out_disable_platform_phys:
ahci_platform_disable_phys(hpriv);
out_disable_phys:
brcm_sata_phys_disable(priv);
+ ahci_platform_disable_regulators(hpriv);
+out_disable_clks:
ahci_platform_disable_clks(hpriv);
return ret;
}
@@ -471,6 +477,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (ret)
goto out_reset;
+ ret = ahci_platform_enable_regulators(hpriv);
+ if (ret)
+ goto out_disable_clks;
+
/* Must be first so as to configure endianness including that
* of the standard AHCI register space.
*/
@@ -480,7 +490,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
if (!priv->port_mask) {
ret = -ENODEV;
- goto out_disable_clks;
+ goto out_disable_regulators;
}
/* Must be done before ahci_platform_enable_phys() */
@@ -505,6 +515,8 @@ out_disable_platform_phys:
ahci_platform_disable_phys(hpriv);
out_disable_phys:
brcm_sata_phys_disable(priv);
+out_disable_regulators:
+ ahci_platform_disable_regulators(hpriv);
out_disable_clks:
ahci_platform_disable_clks(hpriv);
out_reset:
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 2bdb250a2142..f1153e7ba3b3 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -73,7 +73,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
-static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
@@ -1640,7 +1640,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
return sata_pmp_qc_defer_cmd_switch(qc);
}
-static void ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
@@ -1676,6 +1676,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+ return AC_ERR_OK;
}
static void ahci_fbs_dec_intr(struct ata_port *ap)
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 522b543f718d..6a55aac0c60f 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -544,11 +544,13 @@ int ahci_platform_init_host(struct platform_device *pdev,
int i, irq, n_ports, rc;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
+ if (irq < 0) {
if (irq != -EPROBE_DEFER)
dev_err(dev, "no irq\n");
return irq;
}
+ if (!irq)
+ return -EINVAL;
hpriv->irq = irq;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 75d582ca917f..db1d86af21b4 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -57,7 +57,6 @@
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
-#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
@@ -4493,9 +4492,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
{ "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
- /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
- SD7SN6S256G and SD8SN8U256G */
- { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
+ /* Sandisk SD7/8/9s lock up hard on large trims */
+ { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, },
/* devices which puke on READ_NATIVE_MAX */
{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
@@ -4998,7 +4996,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
return ATA_DEFER_LINK;
}
-void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
+enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
+{
+ return AC_ERR_OK;
+}
/**
* ata_sg_init - Associate command with scatter-gather table.
@@ -5485,7 +5486,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
return;
}
- ap->ops->qc_prep(qc);
+ qc->err_mask |= ap->ops->qc_prep(qc);
+ if (unlikely(qc->err_mask))
+ goto err;
trace_ata_qc_issue(qc);
qc->err_mask |= ap->ops->qc_issue(qc);
if (unlikely(qc->err_mask))
@@ -6610,7 +6613,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
/* perform each probe asynchronously */
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- async_schedule(async_port_probe, ap);
+ ap->cookie = async_schedule(async_port_probe, ap);
}
return 0;
@@ -6750,11 +6753,11 @@ void ata_host_detach(struct ata_host *host)
{
int i;
- /* Ensure ata_port probe has completed */
- async_synchronize_full();
-
- for (i = 0; i < host->n_ports; i++)
+ for (i = 0; i < host->n_ports; i++) {
+ /* Ensure ata_port probe has completed */
+ async_synchronize_cookie(host->ports[i]->cookie + 1);
ata_port_detach(host->ports[i]);
+ }
/* the host is dead now, dissociate ACPI */
ata_acpi_dissociate(host);
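
The libata-core.c change above gives ->qc_prep() a return value that ata_qc_issue() folds into qc->err_mask, so drivers can fail command preparation gracefully instead of hitting a BUG(). A hypothetical driver hook following the new contract might look like the sketch below; the DSM rejection is only an illustration of the error path, not something the patch mandates.

#include <linux/libata.h>

static enum ata_completion_errors foo_qc_prep(struct ata_queued_cmd *qc)
{
	/* Nothing to prepare for non-DMA commands. */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	/* A command this hypothetical hardware cannot express becomes a
	 * soft failure reported through qc->err_mask. */
	if (qc->tf.command == ATA_CMD_DSM)
		return AC_ERR_INVALID;

	return AC_ERR_OK;
}
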
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 2ae1799f4992..51eeaea65833 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -764,6 +764,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
if (dev->flags & ATA_DFLAG_DETACH) {
detach = 1;
+ rc = -ENODEV;
goto fail;
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3a64fa4aaf7e..e7af41d95490 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2391,6 +2391,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
{
+ struct ata_device *dev = args->dev;
u16 min_io_sectors;
rbuf[1] = 0xb0;
@@ -2416,7 +2417,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
* with the unmap bit set.
*/
if (ata_id_has_trim(args->id)) {
- put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]);
+ u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
+
+ if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M)
+ max_blocks = 128 << (20 - SECTOR_SHIFT);
+
+ put_unaligned_be64(max_blocks, &rbuf[36]);
put_unaligned_be32(1, &rbuf[28]);
}
@@ -3995,12 +4001,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
- const u8 *p;
u8 pg, spg;
unsigned six_byte, pg_len, hdr_len, bd_len;
int len;
u16 fp = (u16)-1;
u8 bp = 0xff;
+ u8 buffer[64];
+ const u8 *p = buffer;
VPRINTK("ENTER\n");
@@ -4034,12 +4041,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
goto invalid_param_len;
- p = page_address(sg_page(scsi_sglist(scmd)));
-
/* Move past header and block descriptors. */
if (len < hdr_len)
goto invalid_param_len;
+ if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd),
+ buffer, sizeof(buffer)))
+ goto invalid_param_len;
+
if (six_byte)
bd_len = p[3];
else
@@ -4570,22 +4579,19 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
*/
shost->max_host_blocked = 1;
- rc = scsi_add_host_with_dma(ap->scsi_host,
- &ap->tdev, ap->host->dev);
+ rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev);
if (rc)
- goto err_add;
+ goto err_alloc;
}
return 0;
- err_add:
- scsi_host_put(host->ports[i]->scsi_host);
err_alloc:
while (--i >= 0) {
struct Scsi_Host *shost = host->ports[i]->scsi_host;
+ /* scsi_host_put() is in ata_devres_release() */
scsi_remove_host(shost);
- scsi_host_put(shost);
}
return rc;
}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 873cc0906055..7484ffdabd54 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2695,12 +2695,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
ata_bmdma_fill_sg(qc);
+
+ return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
@@ -2713,12 +2715,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
ata_bmdma_fill_sg_dumb(qc);
+
+ return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index ebecab8c3f36..7c1c399450f3 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -817,12 +817,19 @@ static int arasan_cf_probe(struct platform_device *pdev)
else
quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */
- /* if irq is 0, support only PIO */
- acdev->irq = platform_get_irq(pdev, 0);
- if (acdev->irq)
+ /*
+ * If there's an error getting IRQ (or we do get IRQ0),
+ * support only PIO
+ */
+ ret = platform_get_irq(pdev, 0);
+ if (ret > 0) {
+ acdev->irq = ret;
irq_handler = arasan_cf_interrupt;
- else
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
+ } else {
quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
+ }
acdev->pbase = res->start;
acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start,
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 0b0d93065f5a..867621f8c387 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -169,8 +169,12 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
return -ENOMEM;
irq = platform_get_irq(pdev, 0);
- if (irq)
+ if (irq > 0)
irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+ else if (irq < 0)
+ return irq;
+ else
+ return -EINVAL;
/* Setup expansion bus chip selects */
*data->cs0_cfg = data->cs0_bits;
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 9588e685d994..765b99319d3c 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -507,7 +507,7 @@ static int pata_macio_cable_detect(struct ata_port *ap)
return ATA_CBL_PATA40;
}
-static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
{
unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
struct ata_port *ap = qc->ap;
@@ -520,7 +520,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
__func__, qc, qc->flags, write, qc->dev->devno);
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
table = (struct dbdma_cmd *) priv->dma_table_cpu;
@@ -565,6 +565,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
table->command = cpu_to_le16(DBDMA_STOP);
dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
+
+ return AC_ERR_OK;
}
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index e8b6a2e464c9..5b1458ca986b 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -58,25 +58,27 @@ static void pxa_ata_dma_irq(void *d)
/*
* Prepare taskfile for submission.
*/
-static void pxa_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
{
struct pata_pxa_data *pd = qc->ap->private_data;
struct dma_async_tx_descriptor *tx;
enum dma_transfer_direction dir;
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
DMA_PREP_INTERRUPT);
if (!tx) {
ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
- return;
+ return AC_ERR_OK;
}
tx->callback = pxa_ata_dma_irq;
tx->callback_param = pd;
pd->dma_cookie = dmaengine_submit(tx);
+
+ return AC_ERR_OK;
}
/*
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index f1e873a37465..096b4771b19d 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -132,7 +132,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
static int adma_port_start(struct ata_port *ap);
static void adma_port_stop(struct ata_port *ap);
-static void adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void adma_freeze(struct ata_port *ap);
@@ -311,7 +311,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
return i;
}
-static void adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
{
struct adma_port_priv *pp = qc->ap->private_data;
u8 *buf = pp->pkt;
@@ -322,7 +322,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
adma_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
- return;
+ return AC_ERR_OK;
buf[i++] = 0; /* Response flags */
buf[i++] = 0; /* reserved */
@@ -387,6 +387,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
printk("%s\n", obuf);
}
#endif
+ return AC_ERR_OK;
}
static inline void adma_packet_start(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index ae52a45fab5f..8b3be0ff91cb 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -507,7 +507,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
return num_prde;
}
-static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sata_fsl_port_priv *pp = ap->private_data;
@@ -553,6 +553,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
desc_info, ttl_dwords, num_prde);
+
+ return AC_ERR_OK;
}
static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 9b6d7930d1c7..6c7ddc037fce 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -472,7 +472,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
prd[-1].flags |= PRD_END;
}
-static void inic_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
{
struct inic_port_priv *pp = qc->ap->private_data;
struct inic_pkt *pkt = pp->pkt;
@@ -532,6 +532,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc)
inic_fill_sg(prd, qc);
pp->cpb_tbl[0] = pp->pkt_dma;
+
+ return AC_ERR_OK;
}
static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index ab2e9f62ddc1..57ef11ecbb9b 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -605,8 +605,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
-static void mv_qc_prep(struct ata_queued_cmd *qc);
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
@@ -2044,7 +2044,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
* LOCKING:
* Inherited from caller.
*/
-static void mv_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
@@ -2056,15 +2056,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
switch (tf->protocol) {
case ATA_PROT_DMA:
if (tf->command == ATA_CMD_DSM)
- return;
+ return AC_ERR_OK;
/* fall-thru */
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
mv_rw_multi_errata_sata24(qc);
- return;
+ return AC_ERR_OK;
default:
- return;
+ return AC_ERR_OK;
}
/* Fill in command request block
@@ -2111,12 +2111,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
* non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
* of which are defined/used by Linux. If we get here, this
* driver needs work.
- *
- * FIXME: modify libata to give qc_prep a return value and
- * return error here.
*/
- BUG_ON(tf->command);
- break;
+ ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
+ tf->command);
+ return AC_ERR_INVALID;
}
mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
@@ -2129,8 +2127,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
mv_fill_sg(qc);
+
+ return AC_ERR_OK;
}
/**
@@ -2145,7 +2145,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
* LOCKING:
* Inherited from caller.
*/
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
@@ -2156,9 +2156,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ))
- return;
+ return AC_ERR_OK;
if (tf->command == ATA_CMD_DSM)
- return; /* use bmdma for this */
+ return AC_ERR_OK; /* use bmdma for this */
/* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2199,8 +2199,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
);
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
mv_fill_sg(qc);
+
+ return AC_ERR_OK;
}
/**
@@ -4108,6 +4110,10 @@ static int mv_platform_probe(struct platform_device *pdev)
n_ports = mv_platform_data->n_ports;
irq = platform_get_irq(pdev, 0);
}
+ if (irq < 0)
+ return irq;
+ if (!irq)
+ return -EINVAL;
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 761577d57ff3..2248a40631bf 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -313,7 +313,7 @@ static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
@@ -335,7 +335,7 @@ static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
@@ -1365,7 +1365,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
return 1;
}
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
@@ -1377,7 +1377,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
(qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
ata_bmdma_qc_prep(qc);
- return;
+ return AC_ERR_OK;
}
cpb->resp_flags = NV_CPB_RESP_DONE;
@@ -1409,6 +1409,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
cpb->ctl_flags = ctl_flags;
wmb();
cpb->resp_flags = 0;
+
+ return AC_ERR_OK;
}
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
@@ -1972,17 +1974,19 @@ static int nv_swncq_port_start(struct ata_port *ap)
return 0;
}
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
if (qc->tf.protocol != ATA_PROT_NCQ) {
ata_bmdma_qc_prep(qc);
- return;
+ return AC_ERR_OK;
}
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
nv_swncq_fill_sg(qc);
+
+ return AC_ERR_OK;
}
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
@@ -2118,7 +2122,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
pp->dhfis_bits &= ~done_mask;
pp->dmafis_bits &= ~done_mask;
pp->sdbfis_bits |= done_mask;
- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
if (!ap->qc_active) {
DPRINTK("over\n");
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index d032bf657f70..29d2bb465f60 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -155,7 +155,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int pdc_common_port_start(struct ata_port *ap);
static int pdc_sata_port_start(struct ata_port *ap);
-static void pdc_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
@@ -649,7 +649,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
-static void pdc_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
{
struct pdc_port_priv *pp = qc->ap->private_data;
unsigned int i;
@@ -681,6 +681,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
default:
break;
}
+
+ return AC_ERR_OK;
}
static int pdc_is_sataii_tx4(unsigned long flags)
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 1fe941688e95..a66d10628c18 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -116,7 +116,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
-static void qs_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
static void qs_freeze(struct ata_port *ap);
@@ -276,7 +276,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
return si;
}
-static void qs_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
{
struct qs_port_priv *pp = qc->ap->private_data;
u8 dflags = QS_DF_PORD, *buf = pp->pkt;
@@ -288,7 +288,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
qs_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
- return;
+ return AC_ERR_OK;
nelem = qs_fill_sg(qc);
@@ -311,6 +311,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
/* frame information structure (FIS) */
ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
+
+ return AC_ERR_OK;
}
static inline void qs_packet_start(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 03867f539f3a..4dcdf8ee0055 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -124,7 +124,7 @@
/* Descriptor table word 0 bit (when DTA32M = 1) */
#define SATA_RCAR_DTEND BIT(0)
-#define SATA_RCAR_DMA_BOUNDARY 0x1FFFFFFEUL
+#define SATA_RCAR_DMA_BOUNDARY 0x1FFFFFFFUL
/* Gen2 Physical Layer Control Registers */
#define RCAR_GEN2_PHY_CTL1_REG 0x1704
@@ -554,12 +554,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
}
-static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
sata_rcar_bmdma_fill_sg(qc);
+
+ return AC_ERR_OK;
}
static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
@@ -909,7 +911,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
- goto err_pm_disable;
+ goto err_pm_put;
host = ata_host_alloc(dev, 1);
if (!host) {
@@ -940,7 +942,6 @@ static int sata_rcar_probe(struct platform_device *pdev)
err_pm_put:
pm_runtime_put(dev);
-err_pm_disable:
pm_runtime_disable(dev);
return ret;
}
@@ -994,8 +995,10 @@ static int sata_rcar_resume(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(dev);
return ret;
+ }
if (priv->type == RCAR_GEN3_SATA) {
sata_rcar_init_module(priv);
@@ -1020,8 +1023,10 @@ static int sata_rcar_restore(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(dev);
return ret;
+ }
sata_rcar_setup_port(host);
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index ed76f070d21e..82adaf02887f 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -119,7 +119,7 @@ static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
-static void sil_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
static void sil_bmdma_setup(struct ata_queued_cmd *qc);
static void sil_bmdma_start(struct ata_queued_cmd *qc);
static void sil_bmdma_stop(struct ata_queued_cmd *qc);
@@ -333,12 +333,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
-static void sil_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
+ return AC_ERR_OK;
sil_fill_sg(qc);
+
+ return AC_ERR_OK;
}
static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 319f517137cd..7a8ca81e52bf 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -336,7 +336,7 @@ static void sil24_dev_config(struct ata_device *dev);
static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
static int sil24_qc_defer(struct ata_queued_cmd *qc);
-static void sil24_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
static void sil24_pmp_attach(struct ata_port *ap);
@@ -840,7 +840,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc)
return ata_std_qc_defer(qc);
}
-static void sil24_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sil24_port_priv *pp = ap->private_data;
@@ -884,6 +884,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
if (qc->flags & ATA_QCFLAG_DMAMAP)
sil24_fill_sg(qc, sge);
+
+ return AC_ERR_OK;
}
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 405e606a234d..0d742457925e 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -218,7 +218,7 @@ static void pdc_error_handler(struct ata_port *ap);
static void pdc_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static int pdc_port_start(struct ata_port *ap);
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static unsigned int pdc20621_dimm_init(struct ata_host *host);
@@ -546,7 +546,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
}
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
{
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
@@ -558,6 +558,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
default:
break;
}
+
+ return AC_ERR_OK;
}
static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
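
The hunks above all apply the same conversion: each driver's ->qc_prep hook now returns enum ata_completion_errors instead of void, returning AC_ERR_OK on paths that previously just fell through and AC_ERR_INVALID where sata_mv used to hit a BUG_ON(). The standalone program below is only a sketch of that contract; the struct and function names are invented for illustration, the enum values are chosen to mirror libata's, and none of this is kernel code.

/*
 * Standalone model of the new ->qc_prep contract: prep returns an error
 * code instead of void, and the caller can fail the command instead of
 * crashing.  Names and values are illustrative only.
 */
#include <stdio.h>

enum ata_completion_errors {
	AC_ERR_OK      = 0,        /* prep succeeded */
	AC_ERR_INVALID = (1 << 7), /* unsupported/invalid request */
};

struct fake_qc { int command_supported; };

static enum ata_completion_errors fake_qc_prep(struct fake_qc *qc)
{
	if (!qc->command_supported)
		return AC_ERR_INVALID;	/* was a BUG_ON() before the conversion */
	/* ... build the command packet, fill the S/G table ... */
	return AC_ERR_OK;
}

int main(void)
{
	struct fake_qc good = { .command_supported = 1 };
	struct fake_qc bad  = { .command_supported = 0 };

	printf("good: %d\n", fake_qc_prep(&good));	/* 0   -> command issued */
	printf("bad:  %d\n", fake_qc_prep(&bad));	/* 128 -> command failed, no BUG */
	return 0;
}
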
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index afebeb1c3e1e..723bad1201cc 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -432,9 +432,15 @@ static int atmtcp_remove_persistent(int itf)
return -EMEDIUMTYPE;
}
dev_data = PRIV(dev);
- if (!dev_data->persist) return 0;
+ if (!dev_data->persist) {
+ atm_dev_put(dev);
+ return 0;
+ }
dev_data->persist = 0;
- if (PRIV(dev)->vcc) return 0;
+ if (PRIV(dev)->vcc) {
+ atm_dev_put(dev);
+ return 0;
+ }
kfree(dev_data);
atm_dev_put(dev);
atm_dev_deregister(dev);
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 7323e9210f4b..1409d48affb7 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2243,7 +2243,7 @@ static int eni_init_one(struct pci_dev *pci_dev,
rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
if (rc < 0)
- goto out;
+ goto err_disable;
rc = -ENOMEM;
eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);
@@ -2279,7 +2279,8 @@ out:
return rc;
err_eni_release:
- eni_do_release(dev);
+ dev->phy = NULL;
+ iounmap(ENI_DEV(dev)->ioaddr);
err_unregister:
atm_dev_deregister(dev);
err_free_consistent:
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 112b1001c269..ef395b238816 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1013,6 +1013,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
error = make_rate (pcr, r, &tmc0, NULL);
if (error) {
kfree(tc);
+ kfree(vcc);
return error;
}
}
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index 0a67487c0b1d..a2ecb4190f78 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -261,7 +261,7 @@ static int idt77105_start(struct atm_dev *dev)
{
unsigned long flags;
- if (!(dev->dev_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL)))
+ if (!(dev->phy_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL)))
return -ENOMEM;
PRIV(dev)->dev = dev;
spin_lock_irqsave(&idt77105_priv_lock, flags);
@@ -336,7 +336,7 @@ static int idt77105_stop(struct atm_dev *dev)
else
idt77105_all = walk->next;
dev->phy = NULL;
- dev->dev_data = NULL;
+ dev->phy_data = NULL;
kfree(walk);
break;
}
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 6e737142ceaa..3e00ab8a8890 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3607,7 +3607,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
- return err;
+ goto err_out_disable_pdev;
}
card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 5f8e009b2da1..34e6e4b90f74 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -2238,6 +2238,7 @@ static int lanai_dev_open(struct atm_dev *atmdev)
conf1_write(lanai);
#endif
iounmap(lanai->base);
+ lanai->base = NULL;
error_pci:
pci_disable_device(lanai->pci);
error:
@@ -2250,6 +2251,8 @@ static int lanai_dev_open(struct atm_dev *atmdev)
static void lanai_dev_close(struct atm_dev *atmdev)
{
struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data;
+ if (lanai->base==NULL)
+ return;
printk(KERN_INFO DEV_LABEL "(itf %d): shutting down interface\n",
lanai->number);
lanai_timed_poll_stop(lanai);
@@ -2559,7 +2562,7 @@ static int lanai_init_one(struct pci_dev *pci,
struct atm_dev *atmdev;
int result;
- lanai = kmalloc(sizeof(*lanai), GFP_KERNEL);
+ lanai = kzalloc(sizeof(*lanai), GFP_KERNEL);
if (lanai == NULL) {
printk(KERN_ERR DEV_LABEL
": couldn't allocate dev_data structure!\n");
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index cbec9adc01c7..0d3754a4ac20 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -1705,6 +1705,8 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
atomic_inc(&vcc->stats->tx_err);
+ dma_unmap_single(&card->pcidev->dev, NS_PRV_DMA(skb), skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
return -EIO;
}
diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
index 4fa13a807873..cf517fd148ea 100644
--- a/drivers/atm/uPD98402.c
+++ b/drivers/atm/uPD98402.c
@@ -210,7 +210,7 @@ static void uPD98402_int(struct atm_dev *dev)
static int uPD98402_start(struct atm_dev *dev)
{
DPRINTK("phy_start\n");
- if (!(dev->dev_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
+ if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
return -ENOMEM;
spin_lock_init(&PRIV(dev)->lock);
memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats));
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 21393ec3b9a4..194370ae37dd 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -117,8 +117,7 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv)
{
struct ht16k33_fbdev *fbdev = &priv->fbdev;
- schedule_delayed_work(&fbdev->work,
- msecs_to_jiffies(HZ / fbdev->refresh_rate));
+ schedule_delayed_work(&fbdev->work, HZ / fbdev->refresh_rate);
}
/*
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 7f7c4233cd31..ee4d3b388f44 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -235,7 +235,8 @@ static int try_to_bring_up_master(struct master *master,
ret = master->ops->bind(master->dev);
if (ret < 0) {
devres_release_group(master->dev, NULL);
- dev_info(master->dev, "master bind failed: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_info(master->dev, "master bind failed: %d\n", ret);
return ret;
}
@@ -506,8 +507,9 @@ static int component_bind(struct component *component, struct master *master,
devres_release_group(component->dev, NULL);
devres_release_group(master->dev, NULL);
- dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
- dev_name(component->dev), component->ops, ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
+ dev_name(component->dev), component->ops, ret);
}
return ret;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 928fc1532a70..f7f601858f10 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -93,6 +93,16 @@ void device_links_read_unlock(int not_used)
}
#endif /* !CONFIG_SRCU */
+static bool device_is_ancestor(struct device *dev, struct device *target)
+{
+ while (target->parent) {
+ target = target->parent;
+ if (dev == target)
+ return true;
+ }
+ return false;
+}
+
/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
@@ -106,7 +116,12 @@ static int device_is_dependent(struct device *dev, void *target)
struct device_link *link;
int ret;
- if (dev == target)
+ /*
+ * The "ancestors" check is needed to catch the case when the target
+ * device has not been completely initialized yet and it is still
+ * missing from the list of children of its parent device.
+ */
+ if (dev == target || device_is_ancestor(dev, target))
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
@@ -3333,9 +3348,10 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
- if (fwnode) {
- struct fwnode_handle *fn = dev->fwnode;
+ struct device *parent = dev->parent;
+ struct fwnode_handle *fn = dev->fwnode;
+ if (fwnode) {
if (fwnode_is_primary(fn))
fn = fn->secondary;
@@ -3345,8 +3361,13 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
}
dev->fwnode = fwnode;
} else {
- dev->fwnode = fwnode_is_primary(dev->fwnode) ?
- dev->fwnode->secondary : NULL;
+ if (fwnode_is_primary(fn)) {
+ dev->fwnode = fn->secondary;
+ if (!(parent && fn == parent->fwnode))
+ fn->secondary = NULL;
+ } else {
+ dev->fwnode = NULL;
+ }
}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
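
The drivers/base/core.c change above makes device_is_dependent() also treat @dev as a dependency of @target when it is one of @target's ancestors, covering targets that are not yet on their parent's children list. The sketch below models only that ancestor walk with a toy structure; it is not struct device and the sample hierarchy is made up.

/*
 * Standalone model of the ancestor walk added in device_is_ancestor().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_device {
	const char *name;
	struct toy_device *parent;
};

static bool device_is_ancestor(struct toy_device *dev, struct toy_device *target)
{
	while (target->parent) {
		target = target->parent;
		if (dev == target)
			return true;
	}
	return false;
}

int main(void)
{
	struct toy_device root   = { "root",   NULL };
	struct toy_device bridge = { "bridge", &root };
	struct toy_device leaf   = { "leaf",   &bridge };

	printf("%d %d\n",
	       device_is_ancestor(&root, &leaf),	/* 1 */
	       device_is_ancestor(&leaf, &root));	/* 0 */
	return 0;
}
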
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f3ecf7418ed4..1df057486176 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -565,6 +565,12 @@ ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_srbds(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -573,6 +579,7 @@ static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
+static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -583,6 +590,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_mds.attr,
&dev_attr_tsx_async_abort.attr,
&dev_attr_itlb_multihit.attr,
+ &dev_attr_srbds.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index caaeb7910a04..26ba7a99b7d5 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -254,14 +254,16 @@ int driver_deferred_probe_check_state(struct device *dev)
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
- struct device_private *private, *p;
+ struct device_private *p;
deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
- list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
- dev_info(private->device, "deferred probe pending");
+ mutex_lock(&deferred_probe_mutex);
+ list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
+ dev_info(p->device, "deferred probe pending\n");
+ mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
@@ -472,7 +474,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
drv->bus->name, __func__, drv->name, dev_name(dev));
if (!list_empty(&dev->devres_head)) {
dev_crit(dev, "Resources present before probing\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto done;
}
re_probe:
@@ -579,7 +582,7 @@ pinctrl_bind_failed:
ret = 0;
done:
atomic_dec(&probe_count);
- wake_up(&probe_waitqueue);
+ wake_up_all(&probe_waitqueue);
return ret;
}
@@ -792,7 +795,9 @@ static int __device_attach(struct device *dev, bool allow_async)
int ret = 0;
device_lock(dev);
- if (dev->driver) {
+ if (dev->p->dead) {
+ goto out_unlock;
+ } else if (dev->driver) {
if (device_is_bound(dev)) {
ret = 1;
goto out_unlock;
@@ -928,6 +933,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
drv = dev->driver;
if (drv) {
+ pm_runtime_get_sync(dev);
+
while (device_links_busy(dev)) {
device_unlock(dev);
if (parent && dev->bus->need_parent_lock)
@@ -943,11 +950,12 @@ static void __device_release_driver(struct device *dev, struct device *parent)
* have released the driver successfully while this one
* was waiting, so check for that.
*/
- if (dev->driver != drv)
+ if (dev->driver != drv) {
+ pm_runtime_put(dev);
return;
+ }
}
- pm_runtime_get_sync(dev);
pm_runtime_clean_up_links(dev);
driver_sysfs_remove(dev);
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 818d8c37d70a..3b7b748c4d4f 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -572,7 +572,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
}
retval = fw_sysfs_wait_timeout(fw_priv, timeout);
- if (retval < 0) {
+ if (retval < 0 && retval != -ENOENT) {
mutex_lock(&fw_lock);
fw_load_abort(fw_sysfs);
mutex_unlock(&fw_lock);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index f3565c2dbc52..503e2f90e58e 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -403,10 +403,32 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
return pfn_to_nid(pfn);
}
+static int do_register_memory_block_under_node(int nid,
+ struct memory_block *mem_blk)
+{
+ int ret;
+
+ /*
+ * If this memory block spans multiple nodes, we only indicate
+ * the last processed node.
+ */
+ mem_blk->nid = nid;
+
+ ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
+ &mem_blk->dev.kobj,
+ kobject_name(&mem_blk->dev.kobj));
+ if (ret)
+ return ret;
+
+ return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+ &node_devices[nid]->dev.kobj,
+ kobject_name(&node_devices[nid]->dev.kobj));
+}
+
/* register memory section under specified node if it spans that node */
-int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
+int register_mem_block_under_node_early(struct memory_block *mem_blk, void *arg)
{
- int ret, nid = *(int *)arg;
+ int nid = *(int *)arg;
unsigned long pfn, sect_start_pfn, sect_end_pfn;
sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
@@ -426,39 +448,34 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
}
/*
- * We need to check if page belongs to nid only for the boot
- * case, during hotplug we know that all pages in the memory
- * block belong to the same node.
- */
- if (system_state == SYSTEM_BOOTING) {
- page_nid = get_nid_for_pfn(pfn);
- if (page_nid < 0)
- continue;
- if (page_nid != nid)
- continue;
- }
-
- /*
- * If this memory block spans multiple nodes, we only indicate
- * the last processed node.
+ * We need to check if page belongs to nid only at the boot
+ * case because node's ranges can be interleaved.
*/
- mem_blk->nid = nid;
-
- ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
- &mem_blk->dev.kobj,
- kobject_name(&mem_blk->dev.kobj));
- if (ret)
- return ret;
+ page_nid = get_nid_for_pfn(pfn);
+ if (page_nid < 0)
+ continue;
+ if (page_nid != nid)
+ continue;
- return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
- &node_devices[nid]->dev.kobj,
- kobject_name(&node_devices[nid]->dev.kobj));
+ return do_register_memory_block_under_node(nid, mem_blk);
}
/* mem section does not span the specified node */
return 0;
}
/*
+ * During hotplug we know that all pages in the memory block belong to the same
+ * node.
+ */
+static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
+ void *arg)
+{
+ int nid = *(int *)arg;
+
+ return do_register_memory_block_under_node(nid, mem_blk);
+}
+
+/*
* Unregister a memory block device under the node it spans. Memory blocks
* with multiple nodes cannot be offlined and therefore also never be removed.
*/
@@ -473,10 +490,17 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
+ enum meminit_context context)
{
- return walk_memory_range(start_pfn, end_pfn, (void *)&nid,
- register_mem_sect_under_node);
+ walk_memory_blocks_func_t func;
+
+ if (context == MEMINIT_HOTPLUG)
+ func = register_mem_block_under_node_hotplug;
+ else
+ func = register_mem_block_under_node_early;
+
+ return walk_memory_range(start_pfn, end_pfn, (void *)&nid, func);
}
#ifdef CONFIG_HUGETLBFS
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index d1f901b58f75..349c2754eed7 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -700,6 +700,8 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv,
/* temporary section violation during probe() */
drv->probe = probe;
retval = code = __platform_driver_register(drv, module);
+ if (retval)
+ return retval;
/*
* Fixup that section violation, being paranoid about code scanning
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 3b382a7e07b2..da413b95afab 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1751,13 +1751,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
/*
- * If a device configured to wake up the system from sleep states
- * has been suspended at run time and there's a resume request pending
- * for it, this is equivalent to the device signaling wakeup, so the
- * system suspend operation should be aborted.
+ * Wait for possible runtime PM transitions of the device in progress
+ * to complete and if there's a runtime resume request pending for it,
+ * resume it before proceeding with invoking the system-wide suspend
+ * callbacks for it.
+ *
+ * If the system-wide suspend callbacks below change the configuration
+ * of the device, they must disable runtime PM for it or otherwise
+ * ensure that its runtime-resume callbacks will not be confused by that
+ * change in case they are invoked going forward.
*/
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
- pm_wakeup_event(dev, 0);
+ pm_runtime_barrier(dev);
if (pm_wakeup_pending()) {
dev->power.direct_complete = false;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2c99f93020bc..eaae4adf9ce4 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1572,8 +1572,8 @@ void pm_runtime_get_suppliers(struct device *dev)
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
if (link->flags & DL_FLAG_PM_RUNTIME) {
link->supplier_preactivated = true;
- refcount_inc(&link->rpm_active);
pm_runtime_get_sync(link->supplier);
+ refcount_inc(&link->rpm_active);
}
device_links_read_unlock(idx);
@@ -1586,6 +1586,8 @@ void pm_runtime_get_suppliers(struct device *dev)
void pm_runtime_put_suppliers(struct device *dev)
{
struct device_link *link;
+ unsigned long flags;
+ bool put;
int idx;
idx = device_links_read_lock();
@@ -1593,7 +1595,11 @@ void pm_runtime_put_suppliers(struct device *dev)
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
if (link->supplier_preactivated) {
link->supplier_preactivated = false;
- if (refcount_dec_not_one(&link->rpm_active))
+ spin_lock_irqsave(&dev->power.lock, flags);
+ put = pm_runtime_status_suspended(dev) &&
+ refcount_dec_not_one(&link->rpm_active);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ if (put)
pm_runtime_put(link->supplier);
}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index c9687c8b2347..de706734b921 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -209,6 +209,9 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
if (*ppos < 0 || !count)
return -EINVAL;
+ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
+ count = PAGE_SIZE << (MAX_ORDER - 1);
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -357,6 +360,9 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
if (*ppos < 0 || !count)
return -EINVAL;
+ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
+ count = PAGE_SIZE << (MAX_ORDER - 1);
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -453,29 +459,31 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
{
struct regmap *map = container_of(file->private_data,
struct regmap, cache_only);
- ssize_t result;
- bool was_enabled, require_sync = false;
+ bool new_val, require_sync = false;
int err;
- map->lock(map->lock_arg);
+ err = kstrtobool_from_user(user_buf, count, &new_val);
+ /* Ignore malformed data like debugfs_write_file_bool() */
+ if (err)
+ return count;
- was_enabled = map->cache_only;
+ err = debugfs_file_get(file->f_path.dentry);
+ if (err)
+ return err;
- result = debugfs_write_file_bool(file, user_buf, count, ppos);
- if (result < 0) {
- map->unlock(map->lock_arg);
- return result;
- }
+ map->lock(map->lock_arg);
- if (map->cache_only && !was_enabled) {
+ if (new_val && !map->cache_only) {
dev_warn(map->dev, "debugfs cache_only=Y forced\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
- } else if (!map->cache_only && was_enabled) {
+ } else if (!new_val && map->cache_only) {
dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
require_sync = true;
}
+ map->cache_only = new_val;
map->unlock(map->lock_arg);
+ debugfs_file_put(file->f_path.dentry);
if (require_sync) {
err = regcache_sync(map);
@@ -483,7 +491,7 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
dev_err(map->dev, "Failed to sync cache %d\n", err);
}
- return result;
+ return count;
}
static const struct file_operations regmap_cache_only_fops = {
@@ -498,28 +506,32 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
{
struct regmap *map = container_of(file->private_data,
struct regmap, cache_bypass);
- ssize_t result;
- bool was_enabled;
+ bool new_val;
+ int err;
- map->lock(map->lock_arg);
+ err = kstrtobool_from_user(user_buf, count, &new_val);
+ /* Ignore malformed data like debugfs_write_file_bool() */
+ if (err)
+ return count;
- was_enabled = map->cache_bypass;
+ err = debugfs_file_get(file->f_path.dentry);
+ if (err)
+ return err;
- result = debugfs_write_file_bool(file, user_buf, count, ppos);
- if (result < 0)
- goto out;
+ map->lock(map->lock_arg);
- if (map->cache_bypass && !was_enabled) {
+ if (new_val && !map->cache_bypass) {
dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
- } else if (!map->cache_bypass && was_enabled) {
+ } else if (!new_val && map->cache_bypass) {
dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
}
+ map->cache_bypass = new_val;
-out:
map->unlock(map->lock_arg);
+ debugfs_file_put(file->f_path.dentry);
- return result;
+ return count;
}
static const struct file_operations regmap_cache_bypass_fops = {
@@ -567,8 +579,12 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
devname = dev_name(map->dev);
if (name) {
- map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+ if (!map->debugfs_name) {
+ map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
devname, name);
+ if (!map->debugfs_name)
+ return;
+ }
name = map->debugfs_name;
} else {
name = devname;
@@ -576,9 +592,10 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
if (!strcmp(name, "dummy")) {
kfree(map->debugfs_name);
-
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
+ if (!map->debugfs_name)
+ return;
name = map->debugfs_name;
dummy_index++;
}
@@ -648,6 +665,7 @@ void regmap_debugfs_exit(struct regmap *map)
regmap_debugfs_free_dump_cache(map);
mutex_unlock(&map->cache_lock);
kfree(map->debugfs_name);
+ map->debugfs_name = NULL;
} else {
struct regmap_debugfs_node *node, *tmp;
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index 50a66382d87d..e75168b941d0 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -12,7 +12,7 @@ static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
struct device *dev = context;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
- return sdw_write(slave, reg, val);
+ return sdw_write_no_pm(slave, reg, val);
}
static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
@@ -21,7 +21,7 @@ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
struct sdw_slave *slave = dev_to_sdw_dev(dev);
int read;
- read = sdw_read(slave, reg);
+ read = sdw_read_no_pm(slave, reg);
if (read < 0)
return read;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 6c9f6988bc09..e8b3353c18eb 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
+#include <asm/unaligned.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -232,22 +233,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
- __be16 *b = buf;
-
- b[0] = cpu_to_be16(val << shift);
+ put_unaligned_be16(val << shift, buf);
}
static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
- __le16 *b = buf;
-
- b[0] = cpu_to_le16(val << shift);
+ put_unaligned_le16(val << shift, buf);
}
static void regmap_format_16_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u16 *)buf = val << shift;
+ u16 v = val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
@@ -263,43 +262,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
- __be32 *b = buf;
-
- b[0] = cpu_to_be32(val << shift);
+ put_unaligned_be32(val << shift, buf);
}
static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
- __le32 *b = buf;
-
- b[0] = cpu_to_le32(val << shift);
+ put_unaligned_le32(val << shift, buf);
}
static void regmap_format_32_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u32 *)buf = val << shift;
+ u32 v = val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
- __be64 *b = buf;
-
- b[0] = cpu_to_be64((u64)val << shift);
+ put_unaligned_be64((u64) val << shift, buf);
}
static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
- __le64 *b = buf;
-
- b[0] = cpu_to_le64((u64)val << shift);
+ put_unaligned_le64((u64) val << shift, buf);
}
static void regmap_format_64_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u64 *)buf = (u64)val << shift;
+ u64 v = (u64) val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
#endif
@@ -316,35 +311,34 @@ static unsigned int regmap_parse_8(const void *buf)
static unsigned int regmap_parse_16_be(const void *buf)
{
- const __be16 *b = buf;
-
- return be16_to_cpu(b[0]);
+ return get_unaligned_be16(buf);
}
static unsigned int regmap_parse_16_le(const void *buf)
{
- const __le16 *b = buf;
-
- return le16_to_cpu(b[0]);
+ return get_unaligned_le16(buf);
}
static void regmap_parse_16_be_inplace(void *buf)
{
- __be16 *b = buf;
+ u16 v = get_unaligned_be16(buf);
- b[0] = be16_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_16_le_inplace(void *buf)
{
- __le16 *b = buf;
+ u16 v = get_unaligned_le16(buf);
- b[0] = le16_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_16_native(const void *buf)
{
- return *(u16 *)buf;
+ u16 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
static unsigned int regmap_parse_24(const void *buf)
@@ -359,69 +353,67 @@ static unsigned int regmap_parse_24(const void *buf)
static unsigned int regmap_parse_32_be(const void *buf)
{
- const __be32 *b = buf;
-
- return be32_to_cpu(b[0]);
+ return get_unaligned_be32(buf);
}
static unsigned int regmap_parse_32_le(const void *buf)
{
- const __le32 *b = buf;
-
- return le32_to_cpu(b[0]);
+ return get_unaligned_le32(buf);
}
static void regmap_parse_32_be_inplace(void *buf)
{
- __be32 *b = buf;
+ u32 v = get_unaligned_be32(buf);
- b[0] = be32_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_32_le_inplace(void *buf)
{
- __le32 *b = buf;
+ u32 v = get_unaligned_le32(buf);
- b[0] = le32_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_32_native(const void *buf)
{
- return *(u32 *)buf;
+ u32 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
- const __be64 *b = buf;
-
- return be64_to_cpu(b[0]);
+ return get_unaligned_be64(buf);
}
static unsigned int regmap_parse_64_le(const void *buf)
{
- const __le64 *b = buf;
-
- return le64_to_cpu(b[0]);
+ return get_unaligned_le64(buf);
}
static void regmap_parse_64_be_inplace(void *buf)
{
- __be64 *b = buf;
+ u64 v = get_unaligned_be64(buf);
- b[0] = be64_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_64_le_inplace(void *buf)
{
- __le64 *b = buf;
+ u64 v = get_unaligned_le64(buf);
- b[0] = le64_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_64_native(const void *buf)
{
- return *(u64 *)buf;
+ u64 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
#endif
@@ -1336,6 +1328,7 @@ void regmap_exit(struct regmap *map)
if (map->hwlock)
hwspin_lock_free(map->hwlock);
kfree_const(map->name);
+ kfree(map->patch);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
@@ -1350,7 +1343,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
/* If the user didn't specify a name match any */
if (data)
- return (*r)->name == data;
+ return !strcmp((*r)->name, data);
else
return 1;
}
@@ -2374,7 +2367,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
- unsigned int val_len)
+ unsigned int val_len, bool noinc)
{
struct regmap_range_node *range;
int ret;
@@ -2387,7 +2380,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
range = _regmap_range_lookup(map, reg);
if (range) {
ret = _regmap_select_page(map, &reg, range,
- val_len / map->format.val_bytes);
+ noinc ? 1 : val_len / map->format.val_bytes);
if (ret != 0)
return ret;
}
@@ -2425,7 +2418,7 @@ static int _regmap_bus_read(void *context, unsigned int reg,
if (!map->format.parse_val)
return -EINVAL;
- ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
+ ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
if (ret == 0)
*val = map->format.parse_val(work_val);
@@ -2543,7 +2536,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
/* Read bytes that fit into whole chunks */
for (i = 0; i < chunk_count; i++) {
- ret = _regmap_raw_read(map, reg, val, chunk_bytes);
+ ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
if (ret != 0)
goto out;
@@ -2554,7 +2547,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
/* Read remaining bytes */
if (val_len) {
- ret = _regmap_raw_read(map, reg, val, val_len);
+ ret = _regmap_raw_read(map, reg, val, val_len, false);
if (ret != 0)
goto out;
}
@@ -2629,7 +2622,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
read_len = map->max_raw_read;
else
read_len = val_len;
- ret = _regmap_raw_read(map, reg, val, read_len);
+ ret = _regmap_raw_read(map, reg, val, read_len, true);
if (ret)
goto out_unlock;
val = ((u8 *)val) + read_len;
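
The regmap.c hunks above replace direct loads and stores through __be/__le and u16/u32/u64 pointers with get_unaligned_*/put_unaligned_* helpers and, for native-endian formats, plain memcpy, so I/O buffers no longer have to be naturally aligned. The program below is a self-contained sketch of the native-endian case only; the function names are illustrative and not the kernel helpers.

/*
 * Standalone model of the memcpy-based native-endian format/parse pattern:
 * moving the value with memcpy is safe even when the buffer is misaligned.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void format_32_native(void *buf, uint32_t val, unsigned int shift)
{
	uint32_t v = val << shift;

	memcpy(buf, &v, sizeof(v));	/* no aligned u32 store through a cast */
}

static uint32_t parse_32_native(const void *buf)
{
	uint32_t v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

int main(void)
{
	unsigned char raw[8];
	void *misaligned = raw + 1;	/* deliberately odd address */

	format_32_native(misaligned, 0x1234, 4);
	printf("0x%x\n", (unsigned)parse_32_native(misaligned));	/* 0x12340 */
	return 0;
}
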
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index d4913516823f..e101f286ac35 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -474,6 +474,7 @@ config BLK_DEV_RBD
config BLK_DEV_RSXX
tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
depends on PCI
+ select CRC32
help
Device driver for IBM's high speed PCIe SSD
storage device: Flash Adapter 900GB Full Height.
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index c0ebda1283cc..015c68017a1c 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -201,7 +201,6 @@ int aoeblk_init(void);
void aoeblk_exit(void);
void aoeblk_gdalloc(void *);
void aoedisk_rm_debugfs(struct aoedev *d);
-void aoedisk_rm_sysfs(struct aoedev *d);
int aoechr_init(void);
void aoechr_exit(void);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 429ebb84b592..ff770e7d9e52 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -177,10 +177,15 @@ static struct attribute *aoe_attrs[] = {
NULL,
};
-static const struct attribute_group attr_group = {
+static const struct attribute_group aoe_attr_group = {
.attrs = aoe_attrs,
};
+static const struct attribute_group *aoe_attr_groups[] = {
+ &aoe_attr_group,
+ NULL,
+};
+
static const struct file_operations aoe_debugfs_fops = {
.open = aoe_debugfs_open,
.read = seq_read,
@@ -220,17 +225,6 @@ aoedisk_rm_debugfs(struct aoedev *d)
}
static int
-aoedisk_add_sysfs(struct aoedev *d)
-{
- return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
-}
-void
-aoedisk_rm_sysfs(struct aoedev *d)
-{
- sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
-}
-
-static int
aoeblk_open(struct block_device *bdev, fmode_t mode)
{
struct aoedev *d = bdev->bd_disk->private_data;
@@ -417,8 +411,7 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
- add_disk(gd);
- aoedisk_add_sysfs(d);
+ device_add_disk(NULL, gd, aoe_attr_groups);
aoedisk_add_debugfs(d);
spin_lock_irqsave(&d->lock, flags);
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 41060e9cedf2..f29a140cdbc1 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -275,7 +275,6 @@ freedev(struct aoedev *d)
del_timer_sync(&d->timer);
if (d->gd) {
aoedisk_rm_debugfs(d);
- aoedisk_rm_sysfs(d);
del_gendisk(d->gd);
put_disk(d->gd);
blk_cleanup_queue(d->blkq);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index bf222c4b2f82..04383f14c74a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4074,21 +4074,22 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (UFDCS->rawcmd == 1)
UFDCS->rawcmd = 2;
- if (!(mode & FMODE_NDELAY)) {
- if (mode & (FMODE_READ|FMODE_WRITE)) {
- UDRS->last_checked = 0;
- clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
- check_disk_change(bdev);
- if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
- goto out;
- if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
- goto out;
- }
- res = -EROFS;
- if ((mode & FMODE_WRITE) &&
- !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
+ if (mode & (FMODE_READ|FMODE_WRITE)) {
+ UDRS->last_checked = 0;
+ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+ check_disk_change(bdev);
+ if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+ goto out;
+ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
goto out;
}
+
+ res = -EROFS;
+
+ if ((mode & FMODE_WRITE) &&
+ !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
+ goto out;
+
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;
@@ -4713,7 +4714,7 @@ static int __init do_floppy_init(void)
/* to be cleaned up... */
disks[drive]->private_data = (void *)(long)drive;
disks[drive]->flags |= GENHD_FL_REMOVABLE;
- device_add_disk(&floppy_device[drive].dev, disks[drive]);
+ device_add_disk(&floppy_device[drive].dev, disks[drive], NULL);
}
return 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 9cd231a27328..19042b42a8ba 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -426,11 +426,12 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
* information.
*/
struct file *file = lo->lo_backing_file;
+ struct request_queue *q = lo->lo_queue;
int ret;
mode |= FALLOC_FL_KEEP_SIZE;
- if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
+ if (!blk_queue_discard(q)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -863,6 +864,23 @@ static void loop_config_discard(struct loop_device *lo)
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct request_queue *q = lo->lo_queue;
+ u32 granularity, max_discard_sectors;
+
+ /*
+ * If the backing device is a block device, mirror its zeroing
+ * capability. Set the discard sectors to the block device's zeroing
+ * capabilities because loop discards result in blkdev_issue_zeroout(),
+ * not blkdev_issue_discard(). This maintains consistent behavior with
+ * file-backed loop devices: discarded regions read back as zero.
+ */
+ if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
+ struct request_queue *backingq;
+
+ backingq = bdev_get_queue(inode->i_bdev);
+
+ max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
+ granularity = backingq->limits.discard_granularity ?:
+ queue_physical_block_size(backingq);
/*
* We use punch hole to reclaim the free space used by the
@@ -870,22 +888,27 @@ static void loop_config_discard(struct loop_device *lo)
* encryption is enabled, because it may give an attacker
* useful information.
*/
- if ((!file->f_op->fallocate) ||
- lo->lo_encrypt_key_size) {
+ } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
+ max_discard_sectors = 0;
+ granularity = 0;
+
+ } else {
+ max_discard_sectors = UINT_MAX >> 9;
+ granularity = inode->i_sb->s_blocksize;
+ }
+
+ if (max_discard_sectors) {
+ q->limits.discard_granularity = granularity;
+ blk_queue_max_discard_sectors(q, max_discard_sectors);
+ blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ } else {
q->limits.discard_granularity = 0;
- q->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(q, 0);
blk_queue_max_write_zeroes_sectors(q, 0);
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
- return;
}
-
- q->limits.discard_granularity = inode->i_sb->s_blocksize;
q->limits.discard_alignment = 0;
-
- blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
- blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}
static void loop_unprepare_queue(struct loop_device *lo)
@@ -1218,7 +1241,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
sync_blockdev(lo->lo_device);
- kill_bdev(lo->lo_device);
+ invalidate_bdev(lo->lo_device);
}
/* I/O need to be drained during transfer transition */
@@ -1492,12 +1515,12 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
if (lo->lo_queue->limits.logical_block_size != arg) {
sync_blockdev(lo->lo_device);
- kill_bdev(lo->lo_device);
+ invalidate_bdev(lo->lo_device);
}
blk_mq_freeze_queue(lo->lo_queue);
- /* kill_bdev should have truncated all the pages */
+ /* invalidate_bdev should have truncated all the pages */
if (lo->lo_queue->limits.logical_block_size != arg &&
lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN;
@@ -2259,6 +2282,8 @@ static void __exit loop_exit(void)
range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
+ mutex_lock(&loop_ctl_mutex);
+
idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
idr_destroy(&loop_index_idr);
@@ -2266,6 +2291,8 @@ static void __exit loop_exit(void)
unregister_blkdev(LOOP_MAJOR, "loop");
misc_deregister(&loop_misc);
+
+ mutex_unlock(&loop_ctl_mutex);
}
module_init(loop_init);
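
The loop_config_discard() rework above selects discard limits from one of three cases: a block-device-backed, unencrypted loop mirrors the backing queue's write-zeroes limits; an encrypted or non-fallocate-capable backing file disables discard; any other file-backed loop punches holes with the filesystem block size as granularity. The program below is a standalone simplification of that decision only; the struct, the 512-byte fallback granularity and the sample values are assumptions for illustration, not the driver's data structures.

/*
 * Standalone model of the three-way discard-limit decision.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_backing {
	bool is_blockdev;
	bool encrypted;
	bool has_fallocate;
	uint32_t bdev_zeroing_sectors;	/* backing bdev max_write_zeroes_sectors */
	uint32_t fs_blocksize;		/* backing file's i_sb->s_blocksize */
};

static void pick_discard_limits(const struct fake_backing *b,
				uint32_t *granularity, uint32_t *max_sectors)
{
	if (b->is_blockdev && !b->encrypted) {
		/* mirror the backing device's zeroing capability */
		*max_sectors = b->bdev_zeroing_sectors;
		*granularity = 512;	/* simplification of the ?: fallback */
	} else if (!b->has_fallocate || b->encrypted) {
		/* punch hole unusable: disable discard entirely */
		*max_sectors = 0;
		*granularity = 0;
	} else {
		/* file-backed: punch holes, granularity = fs block size */
		*max_sectors = UINT32_MAX >> 9;
		*granularity = b->fs_blocksize;
	}
}

int main(void)
{
	struct fake_backing file = { .has_fallocate = true, .fs_blocksize = 4096 };
	uint32_t g, m;

	pick_discard_limits(&file, &g, &m);
	printf("granularity=%u max_sectors=%u\n", (unsigned)g, (unsigned)m);
	return 0;
}
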
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index d0666f5ce003..1d7d48d8a205 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3861,7 +3861,7 @@ skip_create_disk:
set_capacity(dd->disk, capacity);
/* Enable the block device and add it to /dev */
- device_add_disk(&dd->pdev->dev, dd->disk);
+ device_add_disk(&dd->pdev->dev, dd->disk, NULL);
dd->bdev = bdget_disk(dd->disk, 0);
/*
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 226103af30f0..81b955670b12 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -276,7 +276,7 @@ static void nbd_size_clear(struct nbd_device *nbd)
}
}
-static void nbd_size_update(struct nbd_device *nbd)
+static void nbd_size_update(struct nbd_device *nbd, bool start)
{
struct nbd_config *config = nbd->config;
struct block_device *bdev = bdget_disk(nbd->disk, 0);
@@ -292,7 +292,8 @@ static void nbd_size_update(struct nbd_device *nbd)
if (bdev) {
if (bdev->bd_disk) {
bd_set_size(bdev, config->bytesize);
- set_blocksize(bdev, config->blksize);
+ if (start)
+ set_blocksize(bdev, config->blksize);
} else
bdev->bd_invalidated = 1;
bdput(bdev);
@@ -307,7 +308,7 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
config->blksize = blocksize;
config->bytesize = blocksize * nr_blocks;
if (nbd->task_recv != NULL)
- nbd_size_update(nbd);
+ nbd_size_update(nbd, false);
}
static void nbd_complete_rq(struct request *req)
@@ -740,9 +741,9 @@ static void recv_work(struct work_struct *work)
blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
}
+ nbd_config_put(nbd);
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
- nbd_config_put(nbd);
kfree(args);
}
@@ -965,6 +966,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
if (!sock)
return err;
+ /*
+ * We need to make sure we don't get any errant requests while we're
+ * reallocating the ->socks array.
+ */
+ blk_mq_freeze_queue(nbd->disk->queue);
+
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_BOUND, &config->runtime_flags))
nbd->task_setup = current;
@@ -974,25 +981,26 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
test_bit(NBD_BOUND, &config->runtime_flags))) {
dev_err(disk_to_dev(nbd->disk),
"Device being setup by another task");
- sockfd_put(sock);
- return -EBUSY;
+ err = -EBUSY;
+ goto put_socket;
+ }
+
+ nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
+ if (!nsock) {
+ err = -ENOMEM;
+ goto put_socket;
}
socks = krealloc(config->socks, (config->num_connections + 1) *
sizeof(struct nbd_sock *), GFP_KERNEL);
if (!socks) {
- sockfd_put(sock);
- return -ENOMEM;
+ kfree(nsock);
+ err = -ENOMEM;
+ goto put_socket;
}
config->socks = socks;
- nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
- if (!nsock) {
- sockfd_put(sock);
- return -ENOMEM;
- }
-
nsock->fallback_index = -1;
nsock->dead = false;
mutex_init(&nsock->tx_lock);
@@ -1002,8 +1010,14 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
nsock->cookie = 0;
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
+ blk_mq_unfreeze_queue(nbd->disk->queue);
return 0;
+
+put_socket:
+ blk_mq_unfreeze_queue(nbd->disk->queue);
+ sockfd_put(sock);
+ return err;
}
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
@@ -1239,7 +1253,7 @@ static int nbd_start_device(struct nbd_device *nbd)
args->index = i;
queue_work(nbd->recv_workq, &args->work);
}
- nbd_size_update(nbd);
+ nbd_size_update(nbd, true);
return error;
}
@@ -1442,6 +1456,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
bdev->bd_openers == 0)
nbd_disconnect_and_put(nbd);
+ bdput(bdev);
nbd_config_put(nbd);
nbd_put(nbd);
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index c5c0b7c89481..4fef1fb918ec 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -571,6 +571,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
if (tag != -1U) {
cmd = &nq->cmds[tag];
cmd->tag = tag;
+ cmd->error = BLK_STS_OK;
cmd->nq = nq;
if (nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
@@ -1085,7 +1086,7 @@ static int null_handle_rq(struct nullb_cmd *cmd)
len = bvec.bv_len;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
op_is_write(req_op(rq)), sector,
- req_op(rq) & REQ_FUA);
+ rq->cmd_flags & REQ_FUA);
if (err) {
spin_unlock_irq(&nullb->lock);
return err;
@@ -1433,6 +1434,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->timer.function = null_cmd_timer_expired;
}
cmd->rq = bd->rq;
+ cmd->error = BLK_STS_OK;
cmd->nq = nq;
blk_mq_start_request(bd->rq);
@@ -1480,7 +1482,12 @@ static void cleanup_queues(struct nullb *nullb)
static void null_del_dev(struct nullb *nullb)
{
- struct nullb_device *dev = nullb->dev;
+ struct nullb_device *dev;
+
+ if (!nullb)
+ return;
+
+ dev = nullb->dev;
ida_simple_remove(&nullb_indexes, nullb->index);
@@ -1844,6 +1851,7 @@ out_cleanup_queues:
cleanup_queues(nullb);
out_free_nullb:
kfree(nullb);
+ dev->nullb = NULL;
out:
return rv;
}
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 7c6b86d98700..ba018f1da512 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
+#include <linux/sizes.h>
#include "null_blk.h"
-/* zone_size in MBs to sectors. */
-#define ZONE_SIZE_SHIFT 11
+#define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT)
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
@@ -12,7 +12,7 @@ static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
int null_zone_init(struct nullb_device *dev)
{
- sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
+ sector_t dev_capacity_sects;
sector_t sector = 0;
unsigned int i;
@@ -20,10 +20,17 @@ int null_zone_init(struct nullb_device *dev)
pr_err("null_blk: zone_size must be power-of-two\n");
return -EINVAL;
}
+ if (dev->zone_size > dev->size) {
+ pr_err("Zone size larger than device capacity\n");
+ return -EINVAL;
+ }
+
+ dev_capacity_sects = MB_TO_SECTS(dev->size);
+ dev->zone_size_sects = MB_TO_SECTS(dev->zone_size);
+ dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects);
+ if (dev_capacity_sects & (dev->zone_size_sects - 1))
+ dev->nr_zones++;
- dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
- dev->nr_zones = dev_size >>
- (SECTOR_SHIFT + ilog2(dev->zone_size_sects));
dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
GFP_KERNEL | __GFP_ZERO);
if (!dev->zones)
@@ -33,7 +40,10 @@ int null_zone_init(struct nullb_device *dev)
struct blk_zone *zone = &dev->zones[i];
zone->start = zone->wp = sector;
- zone->len = dev->zone_size_sects;
+ if (zone->start + dev->zone_size_sects > dev_capacity_sects)
+ zone->len = dev_capacity_sects - zone->start;
+ else
+ zone->len = dev->zone_size_sects;
zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
zone->cond = BLK_ZONE_COND_EMPTY;
@@ -46,6 +56,7 @@ int null_zone_init(struct nullb_device *dev)
void null_zone_exit(struct nullb_device *dev)
{
kvfree(dev->zones);
+ dev->zones = NULL;
}
static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
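For reference, the zone-count arithmetic introduced above can be exercised on its own. A small userspace sketch (plain division standing in for the ilog2()-based shift; names and sizes are illustrative) shows how a capacity that is not a multiple of the zone size gains one shorter trailing zone:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT	9
#define SZ_1M		(1024u * 1024u)
#define MB_TO_SECTS(mb)	(((uint64_t)(mb) * SZ_1M) >> SECTOR_SHIFT)

int main(void)
{
	uint64_t dev_size_mb  = 1000;	/* device capacity in MB */
	uint64_t zone_size_mb = 256;	/* power-of-two zone size in MB */

	uint64_t dev_sects  = MB_TO_SECTS(dev_size_mb);
	uint64_t zone_sects = MB_TO_SECTS(zone_size_mb);

	/* Full zones, plus one smaller trailing zone for any remainder. */
	uint64_t nr_zones = dev_sects / zone_sects;
	uint64_t last = dev_sects % zone_sects;

	if (last)
		nr_zones++;
	else
		last = zone_sects;

	printf("%llu sectors -> %llu zones, last zone %llu sectors\n",
	       (unsigned long long)dev_sects,
	       (unsigned long long)nr_zones,
	       (unsigned long long)last);
	return 0;
}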
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index afe1508d82c6..42bff6b1d6a8 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -466,7 +466,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
queue->queuedata = dev;
blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
- blk_queue_segment_boundary(queue, -1UL);
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_logical_block_size(queue, dev->blk_size);
@@ -500,7 +499,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
get_capacity(gendisk) >> 11);
- device_add_disk(&dev->sbd.core, gendisk);
+ device_add_disk(&dev->sbd.core, gendisk, NULL);
return 0;
fail_cleanup_queue:
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 1e3d5de9d838..c0c50816a10b 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -769,7 +769,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n",
gendisk->disk_name, get_capacity(gendisk) >> 11);
- device_add_disk(&dev->core, gendisk);
+ device_add_disk(&dev->core, gendisk, NULL);
return 0;
fail_cleanup_queue:
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d3ad1b8c133e..9f1265ce2e36 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3427,6 +3427,10 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev)
cancel_work_sync(&rbd_dev->unlock_work);
}
+/*
+ * header_rwsem must not be held to avoid a deadlock with
+ * rbd_dev_refresh() when flushing notifies.
+ */
static void rbd_unregister_watch(struct rbd_device *rbd_dev)
{
WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
@@ -4120,6 +4124,9 @@ static ssize_t rbd_config_info_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
return sprintf(buf, "%s\n", rbd_dev->config_info);
}
@@ -4231,6 +4238,9 @@ static ssize_t rbd_image_refresh(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
ret = rbd_dev_refresh(rbd_dev);
if (ret)
return ret;
@@ -5719,9 +5729,10 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev)
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
- rbd_dev_unprobe(rbd_dev);
if (rbd_dev->opts)
rbd_unregister_watch(rbd_dev);
+
+ rbd_dev_unprobe(rbd_dev);
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
@@ -5732,6 +5743,9 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
* device. If this image is the one being mapped (i.e., not a
* parent), initiate a watch on its header object before using that
* object to get detailed information about the rbd image.
+ *
+ * On success, returns with header_rwsem held for write if called
+ * with @depth == 0.
*/
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
@@ -5764,9 +5778,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
}
}
+ if (!depth)
+ down_write(&rbd_dev->header_rwsem);
+
ret = rbd_dev_header_info(rbd_dev);
if (ret)
- goto err_out_watch;
+ goto err_out_probe;
/*
* If this image is the one being mapped, we have pool name and
@@ -5812,10 +5829,11 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
return 0;
err_out_probe:
- rbd_dev_unprobe(rbd_dev);
-err_out_watch:
+ if (!depth)
+ up_write(&rbd_dev->header_rwsem);
if (!depth)
rbd_unregister_watch(rbd_dev);
+ rbd_dev_unprobe(rbd_dev);
err_out_format:
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
@@ -5834,6 +5852,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
struct rbd_client *rbdc;
int rc;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (!try_module_get(THIS_MODULE))
return -ENODEV;
@@ -5872,12 +5893,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
goto err_out_rbd_dev;
}
- down_write(&rbd_dev->header_rwsem);
rc = rbd_dev_image_probe(rbd_dev, 0);
- if (rc < 0) {
- up_write(&rbd_dev->header_rwsem);
+ if (rc < 0)
goto err_out_rbd_dev;
- }
/* If we are mapping a snapshot it must be marked read-only */
if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
@@ -5986,6 +6004,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
bool force = false;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
dev_id = -1;
opt_buf[0] = '\0';
sscanf(buf, "%d %5s", &dev_id, opt_buf);
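The rbd changes above also document an asymmetric locking contract: on success, rbd_dev_image_probe() now returns with header_rwsem held for write and the caller drops it later, while the error path releases it internally. A generic userspace sketch of that contract (pthread rwlock standing in for the rwsem; probe() is an illustrative name, not rbd code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t header_lock = PTHREAD_RWLOCK_INITIALIZER;

static int probe(bool fail)
{
	pthread_rwlock_wrlock(&header_lock);

	if (fail) {
		/* error path cleans up its own locking */
		pthread_rwlock_unlock(&header_lock);
		return -1;
	}
	return 0;	/* success: still holding the write lock */
}

int main(void)
{
	if (probe(false) == 0) {
		printf("probed with lock held\n");
		pthread_rwlock_unlock(&header_lock);	/* caller releases it */
	}
	return 0;
}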
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 14056dc45064..08acfe11752b 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -179,15 +179,17 @@ static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
{
struct rsxx_cardinfo *card = file_inode(fp)->i_private;
char *buf;
- ssize_t st;
+ int st;
buf = kzalloc(cnt, GFP_KERNEL);
if (!buf)
return -ENOMEM;
st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
- if (!st)
- st = copy_to_user(ubuf, buf, cnt);
+ if (!st) {
+ if (copy_to_user(ubuf, buf, cnt))
+ st = -EFAULT;
+ }
kfree(buf);
if (st)
return st;
@@ -881,6 +883,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
if (!card->event_wq) {
dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
+ st = -ENOMEM;
goto failed_event_handler;
}
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 1a92f9e65937..3894aa0f350b 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -226,7 +226,7 @@ int rsxx_attach_dev(struct rsxx_cardinfo *card)
set_capacity(card->gendisk, card->size8 >> 9);
else
set_capacity(card->gendisk, 0);
- device_add_disk(CARD_TO_DEV(card), card->gendisk);
+ device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
card->bdev_attached = 1;
}
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 27323fa23997..80a5806ede03 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -3104,7 +3104,7 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
dev_dbg(&skdev->pdev->dev, "add_disk\n");
- device_add_disk(parent, skdev->disk);
+ device_add_disk(parent, skdev->disk, NULL);
return 0;
}
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 5d7024057540..6b7b0d8a2acb 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -862,7 +862,7 @@ static int probe_disk(struct vdc_port *port)
port->vdisk_size, (port->vdisk_size >> (20 - 9)),
port->vio.ver.major, port->vio.ver.minor);
- device_add_disk(&port->vio.vdev->dev, g);
+ device_add_disk(&port->vio.vdev->dev, g, NULL);
return 0;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 728c9a9609f0..c2d9459ec5d1 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -31,6 +31,15 @@ struct virtio_blk_vq {
} ____cacheline_aligned_in_smp;
struct virtio_blk {
+ /*
+ * This mutex must be held by anything that may run after
+ * virtblk_remove() sets vblk->vdev to NULL.
+ *
+ * blk-mq, virtqueue processing, and sysfs attribute code paths are
+ * shut down before vblk->vdev is set to NULL and therefore do not need
+ * to hold this mutex.
+ */
+ struct mutex vdev_mutex;
struct virtio_device *vdev;
/* The disk structure for the kernel. */
@@ -42,6 +51,13 @@ struct virtio_blk {
/* Process context for config space updates */
struct work_struct config_work;
+ /*
+ * Tracks references from block_device_operations open/release and
+ * virtio_driver probe/remove so this object can be freed once no
+ * longer in use.
+ */
+ refcount_t refs;
+
/* What host tells us, plus 2 for header & tailer. */
unsigned int sg_elems;
@@ -277,9 +293,14 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
if (err == -ENOSPC)
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
- if (err == -ENOMEM || err == -ENOSPC)
+ switch (err) {
+ case -ENOSPC:
return BLK_STS_DEV_RESOURCE;
- return BLK_STS_IOERR;
+ case -ENOMEM:
+ return BLK_STS_RESOURCE;
+ default:
+ return BLK_STS_IOERR;
+ }
}
if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
@@ -315,10 +336,55 @@ out:
return err;
}
+static void virtblk_get(struct virtio_blk *vblk)
+{
+ refcount_inc(&vblk->refs);
+}
+
+static void virtblk_put(struct virtio_blk *vblk)
+{
+ if (refcount_dec_and_test(&vblk->refs)) {
+ ida_simple_remove(&vd_index_ida, vblk->index);
+ mutex_destroy(&vblk->vdev_mutex);
+ kfree(vblk);
+ }
+}
+
+static int virtblk_open(struct block_device *bd, fmode_t mode)
+{
+ struct virtio_blk *vblk = bd->bd_disk->private_data;
+ int ret = 0;
+
+ mutex_lock(&vblk->vdev_mutex);
+
+ if (vblk->vdev)
+ virtblk_get(vblk);
+ else
+ ret = -ENXIO;
+
+ mutex_unlock(&vblk->vdev_mutex);
+ return ret;
+}
+
+static void virtblk_release(struct gendisk *disk, fmode_t mode)
+{
+ struct virtio_blk *vblk = disk->private_data;
+
+ virtblk_put(vblk);
+}
+
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
struct virtio_blk *vblk = bd->bd_disk->private_data;
+ int ret = 0;
+
+ mutex_lock(&vblk->vdev_mutex);
+
+ if (!vblk->vdev) {
+ ret = -ENXIO;
+ goto out;
+ }
/* see if the host passed in geometry config */
if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
@@ -334,12 +400,16 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
geo->sectors = 1 << 5;
geo->cylinders = get_capacity(bd->bd_disk) >> 11;
}
- return 0;
+out:
+ mutex_unlock(&vblk->vdev_mutex);
+ return ret;
}
static const struct block_device_operations virtblk_fops = {
.ioctl = virtblk_ioctl,
.owner = THIS_MODULE,
+ .open = virtblk_open,
+ .release = virtblk_release,
.getgeo = virtblk_getgeo,
};
@@ -353,8 +423,8 @@ static int minor_to_index(int minor)
return minor >> PART_BITS;
}
-static ssize_t virtblk_serial_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t serial_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
int err;
@@ -373,7 +443,7 @@ static ssize_t virtblk_serial_show(struct device *dev,
return err;
}
-static DEVICE_ATTR(serial, 0444, virtblk_serial_show, NULL);
+static DEVICE_ATTR_RO(serial);
/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
@@ -549,8 +619,8 @@ static const char *const virtblk_cache_types[] = {
};
static ssize_t
-virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+cache_type_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
@@ -568,8 +638,7 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
@@ -579,12 +648,38 @@ virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}
-static const struct device_attribute dev_attr_cache_type_ro =
- __ATTR(cache_type, 0444,
- virtblk_cache_type_show, NULL);
-static const struct device_attribute dev_attr_cache_type_rw =
- __ATTR(cache_type, 0644,
- virtblk_cache_type_show, virtblk_cache_type_store);
+static DEVICE_ATTR_RW(cache_type);
+
+static struct attribute *virtblk_attrs[] = {
+ &dev_attr_serial.attr,
+ &dev_attr_cache_type.attr,
+ NULL,
+};
+
+static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct gendisk *disk = dev_to_disk(dev);
+ struct virtio_blk *vblk = disk->private_data;
+ struct virtio_device *vdev = vblk->vdev;
+
+ if (a == &dev_attr_cache_type.attr &&
+ !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
+ return S_IRUGO;
+
+ return a->mode;
+}
+
+static const struct attribute_group virtblk_attr_group = {
+ .attrs = virtblk_attrs,
+ .is_visible = virtblk_attrs_are_visible,
+};
+
+static const struct attribute_group *virtblk_attr_groups[] = {
+ &virtblk_attr_group,
+ NULL,
+};
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
@@ -667,6 +762,10 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_free_index;
}
+ /* This reference is dropped in virtblk_remove(). */
+ refcount_set(&vblk->refs, 1);
+ mutex_init(&vblk->vdev_mutex);
+
vblk->vdev = vdev;
vblk->sg_elems = sg_elems;
@@ -784,30 +883,16 @@ static int virtblk_probe(struct virtio_device *vdev)
virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);
- device_add_disk(&vdev->dev, vblk->disk);
- err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
- if (err)
- goto out_del_disk;
-
- if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
- err = device_create_file(disk_to_dev(vblk->disk),
- &dev_attr_cache_type_rw);
- else
- err = device_create_file(disk_to_dev(vblk->disk),
- &dev_attr_cache_type_ro);
- if (err)
- goto out_del_disk;
+ device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
return 0;
-out_del_disk:
- del_gendisk(vblk->disk);
- blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
put_disk(vblk->disk);
out_free_vq:
vdev->config->del_vqs(vdev);
+ kfree(vblk->vqs);
out_free_vblk:
kfree(vblk);
out_free_index:
@@ -819,8 +904,6 @@ out:
static void virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
- int index = vblk->index;
- int refc;
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
@@ -830,18 +913,21 @@ static void virtblk_remove(struct virtio_device *vdev)
blk_mq_free_tag_set(&vblk->tag_set);
+ mutex_lock(&vblk->vdev_mutex);
+
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
- refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
+ /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
+ vblk->vdev = NULL;
+
put_disk(vblk->disk);
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
- kfree(vblk);
- /* Only free device id if we don't have any users */
- if (refc == 1)
- ida_simple_remove(&vd_index_ida, index);
+ mutex_unlock(&vblk->vdev_mutex);
+
+ virtblk_put(vblk);
}
#ifdef CONFIG_PM_SLEEP
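Outside the patch itself, the lifetime scheme virtio_blk adopts above (a refcount spanning open/release plus a mutex guarding the moment the device pointer goes NULL) is a general pattern. A hedged userspace sketch of the same shape, with illustrative names and pthread/stdatomic standing in for the kernel primitives:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct blk_obj {
	atomic_int refs;	/* one reference held by "probe" */
	pthread_mutex_t lock;	/* guards dev becoming NULL */
	void *dev;		/* NULL once the device is removed */
};

static void obj_put(struct blk_obj *o)
{
	if (atomic_fetch_sub(&o->refs, 1) == 1) {	/* last reference */
		pthread_mutex_destroy(&o->lock);
		free(o);
	}
}

/* open: only take a reference while the device is still present */
static bool obj_open(struct blk_obj *o)
{
	bool ok;

	pthread_mutex_lock(&o->lock);
	ok = o->dev != NULL;
	if (ok)
		atomic_fetch_add(&o->refs, 1);
	pthread_mutex_unlock(&o->lock);
	return ok;
}

/* remove: clear the pointer under the lock, then drop probe's reference */
static void obj_remove(struct blk_obj *o)
{
	pthread_mutex_lock(&o->lock);
	o->dev = NULL;
	pthread_mutex_unlock(&o->lock);
	obj_put(o);
}

int main(void)
{
	struct blk_obj *o = calloc(1, sizeof(*o));

	atomic_init(&o->refs, 1);
	pthread_mutex_init(&o->lock, NULL);
	o->dev = o;		/* stand-in for a live device */

	if (obj_open(o))	/* reader pins the object */
		obj_remove(o);	/* removal may race with readers in real code */
	obj_put(o);		/* reader's release drops the last ref */
	return 0;
}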
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 3666afa639d1..d98cfd3b64ff 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -202,7 +202,7 @@ static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
-static int do_block_io_op(struct xen_blkif_ring *ring);
+static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
struct blkif_request *req,
struct pending_req *pending_req);
@@ -615,6 +615,8 @@ int xen_blkif_schedule(void *arg)
struct xen_vbd *vbd = &blkif->vbd;
unsigned long timeout;
int ret;
+ bool do_eoi;
+ unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
set_freezable();
while (!kthread_should_stop()) {
@@ -639,16 +641,23 @@ int xen_blkif_schedule(void *arg)
if (timeout == 0)
goto purge_gnt_list;
+ do_eoi = ring->waiting_reqs;
+
ring->waiting_reqs = 0;
smp_mb(); /* clear flag *before* checking for work */
- ret = do_block_io_op(ring);
+ ret = do_block_io_op(ring, &eoi_flags);
if (ret > 0)
ring->waiting_reqs = 1;
if (ret == -EACCES)
wait_event_interruptible(ring->shutdown_wq,
kthread_should_stop());
+ if (do_eoi && !ring->waiting_reqs) {
+ xen_irq_lateeoi(ring->irq, eoi_flags);
+ eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
+ }
+
purge_gnt_list:
if (blkif->vbd.feature_gnt_persistent &&
time_after(jiffies, ring->next_lru)) {
@@ -841,8 +850,11 @@ again:
pages[i]->page = persistent_gnt->page;
pages[i]->persistent_gnt = persistent_gnt;
} else {
- if (get_free_page(ring, &pages[i]->page))
- goto out_of_memory;
+ if (get_free_page(ring, &pages[i]->page)) {
+ put_free_pages(ring, pages_to_gnt, segs_to_map);
+ ret = -ENOMEM;
+ goto out;
+ }
addr = vaddr(pages[i]->page);
pages_to_gnt[segs_to_map] = pages[i]->page;
pages[i]->persistent_gnt = NULL;
@@ -858,10 +870,8 @@ again:
break;
}
- if (segs_to_map) {
+ if (segs_to_map)
ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
- BUG_ON(ret);
- }
/*
* Now swizzle the MFN in our domain with the MFN from the other domain
@@ -876,7 +886,7 @@ again:
pr_debug("invalid buffer -- could not remap it\n");
put_free_pages(ring, &pages[seg_idx]->page, 1);
pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
- ret |= 1;
+ ret |= !ret;
goto next;
}
pages[seg_idx]->handle = map[new_map_idx].handle;
@@ -928,17 +938,18 @@ next:
}
segs_to_map = 0;
last_map = map_until;
- if (map_until != num)
+ if (!ret && map_until != num)
goto again;
- return ret;
-
-out_of_memory:
- pr_alert("%s: out of memory\n", __func__);
- put_free_pages(ring, pages_to_gnt, segs_to_map);
- for (i = last_map; i < num; i++)
+out:
+ for (i = last_map; i < num; i++) {
+ /* Don't zap current batch's valid persistent grants. */
if (i >= map_until)
+ pages[i]->persistent_gnt = NULL;
pages[i]->handle = BLKBACK_INVALID_HANDLE;
- return -ENOMEM;
+ }
+
+ return ret;
}
static int xen_blkbk_map_seg(struct pending_req *pending_req)
@@ -1121,7 +1132,7 @@ static void end_block_io_op(struct bio *bio)
* and transmute it to the block API to hand it over to the proper block disk.
*/
static int
-__do_block_io_op(struct xen_blkif_ring *ring)
+__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
union blkif_back_rings *blk_rings = &ring->blk_rings;
struct blkif_request req;
@@ -1144,6 +1155,9 @@ __do_block_io_op(struct xen_blkif_ring *ring)
if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
break;
+ /* We've seen a request, so clear spurious eoi flag. */
+ *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
+
if (kthread_should_stop()) {
more_to_do = 1;
break;
@@ -1202,13 +1216,13 @@ done:
}
static int
-do_block_io_op(struct xen_blkif_ring *ring)
+do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
union blkif_back_rings *blk_rings = &ring->blk_rings;
int more_to_do;
do {
- more_to_do = __do_block_io_op(ring);
+ more_to_do = __do_block_io_op(ring, eoi_flags);
if (more_to_do)
break;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 25c41ce070a7..42af2f37ba4e 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -237,9 +237,8 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
BUG();
}
- err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
- xen_blkif_be_int, 0,
- "blkif-backend", ring);
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid,
+ evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
if (err < 0) {
xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
ring->blk_rings.common.sring = NULL;
@@ -265,6 +264,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
if (ring->xenblkd) {
kthread_stop(ring->xenblkd);
+ ring->xenblkd = NULL;
wake_up(&ring->shutdown_wq);
}
@@ -652,7 +652,8 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
/* setup back pointer */
be->blkif->be = be;
- err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
+ err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL,
+ backend_changed,
"%s/%s", dev->nodename, "physical-device");
if (err)
goto fail;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9a57af79f330..1b06c8e46ffa 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -47,6 +47,7 @@
#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/workqueue.h>
+#include <linux/sched/mm.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@@ -935,7 +936,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
if (info->feature_discard) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
blk_queue_max_discard_sectors(rq, get_capacity(gd));
- rq->limits.discard_granularity = info->discard_granularity;
+ rq->limits.discard_granularity = info->discard_granularity ?:
+ info->physical_sector_size;
rq->limits.discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
@@ -2168,19 +2170,12 @@ static void blkfront_closing(struct blkfront_info *info)
static void blkfront_setup_discard(struct blkfront_info *info)
{
- int err;
- unsigned int discard_granularity;
- unsigned int discard_alignment;
-
info->feature_discard = 1;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "discard-granularity", "%u", &discard_granularity,
- "discard-alignment", "%u", &discard_alignment,
- NULL);
- if (!err) {
- info->discard_granularity = discard_granularity;
- info->discard_alignment = discard_alignment;
- }
+ info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-granularity",
+ 0);
+ info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-alignment", 0);
info->feature_secdiscard =
!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
0);
@@ -2188,10 +2183,12 @@ static void blkfront_setup_discard(struct blkfront_info *info)
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
{
- unsigned int psegs, grants;
+ unsigned int psegs, grants, memflags;
int err, i;
struct blkfront_info *info = rinfo->dev_info;
+ memflags = memalloc_noio_save();
+
if (info->max_indirect_segments == 0) {
if (!HAS_EXTRA_REQ)
grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
@@ -2223,7 +2220,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
BUG_ON(!list_empty(&rinfo->indirect_pages));
for (i = 0; i < num; i++) {
- struct page *indirect_page = alloc_page(GFP_NOIO);
+ struct page *indirect_page = alloc_page(GFP_KERNEL);
if (!indirect_page)
goto out_of_memory;
list_add(&indirect_page->lru, &rinfo->indirect_pages);
@@ -2234,15 +2231,15 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
rinfo->shadow[i].grants_used =
kvcalloc(grants,
sizeof(rinfo->shadow[i].grants_used[0]),
- GFP_NOIO);
+ GFP_KERNEL);
rinfo->shadow[i].sg = kvcalloc(psegs,
sizeof(rinfo->shadow[i].sg[0]),
- GFP_NOIO);
+ GFP_KERNEL);
if (info->max_indirect_segments)
rinfo->shadow[i].indirect_grants =
kvcalloc(INDIRECT_GREFS(grants),
sizeof(rinfo->shadow[i].indirect_grants[0]),
- GFP_NOIO);
+ GFP_KERNEL);
if ((rinfo->shadow[i].grants_used == NULL) ||
(rinfo->shadow[i].sg == NULL) ||
(info->max_indirect_segments &&
@@ -2251,6 +2248,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
sg_init_table(rinfo->shadow[i].sg, psegs);
}
+ memalloc_noio_restore(memflags);
return 0;
@@ -2270,6 +2268,9 @@ out_of_memory:
__free_page(indirect_page);
}
}
+
+ memalloc_noio_restore(memflags);
+
return -ENOMEM;
}
@@ -2421,7 +2422,7 @@ static void blkfront_connect(struct blkfront_info *info)
for (i = 0; i < info->nr_rings; i++)
kick_pending_request_queues(&info->rinfo[i]);
- device_add_disk(&info->xbdev->dev, info->gd);
+ device_add_disk(&info->xbdev->dev, info->gd, NULL);
info->is_ready = 1;
return;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 76abe40bfa83..104206a79501 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -873,7 +873,7 @@ static ssize_t mm_stat_show(struct device *dev,
zram->limit_pages << PAGE_SHIFT,
max_used << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.same_pages),
- pool_stats.pages_compacted,
+ atomic_long_read(&pool_stats.pages_compacted),
(u64)atomic64_read(&zram->stats.huge_pages));
up_read(&zram->init_lock);
@@ -1729,8 +1729,7 @@ static int zram_add(void)
zram->disk->queue->backing_dev_info->capabilities |=
(BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
- disk_to_dev(zram->disk)->groups = zram_disk_attr_groups;
- add_disk(zram->disk);
+ device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
@@ -1766,6 +1765,7 @@ static int zram_remove(struct zram *zram)
mutex_unlock(&bdev->bd_mutex);
zram_debugfs_unregister(zram);
+
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
zram_reset_device(zram);
@@ -1802,7 +1802,8 @@ static ssize_t hot_add_show(struct class *class,
return ret;
return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
-static CLASS_ATTR_RO(hot_add);
+static struct class_attribute class_attr_hot_add =
+ __ATTR(hot_add, 0400, hot_add_show, NULL);
static ssize_t hot_remove_store(struct class *class,
struct class_attribute *attr,
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index e3e4d929e74f..ff6203c331ff 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -324,6 +324,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x4103, "BCM4330B1" }, /* 002.001.003 */
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
+ { 0x4606, "BCM4324B5" }, /* 002.006.006 */
{ 0x6109, "BCM4335C0" }, /* 003.001.009 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
{ 0x2122, "BCM4343A0" }, /* 001.001.034 */
@@ -334,6 +335,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
};
static const struct bcm_subver_table bcm_usb_subver_table[] = {
+ { 0x2105, "BCM20703A1" }, /* 001.001.005 */
{ 0x210b, "BCM43142A0" }, /* 001.001.011 */
{ 0x2112, "BCM4314A0" }, /* 001.001.018 */
{ 0x2118, "BCM20702A0" }, /* 001.001.024 */
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 7df3eed1ef5e..874172aa8e41 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -166,8 +166,10 @@ static int btqcomsmd_probe(struct platform_device *pdev)
btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
btqcomsmd_cmd_callback, btq);
- if (IS_ERR(btq->cmd_channel))
- return PTR_ERR(btq->cmd_channel);
+ if (IS_ERR(btq->cmd_channel)) {
+ ret = PTR_ERR(btq->cmd_channel);
+ goto destroy_acl_channel;
+ }
/* The local-bd-address property is usually injected by the
* bootloader which has access to the allocated BD address.
@@ -179,8 +181,10 @@ static int btqcomsmd_probe(struct platform_device *pdev)
}
hdev = hci_alloc_dev();
- if (!hdev)
- return -ENOMEM;
+ if (!hdev) {
+ ret = -ENOMEM;
+ goto destroy_cmd_channel;
+ }
hci_set_drvdata(hdev, btq);
btq->hdev = hdev;
@@ -194,14 +198,21 @@ static int btqcomsmd_probe(struct platform_device *pdev)
hdev->set_bdaddr = qca_set_bdaddr_rome;
ret = hci_register_dev(hdev);
- if (ret < 0) {
- hci_free_dev(hdev);
- return ret;
- }
+ if (ret < 0)
+ goto hci_free_dev;
platform_set_drvdata(pdev, btq);
return 0;
+
+hci_free_dev:
+ hci_free_dev(hdev);
+destroy_cmd_channel:
+ rpmsg_destroy_ept(btq->cmd_channel);
+destroy_acl_channel:
+ rpmsg_destroy_ept(btq->acl_channel);
+
+ return ret;
}
static int btqcomsmd_remove(struct platform_device *pdev)
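The btqcomsmd_probe() rework above is the standard goto-unwind ladder: each failure jumps to the label that releases exactly what was acquired before it, in reverse order. A generic, self-contained sketch of that shape (probe_demo and malloc are illustrative stand-ins for channel/hdev allocation):

#include <stdio.h>
#include <stdlib.h>

static int probe_demo(void)
{
	void *acl, *cmd, *hdev;
	int ret;

	acl = malloc(16);		/* first resource */
	if (!acl)
		return -1;

	cmd = malloc(16);		/* second resource */
	if (!cmd) {
		ret = -1;
		goto destroy_acl;
	}

	hdev = malloc(16);		/* third resource */
	if (!hdev) {
		ret = -1;
		goto destroy_cmd;
	}

	/* Success: a real driver keeps these until its remove() hook;
	 * freed here only so the demo does not leak. */
	free(hdev);
	free(cmd);
	free(acl);
	return 0;

destroy_cmd:
	free(cmd);
destroy_acl:
	free(acl);
	return ret;
}

int main(void)
{
	return probe_demo() ? 1 : 0;
}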
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 8d1cd2479e36..cc51395d8b0e 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -343,11 +343,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
* the end.
*/
len = patch_length;
- buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
- GFP_KERNEL);
+ buf = kvmalloc(patch_length, GFP_KERNEL);
if (!buf)
return -ENOMEM;
+ memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4);
memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
*_buf = buf;
@@ -415,8 +415,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
if (ret < 0)
return ret;
ret = fw->size;
- *buff = kmemdup(fw->data, ret, GFP_KERNEL);
- if (!*buff)
+ *buff = kvmalloc(fw->size, GFP_KERNEL);
+ if (*buff)
+ memcpy(*buff, fw->data, ret);
+ else
ret = -ENOMEM;
release_firmware(fw);
@@ -454,14 +456,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
goto out;
if (btrtl_dev->cfg_len > 0) {
- tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
+ tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
if (!tbuff) {
ret = -ENOMEM;
goto out;
}
memcpy(tbuff, fw_data, ret);
- kfree(fw_data);
+ kvfree(fw_data);
memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
ret += btrtl_dev->cfg_len;
@@ -474,7 +476,7 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
ret = rtl_download_firmware(hdev, fw_data, ret);
out:
- kfree(fw_data);
+ kvfree(fw_data);
return ret;
}
@@ -501,8 +503,8 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
void btrtl_free(struct btrtl_device_info *btrtl_dev)
{
- kfree(btrtl_dev->fw_data);
- kfree(btrtl_dev->cfg_data);
+ kvfree(btrtl_dev->fw_data);
+ kvfree(btrtl_dev->cfg_data);
kfree(btrtl_dev);
}
EXPORT_SYMBOL_GPL(btrtl_free);
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 59e5fc5eec8f..3e386f68faa0 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -115,6 +115,7 @@ struct bcm_device {
u32 oper_speed;
int irq;
bool irq_active_low;
+ bool irq_acquired;
#ifdef CONFIG_PM
struct hci_uart *hu;
@@ -288,6 +289,8 @@ static int bcm_request_irq(struct bcm_data *bcm)
goto unlock;
}
+ bdev->irq_acquired = true;
+
device_init_wakeup(bdev->dev, true);
pm_runtime_set_autosuspend_delay(bdev->dev,
@@ -456,7 +459,7 @@ static int bcm_close(struct hci_uart *hu)
}
if (bdev) {
- if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) {
+ if (IS_ENABLED(CONFIG_PM) && bdev->irq_acquired) {
devm_free_irq(bdev->dev, bdev->irq, bdev);
device_init_wakeup(bdev->dev, false);
pm_runtime_disable(bdev->dev);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 8eede1197cd2..79b96251de80 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -257,6 +257,9 @@ static int h5_close(struct hci_uart *hu)
skb_queue_purge(&h5->rel);
skb_queue_purge(&h5->unrel);
+ kfree_skb(h5->rx_skb);
+ h5->rx_skb = NULL;
+
if (h5->vnd && h5->vnd->close)
h5->vnd->close(h5);
@@ -803,7 +806,7 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (!h5)
return -ENOMEM;
- set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);
+ set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags);
h5->hu = &h5->serdev_hu;
h5->serdev_hu.serdev = serdev;
@@ -882,6 +885,11 @@ static int h5_btrtl_setup(struct h5 *h5)
/* Give the device some time before the hci-core sends it a reset */
usleep_range(10000, 20000);
+ /* Enable controller to do both LE scan and BR/EDR inquiry
+ * simultaneously.
+ */
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &h5->hu->hdev->quirks);
+
out_free:
btrtl_free(btrtl_dev);
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index efeb8137ec67..48560e646e53 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -545,6 +545,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
percpu_up_write(&hu->proto_lock);
+ cancel_work_sync(&hu->init_ready);
cancel_work_sync(&hu->write_work);
if (hdev) {
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 46e20444ba19..7b3aade431e5 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -369,7 +369,10 @@ void hci_uart_unregister_device(struct hci_uart *hu)
struct hci_dev *hdev = hu->hdev;
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
- hci_unregister_dev(hdev);
+
+ cancel_work_sync(&hu->init_ready);
+ if (test_bit(HCI_UART_REGISTERED, &hu->flags))
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
cancel_work_sync(&hu->write_work);
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index e906ecfe23dd..9cb0733a0399 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -292,8 +292,10 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
goto error;
mc_adev = resource->data;
- if (!mc_adev)
+ if (!mc_adev) {
+ error = -EINVAL;
goto error;
+ }
*new_mc_adev = mc_adev;
return 0;
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 7226cfc49b6f..3f806599748a 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -129,7 +129,12 @@ error_destroy_mc_io:
*/
void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
{
- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+ struct fsl_mc_device *dpmcp_dev;
+
+ if (!mc_io)
+ return;
+
+ dpmcp_dev = mc_io->dpmcp_dev;
if (dpmcp_dev)
fsl_mc_io_unset_dpmcp(mc_io);
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index e31c02dc7770..cbd970fb02f1 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -359,6 +359,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
}
/*
+ * Released firmware describes the IO port max address as 0x3fff, which is
+ * the max host bus address. Fixup to a proper range. This will probably
+ * never be fixed in firmware.
+ */
+static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
+ struct resource *r)
+{
+ if (r->end != 0x3fff)
+ return;
+
+ if (r->start == 0xe4)
+ r->end = 0xe4 + 0x04 - 1;
+ else if (r->start == 0x2f8)
+ r->end = 0x2f8 + 0x08 - 1;
+ else
+ dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
+ r);
+}
+
+/*
* hisi_lpc_acpi_set_io_res - set the resources for a child
* @child: the device node to be updated the I/O resource
* @hostdev: the device node associated with host controller
@@ -419,8 +439,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
return -ENOMEM;
}
count = 0;
- list_for_each_entry(rentry, &resource_list, node)
- resources[count++] = *rentry->res;
+ list_for_each_entry(rentry, &resource_list, node) {
+ resources[count] = *rentry->res;
+ hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
+ count++;
+ }
acpi_dev_free_resource_list(&resource_list);
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index 1b14256376d2..7c1da45be166 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -544,10 +544,8 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
++id;
ret = device_register(&dev->dev);
- if (ret) {
+ if (ret)
put_device(&dev->dev);
- kfree(dev);
- }
}
}
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index b040447575ad..dcfb32ee5cb6 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev)
*/
l3->debug_irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
- 0x0, "l3-dbg-irq", l3);
+ IRQF_NO_THREAD, "l3-dbg-irq", l3);
if (ret) {
dev_err(l3->dev, "request_irq failed for %d\n",
l3->debug_irq);
@@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev)
l3->app_irq = platform_get_irq(pdev, 1);
ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
- 0x0, "l3-app-irq", l3);
+ IRQF_NO_THREAD, "l3-app-irq", l3);
if (ret)
dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
index a6444244c411..bfb67aa00bec 100644
--- a/drivers/bus/qcom-ebi2.c
+++ b/drivers/bus/qcom-ebi2.c
@@ -357,8 +357,10 @@ static int qcom_ebi2_probe(struct platform_device *pdev)
/* Figure out the chipselect */
ret = of_property_read_u32(child, "reg", &csindex);
- if (ret)
+ if (ret) {
+ of_node_put(child);
return ret;
+ }
if (csindex > 5) {
dev_err(dev,
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 1b76d9585902..2ca2cc56bcef 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -345,7 +345,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
if (ret)
goto unlock;
- *buf = readl(rsb->regs + RSB_DATA);
+ *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0);
unlock:
mutex_unlock(&rsb->lock);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 54c8c8644df2..b6a278183d82 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1814,7 +1814,9 @@ static int sysc_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- reset_control_assert(ddata->rsts);
+
+ if (!reset_control_status(ddata->rsts))
+ reset_control_assert(ddata->rsts);
unprepare:
sysc_unprepare(ddata);
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 72cd96a8eb19..c4192f9e4db9 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -775,6 +775,13 @@ static int probe_gdrom_setupqueue(void)
static int probe_gdrom(struct platform_device *devptr)
{
int err;
+
+ /*
+ * Ensure our "one" device is initialized properly in case of previous
+ * usages of it
+ */
+ memset(&gd, 0, sizeof(gd));
+
/* Start the device */
if (gdrom_execute_diagnostic() != 1) {
pr_warning("ATA Probe for GDROM failed\n");
@@ -857,6 +864,8 @@ static int remove_gdrom(struct platform_device *devptr)
if (gdrom_major)
unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
unregister_cdrom(gd.cd_info);
+ kfree(gd.cd_info);
+ kfree(gd.toc);
return 0;
}
@@ -872,7 +881,7 @@ static struct platform_driver gdrom_driver = {
static int __init init_gdrom(void)
{
int rc;
- gd.toc = NULL;
+
rc = platform_driver_register(&gdrom_driver);
if (rc)
return rc;
@@ -888,8 +897,6 @@ static void __exit exit_gdrom(void)
{
platform_device_unregister(pd);
platform_driver_unregister(&gdrom_driver);
- kfree(gd.toc);
- kfree(gd.cd_info);
}
module_init(init_gdrom);
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 6231714ef3c8..cee9e42ce547 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -125,7 +125,7 @@ config AGP_HP_ZX1
config AGP_PARISC
tristate "HP Quicksilver AGP support"
- depends on AGP && PARISC && 64BIT
+ depends on AGP && PARISC && 64BIT && IOMMU_SBA
help
This option gives you AGP GART support for the HP Quicksilver
AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c6271ce250b3..0941d38b2d32 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -304,8 +304,10 @@ static int intel_gtt_setup_scratch_page(void)
if (intel_private.needs_dmar) {
dma_addr = pci_map_page(intel_private.pcidev, page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
+ if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) {
+ __free_page(page);
return -EINVAL;
+ }
intel_private.scratch_page_dma = dma_addr;
} else
@@ -846,6 +848,7 @@ void intel_gtt_insert_page(dma_addr_t addr,
unsigned int flags)
{
intel_private.driver->write_entry(addr, pg, flags);
+ readl(intel_private.gtt + pg);
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
@@ -871,7 +874,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
j++;
}
}
- wmb();
+ readl(intel_private.gtt + j - 1);
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
@@ -1105,6 +1108,7 @@ static void i9xx_cleanup(void)
static void i9xx_chipset_flush(void)
{
+ wmb();
if (intel_private.i9xx_flush_page)
writel(1, intel_private.i9xx_flush_page);
}
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index c0732f032248..68f02318cee3 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -975,6 +975,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
if (ACPI_SUCCESS(status)) {
hdp->hd_phys_address = addr.address.minimum;
hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length);
+ if (!hdp->hd_address)
+ return AE_ERROR;
if (hpet_is_known(hdp)) {
iounmap(hdp->hd_address);
@@ -988,6 +990,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
hdp->hd_phys_address = fixmem32->address;
hdp->hd_address = ioremap(fixmem32->address,
HPET_RANGE_SIZE);
+ if (!hdp->hd_address)
+ return AE_ERROR;
if (hpet_is_known(hdp)) {
iounmap(hdp->hd_address);
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index 62c6696c1dbd..b6d7db362b21 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -216,6 +216,7 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "Failed to enable SA power-domain\n");
+ pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index f615684028af..ba01f24db6db 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -72,7 +72,7 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
*/
if (retval > 0)
usleep_range(period_us,
- period_us + min(1, period_us / 100));
+ period_us + max(1, period_us / 100));
*(u32 *)data = readl(priv->io_base);
retval += sizeof(u32);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 980eb7c60952..cb85516e1eb3 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -30,6 +30,7 @@
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
+#include <linux/vmalloc.h>
#define PFX "IPMI message handler: "
@@ -1089,7 +1090,7 @@ static void free_user_work(struct work_struct *work)
remove_work);
cleanup_srcu_struct(&user->release_barrier);
- kfree(user);
+ vfree(user);
}
int ipmi_create_user(unsigned int if_num,
@@ -1121,7 +1122,7 @@ int ipmi_create_user(unsigned int if_num,
if (rv)
return rv;
- new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
+ new_user = vzalloc(sizeof(*new_user));
if (!new_user)
return -ENOMEM;
@@ -1170,7 +1171,7 @@ int ipmi_create_user(unsigned int if_num,
out_kfree:
srcu_read_unlock(&ipmi_interfaces_srcu, index);
- kfree(new_user);
+ vfree(new_user);
return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
@@ -3134,8 +3135,8 @@ static void __get_guid(struct ipmi_smi *intf)
if (rv)
/* Send failed, no GUID available. */
bmc->dyn_guid_set = 0;
-
- wait_event(intf->waitq, bmc->dyn_guid_set != 2);
+ else
+ wait_event(intf->waitq, bmc->dyn_guid_set != 2);
/* dyn_guid_set makes the guid data available. */
smp_rmb();
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index 022e03634ce2..9e9700b1a8e6 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -18,11 +18,6 @@ module_param_named(trypci, si_trypci, bool, 0);
MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
" default scan of the interfaces identified via pci");
-#define PCI_CLASS_SERIAL_IPMI 0x0c07
-#define PCI_CLASS_SERIAL_IPMI_SMIC 0x0c0700
-#define PCI_CLASS_SERIAL_IPMI_KCS 0x0c0701
-#define PCI_CLASS_SERIAL_IPMI_BT 0x0c0702
-
#define PCI_DEVICE_ID_HP_MMC 0x121A
static void ipmi_pci_cleanup(struct si_sm_io *io)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d5f970d039bb..297a716f5a56 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1150,14 +1150,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* We take into account the first, second and third-order deltas
* in order to make our estimate.
*/
- delta = sample.jiffies - state->last_time;
- state->last_time = sample.jiffies;
+ delta = sample.jiffies - READ_ONCE(state->last_time);
+ WRITE_ONCE(state->last_time, sample.jiffies);
- delta2 = delta - state->last_delta;
- state->last_delta = delta;
+ delta2 = delta - READ_ONCE(state->last_delta);
+ WRITE_ONCE(state->last_delta, delta);
- delta3 = delta2 - state->last_delta2;
- state->last_delta2 = delta2;
+ delta3 = delta2 - READ_ONCE(state->last_delta2);
+ WRITE_ONCE(state->last_delta2, delta2);
if (delta < 0)
delta = -delta;
@@ -2071,7 +2071,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (crng_init < 2)
return -ENODATA;
- crng_reseed(&primary_crng, NULL);
+ crng_reseed(&primary_crng, &input_pool);
crng_global_init_time = jiffies - 1;
return 0;
default:
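As background for the add_timer_randomness() hunk above: the READ_ONCE/WRITE_ONCE pairs make each shared field read and written exactly once, so concurrent samplers cannot observe torn or re-fetched values. A rough userspace analogue of the access pattern using C11 relaxed atomics (not the kernel RNG; names are illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct timer_state {
	_Atomic int64_t last_time;
	_Atomic int64_t last_delta;
};

/* one read and one write per shared field, as in the patched kernel code */
static int64_t add_sample(struct timer_state *s, int64_t now)
{
	int64_t delta, delta2;

	delta = now - atomic_load_explicit(&s->last_time, memory_order_relaxed);
	atomic_store_explicit(&s->last_time, now, memory_order_relaxed);

	delta2 = delta - atomic_load_explicit(&s->last_delta, memory_order_relaxed);
	atomic_store_explicit(&s->last_delta, delta, memory_order_relaxed);

	return delta2 < 0 ? -delta2 : delta2;
}

int main(void)
{
	struct timer_state s = { 0, 0 };

	printf("%lld\n", (long long)add_sample(&s, 100));
	printf("%lld\n", (long long)add_sample(&s, 130));
	return 0;
}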
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 8eeb4190207d..dce22b7fc544 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -776,17 +776,21 @@ static int __init tlclk_init(void)
{
int ret;
+ telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
+
+ alarm_events = kzalloc(sizeof(struct tlclk_alarms), GFP_KERNEL);
+ if (!alarm_events) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
if (ret < 0) {
printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
+ kfree(alarm_events);
return ret;
}
tlclk_major = ret;
- alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
- if (!alarm_events) {
- ret = -ENOMEM;
- goto out1;
- }
/* Read telecom clock IRQ number (Set by BIOS) */
if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
@@ -795,7 +799,6 @@ static int __init tlclk_init(void)
ret = -EBUSY;
goto out2;
}
- telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
@@ -836,8 +839,8 @@ out3:
release_region(TLCLK_BASE, 8);
out2:
kfree(alarm_events);
-out1:
unregister_chrdev(tlclk_major, "telco_clock");
+out1:
return ret;
}
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
index 5a8720df2b51..462476467bff 100644
--- a/drivers/char/tpm/eventlog/common.c
+++ b/drivers/char/tpm/eventlog/common.c
@@ -104,20 +104,20 @@ static int tpm_read_log(struct tpm_chip *chip)
*
* If an event log is found then the securityfs files are setup to
* export it to userspace, otherwise nothing is done.
- *
- * Returns -ENODEV if the firmware has no event log or securityfs is not
- * supported.
*/
-int tpm_bios_log_setup(struct tpm_chip *chip)
+void tpm_bios_log_setup(struct tpm_chip *chip)
{
const char *name = dev_name(&chip->dev);
unsigned int cnt;
int log_version;
int rc = 0;
+ if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
+ return;
+
rc = tpm_read_log(chip);
if (rc < 0)
- return rc;
+ return;
log_version = rc;
cnt = 0;
@@ -163,13 +163,12 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
cnt++;
}
- return 0;
+ return;
err:
- rc = PTR_ERR(chip->bios_dir[cnt]);
chip->bios_dir[cnt] = NULL;
tpm_bios_log_teardown(chip);
- return rc;
+ return;
}
void tpm_bios_log_teardown(struct tpm_chip *chip)
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
index 3e673ab22cb4..abd3beeb5158 100644
--- a/drivers/char/tpm/eventlog/efi.c
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -43,6 +43,11 @@ int tpm_read_log_efi(struct tpm_chip *chip)
log_size = log_tbl->size;
memunmap(log_tbl);
+ if (!log_size) {
+ pr_warn("UEFI TPM log area empty\n");
+ return -EIO;
+ }
+
log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
MEMREMAP_WB);
if (!log_tbl) {
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
index 58c84784ba25..a4621c83e2bf 100644
--- a/drivers/char/tpm/eventlog/tpm1.c
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -129,6 +129,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
u32 converted_event_size;
u32 converted_event_type;
+ (*pos)++;
converted_event_size = do_endian_conversion(event->event_size);
v += sizeof(struct tcpa_event) + converted_event_size;
@@ -146,7 +147,6 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
((v + sizeof(struct tcpa_event) + converted_event_size) >= limit))
return NULL;
- (*pos)++;
return v;
}
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
index 41b9f6c92da7..aec49c925cee 100644
--- a/drivers/char/tpm/eventlog/tpm2.c
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -143,6 +143,7 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
size_t event_size;
void *marker;
+ (*pos)++;
event_header = log->bios_event_log;
if (v == SEQ_START_TOKEN) {
@@ -167,7 +168,6 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
if (((v + event_size) >= limit) || (event_size == 0))
return NULL;
- (*pos)++;
return v;
}
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 0b01eb7b14e5..f79f87794273 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -276,13 +276,8 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
chip->cdev.owner = THIS_MODULE;
chip->cdevs.owner = THIS_MODULE;
- chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!chip->work_space.context_buf) {
- rc = -ENOMEM;
- goto out;
- }
- chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!chip->work_space.session_buf) {
+ rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
+ if (rc) {
rc = -ENOMEM;
goto out;
}
@@ -463,9 +458,7 @@ int tpm_chip_register(struct tpm_chip *chip)
tpm_sysfs_add_device(chip);
- rc = tpm_bios_log_setup(chip);
- if (rc != 0 && rc != -ENODEV)
- return rc;
+ tpm_bios_log_setup(chip);
tpm_add_ppi(chip);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index f3501d05264f..b9a30f0b8825 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -188,6 +188,7 @@ struct tpm_space {
u8 *context_buf;
u32 session_tbl[3];
u8 *session_buf;
+ u32 buf_size;
};
enum tpm_chip_flags {
@@ -278,6 +279,9 @@ struct tpm_output_header {
#define TPM_TAG_RQU_COMMAND 193
+/* TPM2 specific constants. */
+#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */
+
struct stclear_flags_t {
__be16 tag;
u8 deactivated;
@@ -595,13 +599,13 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
int tpm2_probe(struct tpm_chip *chip);
int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
-int tpm2_init_space(struct tpm_space *space);
+int tpm2_init_space(struct tpm_space *space, unsigned int buf_size);
void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
u8 *cmd);
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
u32 cc, u8 *buf, size_t *bufsiz);
-int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
#endif
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index ef381caf5f43..e71c6b24aed1 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -960,6 +960,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
if (nr_commands !=
be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
+ rc = -EFAULT;
tpm_buf_destroy(&buf);
goto out;
}
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index d2e101b32482..9f4e22dcde27 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -43,18 +43,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
}
}
-int tpm2_init_space(struct tpm_space *space)
+int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
{
- space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ space->context_buf = kzalloc(buf_size, GFP_KERNEL);
if (!space->context_buf)
return -ENOMEM;
- space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ space->session_buf = kzalloc(buf_size, GFP_KERNEL);
if (space->session_buf == NULL) {
kfree(space->context_buf);
+ /* Prevent caller getting a dangling pointer. */
+ space->context_buf = NULL;
return -ENOMEM;
}
+ space->buf_size = buf_size;
return 0;
}
@@ -276,8 +279,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
sizeof(space->context_tbl));
memcpy(&chip->work_space.session_tbl, &space->session_tbl,
sizeof(space->session_tbl));
- memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE);
- memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE);
+ memcpy(chip->work_space.context_buf, space->context_buf,
+ space->buf_size);
+ memcpy(chip->work_space.session_buf, space->session_buf,
+ space->buf_size);
rc = tpm2_load_space(chip);
if (rc) {
@@ -456,7 +461,7 @@ static int tpm2_save_space(struct tpm_chip *chip)
continue;
rc = tpm2_save_context(chip, space->context_tbl[i],
- space->context_buf, PAGE_SIZE,
+ space->context_buf, space->buf_size,
&offset);
if (rc == -ENOENT) {
space->context_tbl[i] = 0;
@@ -474,9 +479,8 @@ static int tpm2_save_space(struct tpm_chip *chip)
continue;
rc = tpm2_save_context(chip, space->session_tbl[i],
- space->session_buf, PAGE_SIZE,
+ space->session_buf, space->buf_size,
&offset);
-
if (rc == -ENOENT) {
/* handle error saving session, just forget it */
space->session_tbl[i] = 0;
@@ -522,8 +526,10 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
sizeof(space->context_tbl));
memcpy(&space->session_tbl, &chip->work_space.session_tbl,
sizeof(space->session_tbl));
- memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE);
- memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE);
+ memcpy(space->context_buf, chip->work_space.context_buf,
+ space->buf_size);
+ memcpy(space->session_buf, chip->work_space.session_buf,
+ space->buf_size);
return 0;
}
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 763fc7e6c005..20f27100708b 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -26,6 +26,7 @@
#include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2"
+#define TPM_CRB_MAX_RESOURCES 3
static const guid_t crb_acpi_start_guid =
GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
@@ -95,7 +96,6 @@ enum crb_status {
struct crb_priv {
u32 sm;
const char *hid;
- void __iomem *iobase;
struct crb_regs_head __iomem *regs_h;
struct crb_regs_tail __iomem *regs_t;
u8 __iomem *cmd;
@@ -438,21 +438,27 @@ static const struct tpm_class_ops tpm_crb = {
static int crb_check_resource(struct acpi_resource *ares, void *data)
{
- struct resource *io_res = data;
+ struct resource *iores_array = data;
struct resource_win win;
struct resource *res = &(win.res);
+ int i;
if (acpi_dev_resource_memory(ares, res) ||
acpi_dev_resource_address_space(ares, &win)) {
- *io_res = *res;
- io_res->name = NULL;
+ for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) {
+ if (resource_type(iores_array + i) != IORESOURCE_MEM) {
+ iores_array[i] = *res;
+ iores_array[i].name = NULL;
+ break;
+ }
+ }
}
return 1;
}
-static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
- struct resource *io_res, u64 start, u32 size)
+static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
+ void __iomem **iobase_ptr, u64 start, u32 size)
{
struct resource new_res = {
.start = start,
@@ -464,10 +470,16 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
if (start != new_res.start)
return (void __iomem *) ERR_PTR(-EINVAL);
- if (!resource_contains(io_res, &new_res))
+ if (!iores)
return devm_ioremap_resource(dev, &new_res);
- return priv->iobase + (new_res.start - io_res->start);
+ if (!*iobase_ptr) {
+ *iobase_ptr = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(*iobase_ptr))
+ return *iobase_ptr;
+ }
+
+ return *iobase_ptr + (new_res.start - iores->start);
}
/*
@@ -494,9 +506,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
struct acpi_table_tpm2 *buf)
{
- struct list_head resources;
- struct resource io_res;
+ struct list_head acpi_resource_list;
+ struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} };
+ void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL};
struct device *dev = &device->dev;
+ struct resource *iores;
+ void __iomem **iobase_ptr;
+ int i;
u32 pa_high, pa_low;
u64 cmd_pa;
u32 cmd_size;
@@ -505,21 +521,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
u32 rsp_size;
int ret;
- INIT_LIST_HEAD(&resources);
- ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
- &io_res);
+ INIT_LIST_HEAD(&acpi_resource_list);
+ ret = acpi_dev_get_resources(device, &acpi_resource_list,
+ crb_check_resource, iores_array);
if (ret < 0)
return ret;
- acpi_dev_free_resource_list(&resources);
+ acpi_dev_free_resource_list(&acpi_resource_list);
- if (resource_type(&io_res) != IORESOURCE_MEM) {
+ if (resource_type(iores_array) != IORESOURCE_MEM) {
dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
return -EINVAL;
+ } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
+ IORESOURCE_MEM) {
+ dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
+ memset(iores_array + TPM_CRB_MAX_RESOURCES,
+ 0, sizeof(*iores_array));
+ iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
}
- priv->iobase = devm_ioremap_resource(dev, &io_res);
- if (IS_ERR(priv->iobase))
- return PTR_ERR(priv->iobase);
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+ if (buf->control_address >= iores_array[i].start &&
+ buf->control_address + sizeof(struct crb_regs_tail) - 1 <=
+ iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address,
+ sizeof(struct crb_regs_tail));
+
+ if (IS_ERR(priv->regs_t))
+ return PTR_ERR(priv->regs_t);
/* The ACPI IO region starts at the head area and continues to include
* the control area, as one nice sane region except for some older
@@ -527,9 +563,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
*/
if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
(priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
- if (buf->control_address == io_res.start +
+ if (iores &&
+ buf->control_address == iores->start +
sizeof(*priv->regs_h))
- priv->regs_h = priv->iobase;
+ priv->regs_h = *iobase_ptr;
else
dev_warn(dev, FW_BUG "Bad ACPI memory layout");
}
@@ -538,13 +575,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
if (ret)
return ret;
- priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
- sizeof(struct crb_regs_tail));
- if (IS_ERR(priv->regs_t)) {
- ret = PTR_ERR(priv->regs_t);
- goto out_relinquish_locality;
- }
-
/*
* PTT HW bug w/a: wake up the device to access
* possibly not retained registers.
@@ -556,13 +586,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
cmd_pa = ((u64)pa_high << 32) | pa_low;
- cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa,
- ioread32(&priv->regs_t->ctrl_cmd_size));
+ cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size);
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; iores_array[i].end; ++i) {
+ if (cmd_pa >= iores_array[i].start &&
+ cmd_pa <= iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ if (iores)
+ cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);
dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
pa_high, pa_low, cmd_size);
- priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
+ priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size);
if (IS_ERR(priv->cmd)) {
ret = PTR_ERR(priv->cmd);
goto out;
@@ -570,11 +613,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
rsp_pa = le64_to_cpu(__rsp_pa);
- rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa,
- ioread32(&priv->regs_t->ctrl_rsp_size));
+ rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+ if (rsp_pa >= iores_array[i].start &&
+ rsp_pa <= iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ if (iores)
+ rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);
if (cmd_pa != rsp_pa) {
- priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
+ priv->rsp = crb_map_res(dev, iores, iobase_ptr,
+ rsp_pa, rsp_size);
ret = PTR_ERR_OR_ZERO(priv->rsp);
goto out;
}
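
Aside on the hunk above (an editor's sketch, not part of the patch): the crb_map_io() rework replaces the single io_res/iobase pair with parallel arrays of firmware-reported regions and lazily created mappings, and each control/cmd/rsp address is first matched against the region that contains it. A minimal user-space illustration of that lookup-then-lazy-map pattern — all names are hypothetical, and malloc() merely stands in for devm_ioremap_resource() — could look like this:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_RES 3

struct mem_res { uint64_t start, end; };     /* inclusive range; end == 0 means unused */

static struct mem_res res_tbl[MAX_RES];      /* candidate regions reported by firmware */
static void *base_tbl[MAX_RES];              /* lazily created mapping per region */

/* Find the region that wholly contains [addr, addr + size - 1], or -1. */
static int find_res(uint64_t addr, uint32_t size)
{
	for (int i = 0; i < MAX_RES && res_tbl[i].end; i++)
		if (addr >= res_tbl[i].start &&
		    addr + size - 1 <= res_tbl[i].end)
			return i;
	return -1;
}

/* Return a pointer to addr inside region i, creating the mapping on first use. */
static void *map_res(int i, uint64_t addr)
{
	if (i < 0)
		return NULL;
	if (!base_tbl[i])
		base_tbl[i] = malloc(res_tbl[i].end - res_tbl[i].start + 1);
	if (!base_tbl[i])
		return NULL;
	return (char *)base_tbl[i] + (addr - res_tbl[i].start);
}

int main(void)
{
	res_tbl[0] = (struct mem_res){ 0xfed40000, 0xfed40fff };
	res_tbl[1] = (struct mem_res){ 0xfed70000, 0xfed70fff };

	void *regs = map_res(find_res(0xfed40080, 0x40), 0xfed40080);
	printf("control registers mapped at %p\n", regs);
	return 0;
}

The point of the indirection is that the second and third lookups (command and response buffers) may land in a different region than the control registers, and each backing mapping is created at most once.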
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 77e47dc5aacc..3ba67bc6baba 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 IBM Corporation
+ * Copyright (C) 2012-2020 IBM Corporation
*
* Author: Ashley Lai <ashleydlai@gmail.com>
*
@@ -141,6 +141,64 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
/**
+ * ibmvtpm_crq_send_init - Send a CRQ initialize message
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc;
+
+ rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "%s failed rc=%d\n", __func__, rc);
+
+ return rc;
+}
+
+/**
+ * tpm_ibmvtpm_resume - Resume from suspend
+ *
+ * @dev: device struct
+ *
+ * Return: Always 0.
+ */
+static int tpm_ibmvtpm_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ int rc = 0;
+
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_ENABLE_CRQ,
+ ibmvtpm->vdev->unit_address);
+ } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ if (rc) {
+ dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = vio_enable_interrupts(ibmvtpm->vdev);
+ if (rc) {
+ dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = ibmvtpm_crq_send_init(ibmvtpm);
+ if (rc)
+ dev_err(dev, "Error send_init rc=%d\n", rc);
+
+ return rc;
+}
+
+/**
* tpm_ibmvtpm_send() - Send a TPM command
* @chip: tpm chip struct
* @buf: buffer contains data to send
@@ -153,6 +211,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ bool retry = true;
int rc, sig;
if (!ibmvtpm->rtce_buf) {
@@ -186,18 +245,27 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
*/
ibmvtpm->tpm_processing_cmd = true;
+again:
rc = ibmvtpm_send_crq(ibmvtpm->vdev,
IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
count, ibmvtpm->rtce_dma_handle);
if (rc != H_SUCCESS) {
+ /*
+ * H_CLOSED can be returned after LPM resume. Call
+ * tpm_ibmvtpm_resume() to re-enable the CRQ, then retry
+ * ibmvtpm_send_crq() once before failing.
+ */
+ if (rc == H_CLOSED && retry) {
+ tpm_ibmvtpm_resume(ibmvtpm->dev);
+ retry = false;
+ goto again;
+ }
dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
- rc = 0;
ibmvtpm->tpm_processing_cmd = false;
- } else
- rc = 0;
+ }
spin_unlock(&ibmvtpm->rtce_lock);
- return rc;
+ return 0;
}
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
@@ -276,26 +344,6 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
}
/**
- * ibmvtpm_crq_send_init - Send a CRQ initialize message
- * @ibmvtpm: vtpm device struct
- *
- * Return:
- * 0 on success.
- * Non-zero on failure.
- */
-static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
-{
- int rc;
-
- rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
- if (rc != H_SUCCESS)
- dev_err(ibmvtpm->dev,
- "ibmvtpm_crq_send_init failed rc=%d\n", rc);
-
- return rc;
-}
-
-/**
* tpm_ibmvtpm_remove - ibm vtpm remove entry point
* @vdev: vio device struct
*
@@ -407,44 +455,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}
-/**
- * tpm_ibmvtpm_resume - Resume from suspend
- *
- * @dev: device struct
- *
- * Return: Always 0.
- */
-static int tpm_ibmvtpm_resume(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
- int rc = 0;
-
- do {
- if (rc)
- msleep(100);
- rc = plpar_hcall_norets(H_ENABLE_CRQ,
- ibmvtpm->vdev->unit_address);
- } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
-
- if (rc) {
- dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
- return rc;
- }
-
- rc = vio_enable_interrupts(ibmvtpm->vdev);
- if (rc) {
- dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
- return rc;
- }
-
- rc = ibmvtpm_crq_send_init(ibmvtpm);
- if (rc)
- dev_err(dev, "Error send_init rc=%d\n", rc);
-
- return rc;
-}
-
static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
return (status == 0);
@@ -578,6 +588,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
*/
while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
ibmvtpm_crq_process(crq, ibmvtpm);
+ wake_up_interruptible(&ibmvtpm->crq_queue.wq);
crq->valid = 0;
smp_wmb();
}
@@ -625,6 +636,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
}
crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
+ init_waitqueue_head(&crq_q->wq);
ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
CRQ_RES_BUF_SIZE,
DMA_BIDIRECTIONAL);
@@ -677,6 +689,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
if (rc)
goto init_irq_cleanup;
+ if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
+ ibmvtpm->rtce_buf != NULL,
+ HZ)) {
+ dev_err(dev, "CRQ response timed out\n");
+ goto init_irq_cleanup;
+ }
+
return tpm_chip_register(chip);
init_irq_cleanup:
do {
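
Aside on the tpm_ibmvtpm.c hunks above (an editor's sketch, not part of the patch): the send path now retries the CRQ send exactly once after re-initializing the queue when the hypervisor reports H_CLOSED, which can happen after an LPM resume. A stripped-down illustration of that retry-once shape, with fake_send() and fake_resume() as hypothetical stand-ins for ibmvtpm_send_crq() and tpm_ibmvtpm_resume():

#include <stdbool.h>
#include <stdio.h>

#define H_SUCCESS 0
#define H_CLOSED  2                /* transient: queue closed, e.g. after migration */

/* Hypothetical stand-ins for the hypervisor call and the resume path. */
static int fake_send(int *queue_open) { return *queue_open ? H_SUCCESS : H_CLOSED; }
static void fake_resume(int *queue_open) { *queue_open = 1; }

/* Send a command, retrying exactly once after re-enabling the queue. */
static int send_with_retry(int *queue_open)
{
	bool retry = true;
	int rc;

again:
	rc = fake_send(queue_open);
	if (rc != H_SUCCESS) {
		if (rc == H_CLOSED && retry) {
			fake_resume(queue_open);   /* re-enable the queue */
			retry = false;
			goto again;
		}
		fprintf(stderr, "send failed rc=%d\n", rc);
		return rc;
	}
	return 0;
}

int main(void)
{
	int queue_open = 0;                /* start closed, as after a live migration */
	return send_with_retry(&queue_open);
}

The retry flag guarantees termination: a second H_CLOSED falls through to the error path instead of looping.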
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
index 91dfe766d080..4f6a124601db 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.h
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -31,6 +31,7 @@ struct ibmvtpm_crq_queue {
struct ibmvtpm_crq *crq_addr;
u32 index;
u32 num_entry;
+ wait_queue_head_t wq;
};
struct ibmvtpm_dev {
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index f08949a5f678..5a3a4f095391 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -31,6 +31,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/kernel.h>
+#include <linux/dmi.h>
#include "tpm.h"
#include "tpm_tis_core.h"
@@ -53,8 +54,8 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
return container_of(data, struct tpm_tis_tcg_phy, priv);
}
-static bool interrupts = true;
-module_param(interrupts, bool, 0444);
+static int interrupts = -1;
+module_param(interrupts, int, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
static bool itpm;
@@ -67,6 +68,28 @@ module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
#endif
+static int tpm_tis_disable_irq(const struct dmi_system_id *d)
+{
+ if (interrupts == -1) {
+ pr_notice("tpm_tis: %s detected: disabling interrupts.\n", d->ident);
+ interrupts = 0;
+ }
+
+ return 0;
+}
+
+static const struct dmi_system_id tpm_tis_dmi_table[] = {
+ {
+ .callback = tpm_tis_disable_irq,
+ .ident = "ThinkPad T490s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
+ },
+ },
+ {}
+};
+
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int has_hid(struct acpi_device *dev, const char *hid)
{
@@ -196,6 +219,8 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
int irq = -1;
int rc;
+ dmi_check_system(tpm_tis_dmi_table);
+
rc = check_acpi_tpm2(dev);
if (rc)
return rc;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 0eaea3a7b8f4..c9a5f34097df 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -129,7 +129,8 @@ static bool check_locality(struct tpm_chip *chip, int l)
if (rc < 0)
return false;
- if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+ if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID
+ | TPM_ACCESS_REQUEST_USE)) ==
(TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
priv->locality = l;
return true;
@@ -138,58 +139,13 @@ static bool check_locality(struct tpm_chip *chip, int l)
return false;
}
-static bool locality_inactive(struct tpm_chip *chip, int l)
-{
- struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
- int rc;
- u8 access;
-
- rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
- if (rc < 0)
- return false;
-
- if ((access & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
- == TPM_ACCESS_VALID)
- return true;
-
- return false;
-}
-
static int release_locality(struct tpm_chip *chip, int l)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
- unsigned long stop, timeout;
- long rc;
tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
- stop = jiffies + chip->timeout_a;
-
- if (chip->flags & TPM_CHIP_FLAG_IRQ) {
-again:
- timeout = stop - jiffies;
- if ((long)timeout <= 0)
- return -1;
-
- rc = wait_event_interruptible_timeout(priv->int_queue,
- (locality_inactive(chip, l)),
- timeout);
-
- if (rc > 0)
- return 0;
-
- if (rc == -ERESTARTSYS && freezing(current)) {
- clear_thread_flag(TIF_SIGPENDING);
- goto again;
- }
- } else {
- do {
- if (locality_inactive(chip, l))
- return 0;
- tpm_msleep(TPM_TIMEOUT);
- } while (time_before(jiffies, stop));
- }
- return -1;
+ return 0;
}
static int request_locality(struct tpm_chip *chip, int l)
@@ -437,6 +393,9 @@ static void disable_interrupts(struct tpm_chip *chip)
u32 intmask;
int rc;
+ if (priv->irq == 0)
+ return;
+
rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
if (rc < 0)
intmask = 0;
@@ -984,9 +943,12 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
if (irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
- if (!(chip->flags & TPM_CHIP_FLAG_IRQ))
+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
dev_err(&chip->dev, FW_BUG
"TPM interrupt not working, polling instead\n");
+
+ disable_interrupts(chip);
+ }
} else {
tpm_tis_probe_irq(chip, intmask);
}
@@ -1001,7 +963,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
return 0;
out_err:
- if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
+ if (chip->ops->clk_enable != NULL)
chip->ops->clk_enable(chip, false);
tpm_tis_remove(chip);
diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
index 1a0e97a5da5a..162fb16243d0 100644
--- a/drivers/char/tpm/tpmrm-dev.c
+++ b/drivers/char/tpm/tpmrm-dev.c
@@ -22,7 +22,7 @@ static int tpmrm_open(struct inode *inode, struct file *file)
if (priv == NULL)
return -ENOMEM;
- rc = tpm2_init_space(&priv->space);
+ rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE);
if (rc) {
kfree(priv);
return -ENOMEM;
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 774748497ace..e56ac5adb5fc 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -159,12 +159,23 @@ static int tpk_ioctl(struct tty_struct *tty,
return 0;
}
+/*
+ * TTY operations hangup function.
+ */
+static void tpk_hangup(struct tty_struct *tty)
+{
+ struct ttyprintk_port *tpkp = tty->driver_data;
+
+ tty_port_hangup(&tpkp->port);
+}
+
static const struct tty_operations ttyprintk_ops = {
.open = tpk_open,
.close = tpk_close,
.write = tpk_write,
.write_room = tpk_write_room,
.ioctl = tpk_ioctl,
+ .hangup = tpk_hangup,
};
static const struct tty_port_operations null_ops = { };
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index b353a5e5f8b1..ca71ee939533 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -2142,6 +2142,7 @@ static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
{ 0 },
};
+MODULE_DEVICE_TABLE(virtio, id_table);
static unsigned int features[] = {
VIRTIO_CONSOLE_F_SIZE,
@@ -2154,6 +2155,7 @@ static struct virtio_device_id rproc_serial_id_table[] = {
#endif
{ 0 },
};
+MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table);
static unsigned int rproc_serial_features[] = {
};
@@ -2306,6 +2308,5 @@ static void __exit fini(void)
module_init(init);
module_exit(fini);
-MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio console driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index 90988e7a5b47..2e7da9b379d4 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -517,12 +517,17 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
return -EINVAL;
regmap_read(regmap, AT91_CKGR_MOR, &tmp);
- tmp &= ~MOR_KEY_MASK;
if (index && !(tmp & AT91_PMC_MOSCSEL))
- regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
+ tmp = AT91_PMC_MOSCSEL;
else if (!index && (tmp & AT91_PMC_MOSCSEL))
- regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
+ tmp = 0;
+ else
+ return 0;
+
+ regmap_update_bits(regmap, AT91_CKGR_MOR,
+ AT91_PMC_MOSCSEL | MOR_KEY_MASK,
+ tmp | AT91_PMC_KEY);
while (!clk_sam9x5_main_ready(regmap))
cpu_relax();
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 791770a563fc..6fac6383d024 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -78,6 +78,9 @@ static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
tmp_parent_rate = req->rate * div;
tmp_parent_rate = clk_hw_round_rate(parent,
tmp_parent_rate);
+ if (!tmp_parent_rate)
+ continue;
+
tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
if (tmp_rate < req->rate)
tmp_diff = req->rate - tmp_rate;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 7bef0666ae7e..e4fee233849d 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -1319,8 +1319,10 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
pll->hw.init = &init;
ret = devm_clk_hw_register(cprman->dev, &pll->hw);
- if (ret)
+ if (ret) {
+ kfree(pll);
return NULL;
+ }
return &pll->hw;
}
@@ -1447,13 +1449,13 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
return &clock->hw;
}
-static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman,
+static struct clk_hw *bcm2835_register_gate(struct bcm2835_cprman *cprman,
const struct bcm2835_gate_data *data)
{
- return clk_register_gate(cprman->dev, data->name, data->parent,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
- cprman->regs + data->ctl_reg,
- CM_GATE_BIT, 0, &cprman->regs_lock);
+ return clk_hw_register_gate(cprman->dev, data->name, data->parent,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ cprman->regs + data->ctl_reg,
+ CM_GATE_BIT, 0, &cprman->regs_lock);
}
typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman,
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 4080d4e78e8e..f3aaefafba89 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -211,6 +211,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
return ret;
err_reg:
+ of_node_put(s2mps11_clks[0].clk_np);
while (--i >= 0)
clkdev_drop(s2mps11_clks[i].lookup);
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index a985bf5e1ac6..c65d30bba700 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -103,6 +103,8 @@ static const struct clk_ops scmi_clk_ops = {
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
{
int ret;
+ unsigned long min_rate, max_rate;
+
struct clk_init_data init = {
.flags = CLK_GET_RATE_NOCACHE,
.num_parents = 0,
@@ -112,9 +114,23 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
sclk->hw.init = &init;
ret = devm_clk_hw_register(dev, &sclk->hw);
- if (!ret)
- clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate,
- sclk->info->range.max_rate);
+ if (ret)
+ return ret;
+
+ if (sclk->info->rate_discrete) {
+ int num_rates = sclk->info->list.num_rates;
+
+ if (num_rates <= 0)
+ return -EINVAL;
+
+ min_rate = sclk->info->list.rates[0];
+ max_rate = sclk->info->list.rates[num_rates - 1];
+ } else {
+ min_rate = sclk->info->range.min_rate;
+ max_rate = sclk->info->range.max_rate;
+ }
+
+ clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
return ret;
}
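
Aside on the clk-scmi.c hunk above (an editor's sketch, not part of the patch): the new code clamps the clock to a min/max taken either from the endpoints of a discrete rate list (assumed sorted ascending) or from the continuous range, instead of always using the range. A small hedged sketch of that selection, with hypothetical names:

#include <stdio.h>

struct rate_info {
	int discrete;                     /* 1: use list[], 0: use the min/max range */
	const unsigned long *list;
	int num_rates;
	unsigned long range_min, range_max;
};

/* Pick the clamp range the way the patch does: first/last list entry for
 * discrete clocks (list assumed sorted ascending), else the reported range. */
static int pick_rate_range(const struct rate_info *info,
			   unsigned long *min, unsigned long *max)
{
	if (info->discrete) {
		if (info->num_rates <= 0)
			return -1;
		*min = info->list[0];
		*max = info->list[info->num_rates - 1];
	} else {
		*min = info->range_min;
		*max = info->range_max;
	}
	return 0;
}

int main(void)
{
	static const unsigned long rates[] = { 100000000, 200000000, 400000000 };
	struct rate_info info = { .discrete = 1, .list = rates, .num_rates = 3 };
	unsigned long lo, hi;

	if (!pick_rate_range(&info, &lo, &hi))
		printf("clamp to [%lu, %lu] Hz\n", lo, hi);
	return 0;
}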
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 71621a171f8a..3806fd8fef0b 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -40,6 +40,17 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+static struct hlist_head *all_lists[] = {
+ &clk_root_list,
+ &clk_orphan_list,
+ NULL,
+};
+
+static struct hlist_head *orphan_list[] = {
+ &clk_orphan_list,
+ NULL,
+};
+
/*** private data structures ***/
struct clk_core {
@@ -101,7 +112,11 @@ static int clk_pm_runtime_get(struct clk_core *core)
return 0;
ret = pm_runtime_get_sync(core->dev);
- return ret < 0 ? ret : 0;
+ if (ret < 0) {
+ pm_runtime_put_noidle(core->dev);
+ return ret;
+ }
+ return 0;
}
static void clk_pm_runtime_put(struct clk_core *core)
@@ -2614,17 +2629,6 @@ static int inited = 0;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);
-static struct hlist_head *all_lists[] = {
- &clk_root_list,
- &clk_orphan_list,
- NULL,
-};
-
-static struct hlist_head *orphan_list[] = {
- &clk_orphan_list,
- NULL,
-};
-
static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
int level)
{
@@ -3105,6 +3109,9 @@ static int __clk_core_init(struct clk_core *core)
out:
clk_pm_runtime_put(core);
unlock:
+ if (ret)
+ hlist_del_init(&core->child_node);
+
clk_prepare_unlock();
if (!ret)
@@ -3321,6 +3328,34 @@ static const struct clk_ops clk_nodrv_ops = {
.set_parent = clk_nodrv_set_parent,
};
+static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
+ struct clk_core *target)
+{
+ int i;
+ struct clk_core *child;
+
+ for (i = 0; i < root->num_parents; i++)
+ if (root->parents[i] == target)
+ root->parents[i] = NULL;
+
+ hlist_for_each_entry(child, &root->children, child_node)
+ clk_core_evict_parent_cache_subtree(child, target);
+}
+
+/* Remove this clk from all parent caches */
+static void clk_core_evict_parent_cache(struct clk_core *core)
+{
+ struct hlist_head **lists;
+ struct clk_core *root;
+
+ lockdep_assert_held(&prepare_lock);
+
+ for (lists = all_lists; *lists; lists++)
+ hlist_for_each_entry(root, *lists, child_node)
+ clk_core_evict_parent_cache_subtree(root, core);
+
+}
+
/**
* clk_unregister - unregister a currently registered clock
* @clk: clock to unregister
@@ -3359,6 +3394,8 @@ void clk_unregister(struct clk *clk)
clk_core_set_parent_nolock(child, NULL);
}
+ clk_core_evict_parent_cache(clk->core);
+
hlist_del_init(&clk->core->child_node);
if (clk->core->prepare_count)
@@ -3591,20 +3628,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
/* search the list of notifiers for this clk */
list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
- break;
+ goto found;
/* if clk wasn't in the notifier list, allocate new clk_notifier */
- if (cn->clk != clk) {
- cn = kzalloc(sizeof(*cn), GFP_KERNEL);
- if (!cn)
- goto out;
+ cn = kzalloc(sizeof(*cn), GFP_KERNEL);
+ if (!cn)
+ goto out;
- cn->clk = clk;
- srcu_init_notifier_head(&cn->notifier_head);
+ cn->clk = clk;
+ srcu_init_notifier_head(&cn->notifier_head);
- list_add(&cn->node, &clk_notifier_list);
- }
+ list_add(&cn->node, &clk_notifier_list);
+found:
ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
clk->core->notifier_count++;
@@ -3629,32 +3665,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register);
*/
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
- struct clk_notifier *cn = NULL;
- int ret = -EINVAL;
+ struct clk_notifier *cn;
+ int ret = -ENOENT;
if (!clk || !nb)
return -EINVAL;
clk_prepare_lock();
- list_for_each_entry(cn, &clk_notifier_list, node)
- if (cn->clk == clk)
- break;
-
- if (cn->clk == clk) {
- ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+ list_for_each_entry(cn, &clk_notifier_list, node) {
+ if (cn->clk == clk) {
+ ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
- clk->core->notifier_count--;
+ clk->core->notifier_count--;
- /* XXX the notifier code should handle this better */
- if (!cn->notifier_head.head) {
- srcu_cleanup_notifier_head(&cn->notifier_head);
- list_del(&cn->node);
- kfree(cn);
+ /* XXX the notifier code should handle this better */
+ if (!cn->notifier_head.head) {
+ srcu_cleanup_notifier_head(&cn->notifier_head);
+ list_del(&cn->node);
+ kfree(cn);
+ }
+ break;
}
-
- } else {
- ret = -ENOENT;
}
clk_prepare_unlock();
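
Aside on the clk.c notifier hunks above (an editor's sketch, not part of the patch): the register path drops the old break-then-recompare idiom, which inspects whatever entry the iterator stopped on even when the clock was never found, in favour of an explicit found: label and a clean allocate-and-link fallback. A generic search-or-allocate sketch of the same shape on a plain singly-linked list, all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct notifier {
	const void *key;              /* the clock this entry belongs to */
	int count;                    /* stand-in for the notifier chain */
	struct notifier *next;
};

static struct notifier *notifier_list;

/* Find the entry for key, or allocate and link a new one. Jumping to
 * found: avoids ever dereferencing a non-match after the loop. */
static struct notifier *get_notifier(const void *key)
{
	struct notifier *cn;

	for (cn = notifier_list; cn; cn = cn->next)
		if (cn->key == key)
			goto found;

	cn = calloc(1, sizeof(*cn));
	if (!cn)
		return NULL;
	cn->key = key;
	cn->next = notifier_list;
	notifier_list = cn;
found:
	return cn;
}

int main(void)
{
	int clk_a, clk_b;
	struct notifier *na = get_notifier(&clk_a);
	struct notifier *nb = get_notifier(&clk_b);

	if (!na || !nb)
		return 1;
	na->count += 2;
	nb->count += 1;
	printf("a=%d b=%d (entry reused: %d)\n", na->count, nb->count,
	       get_notifier(&clk_a) == na);
	return 0;
}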
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 1c99e992d638..796b428998ae 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -491,7 +491,7 @@ struct clk *davinci_pll_clk_register(struct device *dev,
parent_name = postdiv_name;
}
- pllen = kzalloc(sizeof(*pllout), GFP_KERNEL);
+ pllen = kzalloc(sizeof(*pllen), GFP_KERNEL);
if (!pllen) {
ret = -ENOMEM;
goto err_unregister_postdiv;
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index bf46a0df2004..e3057bb5ffd8 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -436,8 +436,10 @@ static void __init jz4770_cgu_init(struct device_node *np)
cgu = ingenic_cgu_new(jz4770_cgu_clocks,
ARRAY_SIZE(jz4770_cgu_clocks), np);
- if (!cgu)
+ if (!cgu) {
pr_err("%s: failed to initialise CGU\n", __func__);
+ return;
+ }
retval = ingenic_cgu_register_clocks(cgu);
if (retval)
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index 3e04617ac47f..6fdad22a583d 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -197,7 +197,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (parent_rate == 0 || rate == 0)
return -EINVAL;
- old_rate = rate;
+ old_rate = clk_hw_get_rate(hw);
pllt = meson_clk_get_pll_settings(rate, pll);
if (!pllt)
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 499f5962c8b0..5d10733d6c04 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -74,6 +74,7 @@ struct clk_pm_cpu {
void __iomem *reg_div;
u8 shift_div;
struct regmap *nb_pm_base;
+ unsigned long l1_expiration;
};
#define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
@@ -428,33 +429,6 @@ static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
return val;
}
-static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index)
-{
- struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
- struct regmap *base = pm_cpu->nb_pm_base;
- int load_level;
-
- /*
- * We set the clock parent only if the DVFS is available but
- * not enabled.
- */
- if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base))
- return -EINVAL;
-
- /* Set the parent clock for all the load level */
- for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
- unsigned int reg, mask, val,
- offset = ARMADA_37XX_NB_TBG_SEL_OFF;
-
- armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
-
- val = index << offset;
- mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset;
- regmap_update_bits(base, reg, mask, val);
- }
- return 0;
-}
-
static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -502,8 +476,10 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
}
/*
- * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
- * respectively) to L0 frequency (1.2 Ghz) requires a significant
+ * Workaround when base CPU frequency is 1000 or 1200 MHz
+ *
+ * Switching the CPU from the L2 or L3 frequencies (250/300 or 200 MHz
+ * respectively) to L0 frequency (1/1.2 GHz) requires a significant
* amount of time to let VDD stabilize to the appropriate
* voltage. This amount of time is large enough that it cannot be
* covered by the hardware countdown register. Due to this, the CPU
@@ -513,26 +489,56 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
* To work around this problem, we prevent switching directly from the
* L2/L3 frequencies to the L0 frequency, and instead switch to the L1
* frequency in-between. The sequence therefore becomes:
- * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
+ * 1. First switch from L2/L3 (200/250/300 MHz) to L1 (500/600 MHz)
 * 2. Sleep 20ms for stabilizing VDD voltage
- * 3. Then switch from L1(600MHZ) to L0(1200Mhz).
+ * 3. Then switch from L1 (500/600 MHz) to L0 (1000/1200 MHz).
*/
-static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
+static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu,
+ unsigned int new_level, unsigned long rate,
+ struct regmap *base)
{
unsigned int cur_level;
- if (rate != 1200 * 1000 * 1000)
- return;
-
regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
- if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
+
+ if (cur_level == new_level)
+ return;
+
+ /*
+ * System wants to go to L1 on its own. If we are going from L2/L3,
+ * remember when 20ms will expire. If from L0, set the value so that
+ * next switch to L0 won't have to wait.
+ */
+ if (new_level == ARMADA_37XX_DVFS_LOAD_1) {
+ if (cur_level == ARMADA_37XX_DVFS_LOAD_0)
+ pm_cpu->l1_expiration = jiffies;
+ else
+ pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20);
return;
+ }
+
+ /*
+ * If we are switching to L2/L3, just invalidate the L1 expiration time;
+ * sleeping is not needed.
+ */
+ if (rate < 1000*1000*1000)
+ goto invalidate_l1_exp;
+
+ /*
+ * We are going to L0 with a rate >= 1 GHz. Check whether we have been at
+ * L1 long enough. If not, go to L1 for 20ms.
+ */
+ if (pm_cpu->l1_expiration && jiffies >= pm_cpu->l1_expiration)
+ goto invalidate_l1_exp;
regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
ARMADA_37XX_NB_CPU_LOAD_MASK,
ARMADA_37XX_DVFS_LOAD_1);
msleep(20);
+
+invalidate_l1_exp:
+ pm_cpu->l1_expiration = 0;
}
static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -566,7 +572,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
reg = ARMADA_37XX_NB_CPU_LOAD;
mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
- clk_pm_cpu_set_rate_wa(rate, base);
+ /* Apply workaround when base CPU frequency is 1000 or 1200 MHz */
+ if (parent_rate >= 1000*1000*1000)
+ clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base);
regmap_update_bits(base, reg, mask, load_level);
@@ -580,7 +588,6 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_pm_cpu_ops = {
.get_parent = clk_pm_cpu_get_parent,
- .set_parent = clk_pm_cpu_set_parent,
.round_rate = clk_pm_cpu_round_rate,
.set_rate = clk_pm_cpu_set_rate,
.recalc_rate = clk_pm_cpu_recalc_rate,
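
Aside on the armada-37xx-periph.c hunks above (an editor's sketch, not part of the patch): the reworked clk_pm_cpu_set_rate_wa() remembers when the mandatory 20 ms dwell at L1 will have elapsed, so a later switch to L0 sleeps only if that time has not passed yet. A user-space illustration of the same bookkeeping using a monotonic timestamp in place of jiffies, with hypothetical names and the hardware writes reduced to comments:

#include <stdio.h>
#include <time.h>

static struct timespec l1_expiration;       /* zero = no pending L1 dwell credit */

static struct timespec now_plus_ms(long ms)
{
	struct timespec t;

	clock_gettime(CLOCK_MONOTONIC, &t);
	t.tv_sec += ms / 1000;
	t.tv_nsec += (ms % 1000) * 1000000L;
	if (t.tv_nsec >= 1000000000L) {
		t.tv_sec++;
		t.tv_nsec -= 1000000000L;
	}
	return t;
}

static int l1_dwell_done(void)
{
	struct timespec now;

	if (!l1_expiration.tv_sec && !l1_expiration.tv_nsec)
		return 0;                    /* timer never armed */
	clock_gettime(CLOCK_MONOTONIC, &now);
	return now.tv_sec > l1_expiration.tv_sec ||
	       (now.tv_sec == l1_expiration.tv_sec &&
		now.tv_nsec >= l1_expiration.tv_nsec);
}

static void sleep_ms(long ms)
{
	struct timespec t = { ms / 1000, (ms % 1000) * 1000000L };

	nanosleep(&t, NULL);
}

/* Going to the top level: dwell 20 ms at the middle level unless the CPU has
 * already spent that long there since the timer was last armed. */
static void switch_to_l0(void)
{
	if (!l1_dwell_done()) {
		printf("dwelling 20 ms at L1 before L0\n");
		/* program L1 in hardware here */
		sleep_ms(20);
	}
	l1_expiration = (struct timespec){ 0, 0 };   /* consume the credit */
	/* program L0 in hardware here */
}

int main(void)
{
	/* Firmware moved to L1 on its own while at L2: arm the 20 ms timer. */
	l1_expiration = now_plus_ms(20);
	sleep_ms(25);                                /* 25 ms pass at L1 */
	switch_to_l0();                              /* no extra sleep needed */
	return 0;
}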
diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c
index 612d65ede10a..5370514959e1 100644
--- a/drivers/clk/mvebu/armada-37xx-xtal.c
+++ b/drivers/clk/mvebu/armada-37xx-xtal.c
@@ -15,8 +15,8 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#define NB_GPIO1_LATCH 0xC
-#define XTAL_MODE BIT(31)
+#define NB_GPIO1_LATCH 0x8
+#define XTAL_MODE BIT(9)
static int armada_3700_xtal_clock_probe(struct platform_device *pdev)
{
diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
index 45cfc57bff92..af6ac17c7dae 100644
--- a/drivers/clk/qcom/a53-pll.c
+++ b/drivers/clk/qcom/a53-pll.c
@@ -93,6 +93,7 @@ static const struct of_device_id qcom_a53pll_match_table[] = {
{ .compatible = "qcom,msm8916-a53pll" },
{ }
};
+MODULE_DEVICE_TABLE(of, qcom_a53pll_match_table);
static struct platform_driver qcom_a53pll_driver = {
.probe = qcom_a53pll_probe,
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index ac2b0aa1e8b5..03e0ade7a6f3 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -268,7 +268,7 @@ static struct clk_pll gpll0 = {
.l_reg = 0x21004,
.m_reg = 0x21008,
.n_reg = 0x2100c,
- .config_reg = 0x21014,
+ .config_reg = 0x21010,
.mode_reg = 0x21000,
.status_reg = 0x2101c,
.status_bit = 17,
@@ -295,7 +295,7 @@ static struct clk_pll gpll1 = {
.l_reg = 0x20004,
.m_reg = 0x20008,
.n_reg = 0x2000c,
- .config_reg = 0x20014,
+ .config_reg = 0x20010,
.mode_reg = 0x20000,
.status_reg = 0x2001c,
.status_bit = 17,
@@ -322,7 +322,7 @@ static struct clk_pll gpll2 = {
.l_reg = 0x4a004,
.m_reg = 0x4a008,
.n_reg = 0x4a00c,
- .config_reg = 0x4a014,
+ .config_reg = 0x4a010,
.mode_reg = 0x4a000,
.status_reg = 0x4a01c,
.status_bit = 17,
@@ -349,7 +349,7 @@ static struct clk_pll bimc_pll = {
.l_reg = 0x23004,
.m_reg = 0x23008,
.n_reg = 0x2300c,
- .config_reg = 0x23014,
+ .config_reg = 0x23010,
.mode_reg = 0x23000,
.status_reg = 0x2301c,
.status_bit = 17,
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 772a08101ddf..0ccd6b79cb5e 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -124,7 +124,7 @@ static struct pll_vco fabia_vco[] = {
static struct clk_alpha_pll gpll0 = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -134,58 +134,58 @@ static struct clk_alpha_pll gpll0 = {
.name = "gpll0",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll0_out_even = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_even",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll0_out_main = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_main",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll0_out_odd = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_odd",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll0_out_test = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_test",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll1 = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -195,58 +195,58 @@ static struct clk_alpha_pll gpll1 = {
.name = "gpll1",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll1_out_even = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_even",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll1_out_main = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_main",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll1_out_odd = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_odd",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll1_out_test = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_test",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll2 = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -256,58 +256,58 @@ static struct clk_alpha_pll gpll2 = {
.name = "gpll2",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll2_out_even = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_even",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll2_out_main = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_main",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll2_out_odd = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_odd",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll2_out_test = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_test",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll3 = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -317,58 +317,58 @@ static struct clk_alpha_pll gpll3 = {
.name = "gpll3",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll3_out_even = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_even",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll3_out_main = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_main",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll3_out_odd = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_odd",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll3_out_test = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_test",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll4 = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -378,52 +378,52 @@ static struct clk_alpha_pll gpll4 = {
.name = "gpll4",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll4_out_even = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_even",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll4_out_main = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_main",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll4_out_odd = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_odd",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll4_out_test = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_test",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 6d2b56891559..6e03b467395b 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -51,7 +51,7 @@ struct r9a06g032_clkdesc {
u16 sel, g1, r1, g2, r2;
} dual;
};
-} __packed;
+};
#define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \
{ .gate = _clk, .reset = _rst, \
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index b8da6e799423..6a371d05218d 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -166,7 +166,7 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
unsigned long flags,
spinlock_t *lock)
{
- struct clk *clk;
+ struct clk *clk = ERR_PTR(-ENOMEM);
struct clk_mux *mux = NULL;
struct clk_gate *gate = NULL;
struct clk_divider *div = NULL;
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index 7af48184b022..8d11d76e1db7 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -144,7 +144,7 @@ PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" };
PNAME(mux_hdmiphy_p) = { "hdmiphy_phy", "xin24m" };
PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu", "hdmiphy_aclk_cpu" };
-PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy" "usb480m" };
+PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy", "usb480m" };
PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "hdmiphy" };
PNAME(mux_pll_src_2plls_p) = { "cpll", "gpll" };
PNAME(mux_sclk_hdmi_cec_p) = { "cpll", "gpll", "xin24m" };
@@ -163,8 +163,6 @@ PNAME(mux_i2s_out_p) = { "i2s1_pre", "xin12m" };
PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" };
PNAME(mux_sclk_spdif_p) = { "sclk_spdif_src", "spdif_frac", "xin12m" };
-PNAME(mux_aclk_gpu_pre_p) = { "cpll_gpu", "gpll_gpu", "hdmiphy_gpu", "usb480m_gpu" };
-
PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
@@ -475,16 +473,9 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(24), 6, 10, DFLAGS,
RK2928_CLKGATE_CON(2), 8, GFLAGS),
- GATE(0, "cpll_gpu", "cpll", 0,
- RK2928_CLKGATE_CON(3), 13, GFLAGS),
- GATE(0, "gpll_gpu", "gpll", 0,
- RK2928_CLKGATE_CON(3), 13, GFLAGS),
- GATE(0, "hdmiphy_gpu", "hdmiphy", 0,
- RK2928_CLKGATE_CON(3), 13, GFLAGS),
- GATE(0, "usb480m_gpu", "usb480m", 0,
+ COMPOSITE(0, "aclk_gpu_pre", mux_pll_src_4plls_p, 0,
+ RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 13, GFLAGS),
- COMPOSITE_NOGATE(0, "aclk_gpu_pre", mux_aclk_gpu_pre_p, 0,
- RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS),
COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS,
@@ -589,8 +580,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS),
/* PD_GPU */
- GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS),
- GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 15, GFLAGS),
+ GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS),
+ GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
/* PD_BUS */
GATE(0, "sclk_initmem_mbist", "aclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS),
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 442309b56920..8086756e7f07 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1072,7 +1072,7 @@ static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = {
GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0),
GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
- GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
+ GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0),
GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
CLK_IGNORE_UNUSED, 0),
GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0,
@@ -1113,7 +1113,7 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
0),
GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0),
GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
- GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
+ GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0),
GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
CLK_IGNORE_UNUSED, 0),
GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0,
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 6473af8903c5..662bb441f139 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -597,7 +597,7 @@ static const struct samsung_div_clock exynos5800_div_clks[] __initconst = {
static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
- GATE_BUS_TOP, 24, 0, 0),
+ GATE_BUS_TOP, 24, CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
@@ -984,25 +984,25 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
- GATE_BUS_TOP, 5, 0, 0),
+ GATE_BUS_TOP, 5, CLK_IS_CRITICAL, 0),
GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
- GATE_BUS_TOP, 8, 0, 0),
+ GATE_BUS_TOP, 8, CLK_IS_CRITICAL, 0),
GATE(CLK_PCLK66_GPIO, "pclk66_gpio", "mout_user_pclk66_gpio",
GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen",
GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk266_isp", "mout_user_aclk266_isp",
- GATE_BUS_TOP, 13, 0, 0),
+ GATE_BUS_TOP, 13, CLK_IS_CRITICAL, 0),
GATE(0, "aclk166", "mout_user_aclk166",
GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
- GATE_BUS_TOP, 16, 0, 0),
+ GATE_BUS_TOP, 16, CLK_IS_CRITICAL, 0),
GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
GATE_BUS_TOP, 17, CLK_IS_CRITICAL, 0),
GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
@@ -1208,8 +1208,10 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE_IP_GSCL1, 3, 0, 0),
GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "dout_gscl_blk_333",
GATE_IP_GSCL1, 4, 0, 0),
- GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, 0, 0),
- GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, 0, 0),
+ GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12,
+ CLK_IS_CRITICAL, 0),
+ GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13,
+ CLK_IS_CRITICAL, 0),
GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3,", "dout_gscl_blk_333",
GATE_IP_GSCL1, 16, 0, 0),
GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 302596dc79a2..0f2a9d767eef 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -1680,7 +1680,8 @@ static const struct samsung_gate_clock peric_gate_clks[] __initconst = {
GATE(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_pcm1_peric",
ENABLE_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_I2S1, "sclk_i2s1", "sclk_i2s1_peric",
- ENABLE_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0),
+ ENABLE_SCLK_PERIC, 6,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_SPI2, "sclk_spi2", "sclk_spi2_peric", ENABLE_SCLK_PERIC,
5, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_SPI1, "sclk_spi1", "sclk_spi1_peric", ENABLE_SCLK_PERIC,
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index 492d51691080..9b02980ee929 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -541,8 +541,13 @@ static const struct samsung_gate_clock top1_gate_clks[] __initconst = {
GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200",
ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT |
CLK_IS_CRITICAL, 0),
+ /*
+ * This clock is required for access to the CMU_FSYS1 registers; keep it
+ * enabled permanently until proper runtime PM support is added.
+ */
GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200",
- ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0),
+ ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT |
+ CLK_IS_CRITICAL, 0),
GATE(CLK_SCLK_PHY_FSYS1_26M, "sclk_phy_fsys1_26m",
"dout_sclk_phy_fsys1_26m", ENABLE_SCLK_TOP1_FSYS11,
diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
index 0cd11e6893af..25ed60776560 100644
--- a/drivers/clk/sirf/clk-atlas6.c
+++ b/drivers/clk/sirf/clk-atlas6.c
@@ -136,7 +136,7 @@ static void __init atlas6_clk_init(struct device_node *np)
for (i = pll1; i < maxclk; i++) {
atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]);
- BUG_ON(!atlas6_clks[i]);
+ BUG_ON(IS_ERR(atlas6_clks[i]));
}
clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu");
clk_register_clkdev(atlas6_clks[io], NULL, "io");
diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
index 36376c542055..637e26babf89 100644
--- a/drivers/clk/socfpga/clk-gate-a10.c
+++ b/drivers/clk/socfpga/clk-gate-a10.c
@@ -157,6 +157,7 @@ static void __init __socfpga_gate_init(struct device_node *node,
if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) {
pr_err("%s: failed to find altr,sys-mgr regmap!\n",
__func__);
+ kfree(socfpga_clk);
return;
}
}
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index aa7a6e6a15b6..14918896811d 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -107,7 +107,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
val &= GENMASK(socfpgaclk->width - 1, 0);
/* Check for GPIO_DB_CLK by its offset */
- if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
+ if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
div = val + 1;
else
div = (1 << val);
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index c4d0b6f6abf2..fc2e2839fe57 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -38,7 +38,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
/* read VCO1 reg for numerator and denominator */
reg = readl(socfpgaclk->hw.reg);
refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
- vco_freq = (unsigned long long)parent_rate / refdiv;
+
+ vco_freq = parent_rate;
+ do_div(vco_freq, refdiv);
/* Read mdiv and fdiv from the fdbck register */
reg = readl(socfpgaclk->hw.reg + 0x4);
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 5bed36e12951..7327e90735c8 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -107,7 +107,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
{ STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
0, 0, 2, 0xB0, 1},
{ STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
- ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2},
+ ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 2, 0xB0, 2},
{ STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
ARRAY_SIZE(gpio_db_free_mux), 0, 0, 0, 0xB0, 3},
{ STRATIX10_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
index 640270f51aa5..eb8862752c2b 100644
--- a/drivers/clk/sprd/pll.c
+++ b/drivers/clk/sprd/pll.c
@@ -105,7 +105,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
cfg = kcalloc(regs_num, sizeof(*cfg), GFP_KERNEL);
if (!cfg)
- return -ENOMEM;
+ return parent_rate;
for (i = 0; i < regs_num; i++)
cfg[i] = sprd_pll_read(pll, i);
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 918ba3164da9..cd856210db58 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -373,6 +373,7 @@ static void __init st_of_flexgen_setup(struct device_node *np)
break;
}
+ flex_flags &= ~CLK_IS_CRITICAL;
of_clk_detect_critical(np, i, &flex_flags);
/*
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 9ac6c299e074..19304d6b2c05 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -381,6 +381,7 @@ static struct clk_div_table ths_div_table[] = {
{ .val = 1, .div = 2 },
{ .val = 2, .div = 4 },
{ .val = 3, .div = 6 },
+ { /* Sentinel */ },
};
static const char * const ths_parents[] = { "osc24M" };
static struct ccu_div ths_clk = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index d425b47cef17..1197ace59124 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -223,7 +223,7 @@ static const char * const psi_ahb1_ahb2_parents[] = { "osc24M", "osc32k",
static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
psi_ahb1_ahb2_parents,
0x510,
- 0, 5, /* M */
+ 0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
@@ -232,19 +232,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k",
"psi-ahb1-ahb2",
"pll-periph0" };
static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
- 0, 5, /* M */
+ 0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
- 0, 5, /* M */
+ 0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
- 0, 5, /* M */
+ 0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
@@ -662,7 +662,7 @@ static struct ccu_mux hdmi_cec_clk = {
.common = {
.reg = 0xb10,
- .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("hdmi-cec",
hdmi_cec_parents,
&ccu_mux_ops,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 61e3ba12773e..d9789378caf5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -328,6 +328,7 @@ static struct clk_div_table ths_div_table[] = {
{ .val = 1, .div = 2 },
{ .val = 2, .div = 4 },
{ .val = 3, .div = 6 },
+ { /* Sentinel */ },
};
static SUNXI_CCU_DIV_TABLE_WITH_GATE(ths_clk, "ths", "osc24M",
0x074, 0, 2, ths_div_table, BIT(31), 0);
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 004b411b640b..2bd035d48816 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -98,7 +98,7 @@ static void sun6i_a31_get_pll1_factors(struct factors_request *req)
* Round down the frequency to the closest multiple of either
* 6 or 16
*/
- u32 round_freq_6 = round_down(freq_mhz, 6);
+ u32 round_freq_6 = rounddown(freq_mhz, 6);
u32 round_freq_16 = round_down(freq_mhz, 16);
if (round_freq_6 > round_freq_16)
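round_down() is the kernel's mask-based helper and is only correct when the alignment is a power of two; rounding to a multiple of 6 needs rounddown(), which uses the modulo operator. A standalone illustration of the difference (the macro bodies paraphrase the kernel definitions):

    #include <stdio.h>

    #define round_down_pow2(x, y)  ((x) & ~((y) - 1))   /* valid only for power-of-two y */
    #define rounddown_any(x, y)    ((x) - ((x) % (y)))  /* works for any y */

    int main(void)
    {
            unsigned int freq_mhz = 1000;

            /* mask form: prints 1000, which is not a multiple of 6 */
            printf("round_down(1000, 6) = %u\n", round_down_pow2(freq_mhz, 6));
            /* modulo form: prints 996, the intended result */
            printf("rounddown(1000, 6)  = %u\n", rounddown_any(freq_mhz, 6));
            return 0;
    }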
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index de466b4446da..0efcb200dde5 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -233,6 +233,7 @@ enum clk_id {
tegra_clk_sdmmc4,
tegra_clk_sdmmc4_8,
tegra_clk_se,
+ tegra_clk_se_10,
tegra_clk_soc_therm,
tegra_clk_soc_therm_8,
tegra_clk_sor0,
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index b137c5d34eec..9d05fb48686d 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -650,7 +650,7 @@ static struct tegra_periph_init_data periph_clks[] = {
INT8("host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_8),
INT8("host1x", mux_pllc4_out1_pllc_pllc4_out2_pllp_clkm_plla_pllc4_out0, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_9),
INT8("se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
- INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
+ INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se_10),
INT8("2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, 0, tegra_clk_gr2d_8),
INT8("3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, 0, tegra_clk_gr3d_8),
INT8("vic03", mux_pllm_pllc_pllp_plla_pllc2_c3_clkm, CLK_SOURCE_VIC03, 178, 0, tegra_clk_vic03),
diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c
index a35579a3f884..476dab494c44 100644
--- a/drivers/clk/tegra/clk-tegra-pmc.c
+++ b/drivers/clk/tegra/clk-tegra-pmc.c
@@ -60,16 +60,16 @@ struct pmc_clk_init_data {
static DEFINE_SPINLOCK(clk_out_lock);
-static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern1",
+static const char *clk_out1_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern1",
};
-static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern2",
+static const char *clk_out2_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern2",
};
-static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern3",
+static const char *clk_out3_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern3",
};
static struct pmc_clk_init_data pmc_clks[] = {
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index e0aaecd98fbf..678019f86bc7 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1274,6 +1274,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 },
+ { TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 },
/* must be the last entry */
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index 688e403333b9..14926e07d09a 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -193,15 +193,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
if (err)
return NULL;
} else {
- const char *base_name = "adpll";
- char *buf;
-
- buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 +
- strlen(postfix), GFP_KERNEL);
- if (!buf)
- return NULL;
- sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix);
- name = buf;
+ name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s",
+ d->pa, postfix);
}
return name;
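devm_kasprintf() folds the removed "compute the length, allocate, sprintf" sequence into one call that sizes and formats the string itself, returns NULL on allocation failure, and ties the buffer's lifetime to the device. A userspace analogue using asprintf(), with made-up values for the address and suffix:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned long pa = 0x4a008154UL;    /* hypothetical register base */
            const char *postfix = "ck";         /* hypothetical clock suffix */
            char *name = NULL;

            /* One call sizes, allocates and formats, like devm_kasprintf(). */
            if (asprintf(&name, "%08lx.adpll.%s", pa, postfix) < 0)
                    return 1;

            puts(name);
            free(name);     /* the devm_ variant is instead freed automatically */
            return 0;
    }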
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index 07a805125e98..11d92311e162 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -146,10 +146,12 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) {
pr_warn("can't setup clkdm for basic clk %s\n",
__clk_get_name(clk));
+ clk_put(clk);
continue;
}
to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
omap2_init_clk_clkdm(clk_hw);
+ clk_put(clk);
}
}
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index 030e8b2c1050..c6478d0b996b 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -196,6 +196,7 @@ cleanup:
if (!cclk->comp_clks[i])
continue;
list_del(&cclk->comp_clks[i]->link);
+ kfree(cclk->comp_clks[i]->parent_names);
kfree(cclk->comp_clks[i]);
}
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 071af44b1ba8..e33ce851837e 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -497,6 +497,7 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
{
struct clk_init_data *init;
struct fapll_synth *synth;
+ struct clk *clk = ERR_PTR(-ENOMEM);
init = kzalloc(sizeof(*init), GFP_KERNEL);
if (!init)
@@ -519,13 +520,19 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
synth->hw.init = init;
synth->clk_pll = pll_clk;
- return clk_register(NULL, &synth->hw);
+ clk = clk_register(NULL, &synth->hw);
+ if (IS_ERR(clk)) {
+ pr_err("failed to register clock\n");
+ goto free;
+ }
+
+ return clk;
free:
kfree(synth);
kfree(init);
- return ERR_PTR(-ENOMEM);
+ return clk;
}
static void __init ti_fapll_setup(struct device_node *node)
diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
index 2c243a894f3b..3a52ab968ac2 100644
--- a/drivers/clk/uniphier/clk-uniphier-mux.c
+++ b/drivers/clk/uniphier/clk-uniphier-mux.c
@@ -40,10 +40,10 @@ static int uniphier_clk_mux_set_parent(struct clk_hw *hw, u8 index)
static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
{
struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
- int num_parents = clk_hw_get_num_parents(hw);
+ unsigned int num_parents = clk_hw_get_num_parents(hw);
int ret;
unsigned int val;
- u8 i;
+ unsigned int i;
ret = regmap_read(mux->regmap, mux->reg, &val);
if (ret)
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 0445ad7e559e..e67ab217eef4 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -827,15 +827,24 @@ static void arch_timer_evtstrm_enable(int divider)
static void arch_timer_configure_evtstream(void)
{
- int evt_stream_div, pos;
+ int evt_stream_div, lsb;
+
+ /*
+ * As the event stream can at most be generated at half the frequency
+ * of the counter, use half the frequency when computing the divider.
+ */
+ evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;
+
+ /*
+ * Find the closest power of two to the divisor. If the adjacent bit
+ * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1).
+ */
+ lsb = fls(evt_stream_div) - 1;
+ if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
+ lsb++;
- /* Find the closest power of two to the divisor */
- evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
- pos = fls(evt_stream_div);
- if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
- pos--;
/* enable event stream */
- arch_timer_evtstrm_enable(min(pos, 15));
+ arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
}
static void arch_counter_set_user_access(void)
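The rewritten helper first halves the counter rate (the event stream can be generated at no more than half the counter frequency) and then rounds the resulting divider to the nearest power of two: lsb indexes the top set bit, and if the bit just below it is also set the value is nearer to the next power of two, so the exponent is bumped by one before being clamped to the 4-bit register field. A standalone model of that rounding, with fls() emulated via a builtin and hypothetical clock numbers:

    #include <stdio.h>

    /* fls(): 1-based index of the highest set bit, 0 for x == 0. */
    static int fls_model(unsigned int x)
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    /* Round a divider to the nearest power of two, returned as an exponent. */
    static int nearest_pow2_exp(unsigned int div)
    {
            int lsb = fls_model(div) - 1;

            if (lsb > 0 && (div & (1u << (lsb - 1))))
                    lsb++;          /* closer to the next power of two */
            return lsb;
    }

    int main(void)
    {
            /* e.g. a 50 MHz counter and a 10 kHz target event stream */
            unsigned int evt_stream_div = 50000000 / 10000 / 2;     /* 2500 */

            /* 2500 lies between 2048 (2^11) and 4096; 2048 is nearer, so 11 */
            printf("exponent = %d\n", nearest_pow2_exp(evt_stream_div));
            return 0;
    }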
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 29d51755e18b..a7eb858a84a0 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -419,10 +419,8 @@ static int __init ttc_setup_clockevent(struct clk *clk,
ttcce->ttc.clk = clk;
err = clk_prepare_enable(ttcce->ttc.clk);
- if (err) {
- kfree(ttcce);
- return err;
- }
+ if (err)
+ goto out_kfree;
ttcce->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clockevent_cb;
@@ -432,7 +430,7 @@ static int __init ttc_setup_clockevent(struct clk *clk,
&ttcce->ttc.clk_rate_change_nb);
if (err) {
pr_warn("Unable to register clock notifier.\n");
- return err;
+ goto out_kfree;
}
ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
@@ -461,15 +459,17 @@ static int __init ttc_setup_clockevent(struct clk *clk,
err = request_irq(irq, ttc_clock_event_interrupt,
IRQF_TIMER, ttcce->ce.name, ttcce);
- if (err) {
- kfree(ttcce);
- return err;
- }
+ if (err)
+ goto out_kfree;
clockevents_config_and_register(&ttcce->ce,
ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
return 0;
+
+out_kfree:
+ kfree(ttcce);
+ return err;
}
/**
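The Cadence TTC change converts three separate "kfree and return" failure paths into a single out_kfree label, which also fixes the path where clock-notifier registration failed and ttcce was previously leaked. The general shape of that idiom, as a standalone sketch rather than the driver code:

    #include <errno.h>
    #include <stdlib.h>

    static int setup(int simulate_failure)
    {
            void *obj = malloc(64);
            int err;

            if (!obj)
                    return -ENOMEM;

            if (simulate_failure) {
                    err = -EIO;
                    goto out_free;          /* every later failure unwinds here */
            }
            return 0;

    out_free:
            free(obj);                      /* freed exactly once on all paths */
            return err;
    }

    int main(void) { return setup(1) == -EIO ? 0 : 1; }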
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index 1f5f734e4919..a018199575e3 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -225,7 +225,8 @@ static int apbt_next_event(unsigned long delta,
/**
* dw_apb_clockevent_init() - use an APB timer as a clock_event_device
*
- * @cpu: The CPU the events will be targeted at.
+ * @cpu: The CPU the events will be targeted at or -1 if CPU affiliation
+ * isn't required.
* @name: The name used for the timer and the IRQ for it.
* @rating: The rating to give the timer.
* @base: I/O base for the timer registers.
@@ -260,7 +261,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
dw_ced->ced.max_delta_ticks = 0x7fffffff;
dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced);
dw_ced->ced.min_delta_ticks = 5000;
- dw_ced->ced.cpumask = cpumask_of(cpu);
+ dw_ced->ced.cpumask = cpu < 0 ? cpu_possible_mask : cpumask_of(cpu);
dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
dw_ced->ced.set_state_shutdown = apbt_shutdown;
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 69866cd8f4bb..3e4d0e5733d3 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -146,10 +146,6 @@ static int num_called;
static int __init dw_apb_timer_init(struct device_node *timer)
{
switch (num_called) {
- case 0:
- pr_debug("%s: found clockevent timer\n", __func__);
- add_clockevent(timer);
- break;
case 1:
pr_debug("%s: found clocksource timer\n", __func__);
add_clocksource(timer);
@@ -160,6 +156,8 @@ static int __init dw_apb_timer_init(struct device_node *timer)
#endif
break;
default:
+ pr_debug("%s: found clockevent timer\n", __func__);
+ add_clockevent(timer);
break;
}
diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
index 1d740a8c42ab..47114c2a7cb5 100644
--- a/drivers/clocksource/h8300_timer8.c
+++ b/drivers/clocksource/h8300_timer8.c
@@ -169,7 +169,7 @@ static int __init h8300_8timer_init(struct device_node *node)
return PTR_ERR(clk);
}
- ret = ENXIO;
+ ret = -ENXIO;
base = of_iomap(node, 0);
if (!base) {
pr_err("failed to map registers for clockevent\n");
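The h8300 fix restores the kernel convention that error paths return negative errno values; a bare ENXIO is a positive constant, so callers that test for "ret < 0" would treat the failure as success. A tiny demonstration of why the sign matters:

    #include <errno.h>
    #include <stdio.h>

    static int probe_broken(void) { return ENXIO; }     /* bug: +6 */
    static int probe_fixed(void)  { return -ENXIO; }    /* convention: -6 */

    int main(void)
    {
            printf("broken probe reported as error? %s\n",
                   probe_broken() < 0 ? "yes" : "no");   /* no  */
            printf("fixed probe reported as error?  %s\n",
                   probe_fixed() < 0 ? "yes" : "no");    /* yes */
            return 0;
    }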
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index f6ddae30933f..dae8c0c2e606 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -138,10 +138,7 @@ static void mxs_irq_clear(char *state)
/* Clear pending interrupt */
timrot_irq_acknowledge();
-
-#ifdef DEBUG
- pr_info("%s: changing mode to %s\n", __func__, state)
-#endif /* DEBUG */
+ pr_debug("%s: changing mode to %s\n", __func__, state);
}
static int mxs_shutdown(struct clock_event_device *evt)
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index aca30f45172e..9e86404a361f 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -701,7 +701,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
}
- if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
+ if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
+ !acpi_pstate_strict) {
cpumask_clear(policy->cpus);
cpumask_set_cpu(cpu, policy->cpus);
cpumask_copy(data->freqdomain_cpus,
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 0df16eb1eb3c..a36452bd9612 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -25,6 +25,10 @@
#include "cpufreq-dt.h"
+/* Clk register set */
+#define ARMADA_37XX_CLK_TBG_SEL 0
+#define ARMADA_37XX_CLK_TBG_SEL_CPU_OFF 22
+
/* Power management in North Bridge register set */
#define ARMADA_37XX_NB_L0L1 0x18
#define ARMADA_37XX_NB_L2L3 0x1C
@@ -69,6 +73,8 @@
#define LOAD_LEVEL_NR 4
#define MIN_VOLT_MV 1000
+#define MIN_VOLT_MV_FOR_L1_1000MHZ 1108
+#define MIN_VOLT_MV_FOR_L1_1200MHZ 1155
/* AVS value for the corresponding voltage (in mV) */
static int avs_map[] = {
@@ -120,10 +126,15 @@ static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
* will be configured then the DVFS will be enabled.
*/
static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
- struct clk *clk, u8 *divider)
+ struct regmap *clk_base, u8 *divider)
{
+ u32 cpu_tbg_sel;
int load_lvl;
- struct clk *parent;
+
+ /* Determine which TBG clock the CPU is connected to */
+ regmap_read(clk_base, ARMADA_37XX_CLK_TBG_SEL, &cpu_tbg_sel);
+ cpu_tbg_sel >>= ARMADA_37XX_CLK_TBG_SEL_CPU_OFF;
+ cpu_tbg_sel &= ARMADA_37XX_NB_TBG_SEL_MASK;
for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
unsigned int reg, mask, val, offset = 0;
@@ -142,6 +153,11 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
mask = (ARMADA_37XX_NB_CLK_SEL_MASK
<< ARMADA_37XX_NB_CLK_SEL_OFF);
+ /* Set TBG index, for all levels we use the same TBG */
+ val = cpu_tbg_sel << ARMADA_37XX_NB_TBG_SEL_OFF;
+ mask = (ARMADA_37XX_NB_TBG_SEL_MASK
+ << ARMADA_37XX_NB_TBG_SEL_OFF);
+
/*
* Set cpu divider based on the pre-computed array in
* order to have balanced step.
@@ -160,14 +176,6 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
regmap_update_bits(base, reg, mask, val);
}
-
- /*
- * Set cpu clock source, for all the level we keep the same
- * clock source that the one already configured. For this one
- * we need to use the clock framework
- */
- parent = clk_get_parent(clk);
- clk_set_parent(clk, parent);
}
/*
@@ -202,6 +210,8 @@ static u32 armada_37xx_avs_val_match(int target_vm)
* - L2 & L3 voltage should be about 150mv smaller than L0 voltage.
* This function calculates L1 & L2 & L3 AVS values dynamically based
* on L0 voltage and fill all AVS values to the AVS value table.
+ * When the base CPU frequency is 1000 or 1200 MHz, there is an additional
+ * minimum avs value for load L1.
*/
static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
struct armada_37xx_dvfs *dvfs)
@@ -233,6 +243,19 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
dvfs->avs[load_level] = avs_min;
+ /*
+ * Set the avs values for loads L0 and L1 to their typical initial
+ * values according to the Armada 3700 Hardware Specifications when
+ * the base CPU frequency is 1000/1200 MHz.
+ */
+ if (dvfs->cpu_freq_max >= 1000*1000*1000) {
+ if (dvfs->cpu_freq_max >= 1200*1000*1000)
+ avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
+ else
+ avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
+ dvfs->avs[0] = dvfs->avs[1] = avs_min;
+ }
+
return;
}
@@ -252,6 +275,26 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
target_vm = avs_map[l0_vdd_min] - 150;
target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
+
+ /*
+ * Fix the avs value for load L1 when base CPU frequency is 1000/1200 MHz,
+ * otherwise the CPU gets stuck when switching from load L1 to load L0.
+ * Also ensure that avs value for load L1 is not higher than for L0.
+ */
+ if (dvfs->cpu_freq_max >= 1000*1000*1000) {
+ u32 avs_min_l1;
+
+ if (dvfs->cpu_freq_max >= 1200*1000*1000)
+ avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
+ else
+ avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
+
+ if (avs_min_l1 > dvfs->avs[0])
+ avs_min_l1 = dvfs->avs[0];
+
+ if (dvfs->avs[1] < avs_min_l1)
+ dvfs->avs[1] = avs_min_l1;
+ }
}
static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
@@ -360,11 +403,16 @@ static int __init armada37xx_cpufreq_driver_init(void)
struct platform_device *pdev;
unsigned long freq;
unsigned int cur_frequency, base_frequency;
- struct regmap *nb_pm_base, *avs_base;
+ struct regmap *nb_clk_base, *nb_pm_base, *avs_base;
struct device *cpu_dev;
int load_lvl, ret;
struct clk *clk, *parent;
+ nb_clk_base =
+ syscon_regmap_lookup_by_compatible("marvell,armada-3700-periph-clock-nb");
+ if (IS_ERR(nb_clk_base))
+ return -ENODEV;
+
nb_pm_base =
syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
@@ -423,7 +471,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
return -EINVAL;
}
- dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
+ dvfs = armada_37xx_cpu_freq_info_get(base_frequency);
if (!dvfs) {
clk_put(clk);
return -EINVAL;
@@ -441,7 +489,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
armada37xx_cpufreq_avs_configure(avs_base, dvfs);
armada37xx_cpufreq_avs_setup(avs_base, dvfs);
- armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
+ armada37xx_cpufreq_dvfs_setup(nb_pm_base, nb_clk_base, dvfs->divider);
clk_put(clk);
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
@@ -458,6 +506,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
/* Now that everything is setup, enable the DVFS at hardware level */
armada37xx_cpufreq_enable_dvfs(nb_pm_base);
+ memset(&pdata, 0, sizeof(pdata));
pdata.suspend = armada37xx_cpufreq_suspend;
pdata.resume = armada37xx_cpufreq_resume;
@@ -474,7 +523,7 @@ disable_dvfs:
remove_opp:
/* clean-up the already added opp before leaving */
while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
- freq = cur_frequency / dvfs->divider[load_lvl];
+ freq = base_frequency / dvfs->divider[load_lvl];
dev_pm_opp_remove(cpu_dev, freq);
}
@@ -485,6 +534,12 @@ remove_opp:
/* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
late_initcall(armada37xx_cpufreq_driver_init);
+static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
+ { .compatible = "marvell,armada-3700-nb-pm" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
+
MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
MODULE_LICENSE("GPL");
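Instead of asking the clock framework for the current parent, the setup code now reads the north-bridge peripheral clock register through a syscon regmap and extracts the TBG selector for the CPU, then writes that same selector into every DVFS load level. A standalone model of the field extraction; the bit offset comes from the hunk, while the 2-bit mask width is an assumption for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define CLK_TBG_SEL_CPU_OFF  22     /* offset used in the hunk above */
    #define NB_TBG_SEL_MASK      0x3u   /* assumed 2-bit selector field */

    /* Shift the CPU's TBG-select field down and mask it off. */
    static uint32_t tbg_sel_for_cpu(uint32_t reg)
    {
            return (reg >> CLK_TBG_SEL_CPU_OFF) & NB_TBG_SEL_MASK;
    }

    int main(void)
    {
            uint32_t reg = 0x2u << CLK_TBG_SEL_CPU_OFF; /* pretend register value */

            printf("CPU clocked from TBG %u\n", tbg_sel_for_cpu(reg));
            return 0;
    }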
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 77b0e5d0fb13..a3c82f530d60 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -566,6 +566,16 @@ unmap_base:
return ret;
}
+static void brcm_avs_prepare_uninit(struct platform_device *pdev)
+{
+ struct private_data *priv;
+
+ priv = platform_get_drvdata(pdev);
+
+ iounmap(priv->avs_intr_base);
+ iounmap(priv->base);
+}
+
static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
@@ -701,21 +711,21 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
brcm_avs_driver.driver_data = pdev;
- return cpufreq_register_driver(&brcm_avs_driver);
+ ret = cpufreq_register_driver(&brcm_avs_driver);
+ if (ret)
+ brcm_avs_prepare_uninit(pdev);
+
+ return ret;
}
static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
{
- struct private_data *priv;
int ret;
ret = cpufreq_unregister_driver(&brcm_avs_driver);
- if (ret)
- return ret;
+ WARN_ON(ret);
- priv = platform_get_drvdata(pdev);
- iounmap(priv->base);
- iounmap(priv->avs_intr_base);
+ brcm_avs_prepare_uninit(pdev);
return 0;
}
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index 1608f7105c9f..ad743f2f31e7 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -104,6 +104,13 @@ out_put_node:
}
module_init(hb_cpufreq_driver_init);
+static const struct of_device_id __maybe_unused hb_cpufreq_of_match[] = {
+ { .compatible = "calxeda,highbank" },
+ { .compatible = "calxeda,ecx-2000" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, hb_cpufreq_of_match);
+
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index d8c3595e9023..a0cbbdfc7735 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -311,6 +311,9 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
if (!np)
+ np = of_find_compatible_node(NULL, NULL,
+ "fsl,imx6ull-ocotp");
+ if (!np)
return -ENOENT;
base = of_iomap(np, 0);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 29f25d5d65e0..c2d222f97db7 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -712,7 +712,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
- if (global.no_turbo)
+ if (global.no_turbo || global.turbo_disabled)
*current_max = HWP_GUARANTEED_PERF(cap);
else
*current_max = HWP_HIGHEST_PERF(cap);
@@ -957,7 +957,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
update_turbo_state();
if (global.turbo_disabled) {
- pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+ pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
mutex_unlock(&intel_pstate_limits_lock);
mutex_unlock(&intel_pstate_driver_lock);
return -EPERM;
@@ -1420,20 +1420,22 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
- cpu->pstate.max_pstate = pstate_funcs.get_max();
cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
cpu->pstate.scaling = pstate_funcs.get_scaling();
- cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
if (hwp_active && !hwp_mode_bdw) {
unsigned int phy_max, current_max;
intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+ cpu->pstate.turbo_pstate = phy_max;
+ cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached));
} else {
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ cpu->pstate.max_pstate = pstate_funcs.get_max();
}
+ cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
if (pstate_funcs.get_aperf_mperf_shift)
cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -2324,9 +2326,15 @@ static int intel_pstate_update_status(const char *buf, size_t size)
{
int ret;
- if (size == 3 && !strncmp(buf, "off", size))
- return intel_pstate_driver ?
- intel_pstate_unregister_driver() : -EINVAL;
+ if (size == 3 && !strncmp(buf, "off", size)) {
+ if (!intel_pstate_driver)
+ return -EINVAL;
+
+ if (hwp_active)
+ return -EBUSY;
+
+ return intel_pstate_unregister_driver();
+ }
if (size == 6 && !strncmp(buf, "active", size)) {
if (intel_pstate_driver) {
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
index be89416e2358..9d902f67f871 100644
--- a/drivers/cpufreq/loongson1-cpufreq.c
+++ b/drivers/cpufreq/loongson1-cpufreq.c
@@ -217,6 +217,7 @@ static struct platform_driver ls1x_cpufreq_platdrv = {
module_platform_driver(ls1x_cpufreq_platdrv);
+MODULE_ALIAS("platform:ls1x-cpufreq");
MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
MODULE_DESCRIPTION("Loongson1 CPUFreq driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index eb8920d39818..5a81e20f0282 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -554,6 +554,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ }
};
+MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
static int __init mtk_cpufreq_driver_init(void)
{
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index fb77b39a4ce3..818f92798fb9 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -881,9 +881,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency_fidvid(struct powernow_k8_data *data,
- unsigned int index)
+ unsigned int index,
+ struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy;
u32 fid = 0;
u32 vid = 0;
int res;
@@ -915,9 +915,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
freqs.old = find_khz_freq_from_fid(data->currfid);
freqs.new = find_khz_freq_from_fid(fid);
- policy = cpufreq_cpu_get(smp_processor_id());
- cpufreq_cpu_put(policy);
-
cpufreq_freq_transition_begin(policy, &freqs);
res = transition_fid_vid(data, fid, vid);
cpufreq_freq_transition_end(policy, &freqs, res);
@@ -972,7 +969,7 @@ static long powernowk8_target_fn(void *arg)
powernow_k8_acpi_pst_values(data, newstate);
- ret = transition_frequency_fidvid(data, newstate);
+ ret = transition_frequency_fidvid(data, newstate, pol);
if (ret) {
pr_err("transition frequency failed\n");
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 5fff39dae625..5da985604692 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -885,12 +885,15 @@ static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
int cpu;
- struct cpufreq_policy cpu_policy;
+ struct cpufreq_policy *cpu_policy;
rebooting = true;
for_each_online_cpu(cpu) {
- cpufreq_get_policy(&cpu_policy, cpu);
- powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
+ cpu_policy = cpufreq_cpu_get(cpu);
+ if (!cpu_policy)
+ continue;
+ powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
+ cpufreq_cpu_put(cpu_policy);
}
return NOTIFY_DONE;
@@ -903,6 +906,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
void powernv_cpufreq_work_fn(struct work_struct *work)
{
struct chip *chip = container_of(work, struct chip, throttle);
+ struct cpufreq_policy *policy;
unsigned int cpu;
cpumask_t mask;
@@ -917,12 +921,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
chip->restore = false;
for_each_cpu(cpu, &mask) {
int index;
- struct cpufreq_policy policy;
- cpufreq_get_policy(&policy, cpu);
- index = cpufreq_table_find_index_c(&policy, policy.cur);
- powernv_cpufreq_target_index(&policy, index);
- cpumask_andnot(&mask, &mask, policy.cpus);
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+ index = cpufreq_table_find_index_c(policy, policy->cur);
+ powernv_cpufreq_target_index(policy, index);
+ cpumask_andnot(&mask, &mask, policy->cpus);
+ cpufreq_cpu_put(policy);
}
out:
put_online_cpus();
@@ -1081,6 +1087,12 @@ free_and_return:
static inline void clean_chip_info(void)
{
+ int i;
+
+ /* flush any pending work items */
+ if (chips)
+ for (i = 0; i < nr_chips; i++)
+ cancel_work_sync(&chips[i].throttle);
kfree(chips);
}
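cpufreq_get_policy() copies the policy into a caller-provided structure, whereas cpufreq_cpu_get() hands back a pointer to the live, reference-counted policy (or NULL when the CPU has none) and must be paired with cpufreq_cpu_put(). A hedged sketch of the access pattern the hunks switch to, in kernel context:

    /* Sketch only; 'cpu' and the loop body are placeholders. */
    struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

    if (policy) {
            /* ... read policy->cur, policy->cpus, request a target index ... */
            cpufreq_cpu_put(policy);        /* drop the reference we took */
    }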
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 87a98ec77773..0338885332a7 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -246,6 +246,7 @@ static struct platform_driver scpi_cpufreq_platdrv = {
};
module_platform_driver(scpi_cpufreq_platdrv);
+MODULE_ALIAS("platform:scpi-cpufreq");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCPI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index 47105735df12..2d09960afa59 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -144,7 +144,8 @@ static const struct reg_field sti_stih407_dvfs_regfields[DVFS_MAX_REGFIELDS] = {
static const struct reg_field *sti_cpufreq_match(void)
{
if (of_machine_is_compatible("st,stih407") ||
- of_machine_is_compatible("st,stih410"))
+ of_machine_is_compatible("st,stih410") ||
+ of_machine_is_compatible("st,stih418"))
return sti_stih407_dvfs_regfields;
return NULL;
@@ -261,7 +262,8 @@ static int sti_cpufreq_init(void)
int ret;
if ((!of_machine_is_compatible("st,stih407")) &&
- (!of_machine_is_compatible("st,stih410")))
+ (!of_machine_is_compatible("st,stih410")) &&
+ (!of_machine_is_compatible("st,stih418")))
return -ENODEV;
ddata.cpu = get_cpu_device(0);
@@ -293,6 +295,13 @@ register_cpufreq_dt:
}
module_init(sti_cpufreq_init);
+static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = {
+ { .compatible = "st,stih407" },
+ { .compatible = "st,stih410" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match);
+
MODULE_DESCRIPTION("STMicroelectronics CPUFreq/OPP driver");
MODULE_AUTHOR("Ajitpal Singh <ajitpal.singh@st.com>");
MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>");
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 6df894d65d9e..2d182dc1b49e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -148,7 +148,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
*/
stop_critical_timings();
drv->states[index].enter_s2idle(dev, drv, index);
- WARN_ON(!irqs_disabled());
+ if (WARN_ON_ONCE(!irqs_disabled()))
+ local_irq_disable();
/*
* timekeeping_resume() that will be called by tick_unfreeze() for the
* first CPU executing it calls functions containing RCU read-side
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index e754c7aae7f7..66979dc33680 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -467,7 +467,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
&kdev->kobj, "state%d", i);
if (ret) {
- kfree(kobj);
+ kobject_put(&kobj->kobj);
goto error_state;
}
cpuidle_add_s2idle_attr_group(kobj);
@@ -598,7 +598,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle,
&kdev->kobj, "driver");
if (ret) {
- kfree(kdrv);
+ kobject_put(&kdrv->kobj);
return ret;
}
@@ -692,7 +692,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
"cpuidle");
if (error) {
- kfree(kdev);
+ kobject_put(&kdev->kobj);
return error;
}
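Once kobject_init_and_add() has run, the kobject owns the surrounding allocation through its ktype release callback, and a failed add may already have allocated the object's name; freeing the structure directly therefore leaks the name and bypasses the release path. The corrected error handling, shown as a hedged kernel-context sketch of the first hunk above:

    ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
                               &kdev->kobj, "state%d", i);
    if (ret) {
            kobject_put(&kobj->kobj);   /* release callback frees kobj and its name */
            goto error_state;
    }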
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 68d5ea818b6c..cd00afb5786e 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -926,7 +926,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
}
pd->pd_ctl.w = PD_CTL_HOST_READY |
- ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
+ ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
(crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
PD_CTL_HASH_FINAL : 0);
pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
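Because bitwise | binds tighter than the conditional operator, the old expression already grouped as ((type == AHASH) | (type == AEAD)) ? PD_CTL_HASH_FINAL : 0, and since both comparisons yield 0 or 1 the computed flag was the same; switching to || states the logical "either alg type" intent and short-circuits the second comparison. A quick check of that grouping:

    #include <stdio.h>

    int main(void)
    {
            int is_hash = 0, is_aead = 1;
            unsigned int flag = 0x10;   /* stand-in for PD_CTL_HASH_FINAL */

            /* '|' outranks '?:', so both forms group the same way here. */
            unsigned int bitwise = (is_hash | is_aead)  ? flag : 0;
            unsigned int logical = (is_hash || is_aead) ? flag : 0;

            printf("%#x %#x\n", bitwise, logical);      /* 0x10 0x10 */
            return 0;
    }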
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 0b1fc5664b1d..c63992fbbc98 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -52,7 +52,7 @@
/* ================= Device Structure ================== */
-struct device_private iproc_priv;
+struct bcm_device_private iproc_priv;
/* ==================== Parameters ===================== */
@@ -2980,7 +2980,6 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
ctx->enckeylen = keylen;
ctx->authkeylen = 0;
- memcpy(ctx->enckey, key, ctx->enckeylen);
switch (ctx->enckeylen) {
case AES_KEYSIZE_128:
@@ -2996,6 +2995,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
goto badkey;
}
+ memcpy(ctx->enckey, key, ctx->enckeylen);
+
flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
ctx->authkeylen);
flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
@@ -3056,6 +3057,10 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
flow_log("%s\n", __func__);
+
+ if (keylen < GCM_ESP_SALT_SIZE)
+ return -EINVAL;
+
ctx->salt_len = GCM_ESP_SALT_SIZE;
ctx->salt_offset = GCM_ESP_SALT_OFFSET;
memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
@@ -3084,6 +3089,10 @@ static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
flow_log("%s\n", __func__);
+
+ if (keylen < GCM_ESP_SALT_SIZE)
+ return -EINVAL;
+
ctx->salt_len = GCM_ESP_SALT_SIZE;
ctx->salt_offset = GCM_ESP_SALT_OFFSET;
memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
@@ -3113,6 +3122,10 @@ static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
flow_log("%s\n", __func__);
+
+ if (keylen < CCM_ESP_SALT_SIZE)
+ return -EINVAL;
+
ctx->salt_len = CCM_ESP_SALT_SIZE;
ctx->salt_offset = CCM_ESP_SALT_OFFSET;
memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 763c425c41ca..36452d26c7c5 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -431,7 +431,7 @@ struct spu_hw {
u32 num_chan;
};
-struct device_private {
+struct bcm_device_private {
struct platform_device *pdev;
struct spu_hw spu;
@@ -478,6 +478,6 @@ struct device_private {
struct mbox_chan **mbox;
};
-extern struct device_private iproc_priv;
+extern struct bcm_device_private iproc_priv;
#endif
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index a912c6ad3e85..f96d1dade010 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -400,7 +400,7 @@ char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
- struct device_private *ipriv;
+ struct bcm_device_private *ipriv;
char *buf;
ssize_t ret, out_offset, out_count;
int i;
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index edacf9b39b63..ceb033930535 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1457,7 +1457,13 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
*/
void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
{
- __be64 sector_size = cpu_to_be64(512);
+ /*
+ * Set sector size to a big value, practically disabling
+ * sector size segmentation in xts implementation. We cannot
+ * take full advantage of this HW feature with existing
+ * crypto API / dm-crypt SW architecture.
+ */
+ __be64 sector_size = cpu_to_be64(BIT(15));
u32 *key_jump_cmd;
init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
@@ -1509,7 +1515,13 @@ EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
*/
void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
{
- __be64 sector_size = cpu_to_be64(512);
+ /*
+ * Set sector size to a big value, practically disabling
+ * sector size segmentation in xts implementation. We cannot
+ * take full advantage of this HW feature with existing
+ * crypto API / dm-crypt SW architecture.
+ */
+ __be64 sector_size = cpu_to_be64(BIT(15));
u32 *key_jump_cmd;
init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 600336d169a9..cd4d60d318ba 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -205,6 +205,7 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
int status;
memset(req_info, 0, sizeof(struct cpt_request_info));
+ req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0;
memset(fctx, 0, sizeof(struct fc_context));
create_input_list(req, enc, enc_iv_len);
create_output_list(req, enc_iv_len);
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index b0ba4331944b..43fe69d0981a 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -136,7 +136,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
/* Setup gather (input) components */
g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
- info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL);
+ info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->gather_components) {
ret = -ENOMEM;
goto scatter_gather_clean;
@@ -153,7 +153,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
/* Setup scatter (output) components */
s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
- info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL);
+ info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->scatter_components) {
ret = -ENOMEM;
goto scatter_gather_clean;
@@ -170,7 +170,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
/* Create and initialize DPTR */
info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
- info->in_buffer = kzalloc(info->dlen, GFP_KERNEL);
+ info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->in_buffer) {
ret = -ENOMEM;
goto scatter_gather_clean;
@@ -198,7 +198,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
}
/* Create and initialize RPTR */
- info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL);
+ info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->out_buffer) {
ret = -ENOMEM;
goto scatter_gather_clean;
@@ -434,7 +434,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
struct cpt_vq_command vq_cmd;
union cpt_inst_s cptinst;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (unlikely(!info)) {
dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
return -ENOMEM;
@@ -456,7 +456,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
* Get buffer for union cpt_res_s response
* structure and its physical address
*/
- info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
+ info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (unlikely(!info->completion_addr)) {
dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
ret = -ENOMEM;
diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h
index 80ee074c6e0c..09930d95ad24 100644
--- a/drivers/crypto/cavium/cpt/request_manager.h
+++ b/drivers/crypto/cavium/cpt/request_manager.h
@@ -65,6 +65,8 @@ struct cpt_request_info {
union ctrl_info ctrl; /* User control information */
struct cptvf_request req; /* Request Information (Core specific) */
+ bool may_sleep;
+
struct buf_ptr in[MAX_BUF_CNT];
struct buf_ptr out[MAX_BUF_CNT];
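The new may_sleep flag records whether the crypto request was submitted with CRYPTO_TFM_REQ_MAY_SLEEP, and the request-manager allocations above pick their GFP flags from it: GFP_KERNEL may block and is only safe when the caller can sleep, otherwise GFP_ATOMIC must be used. The hunks inline the ternary at each call site; factored out, the selection looks like this (kernel context assumed):

    /* Sketch of the flag-to-GFP mapping repeated in the hunks above. */
    gfp_t gfp = req->may_sleep ? GFP_KERNEL : GFP_ATOMIC;

    info = kzalloc(sizeof(*info), gfp);
    if (!info)
            return -ENOMEM;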
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index fee7cb2ce747..a81f3c7e941d 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -183,7 +183,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
struct nitrox_device *nitrox_get_first_device(void)
{
- struct nitrox_device *ndev = NULL;
+ struct nitrox_device *ndev;
mutex_lock(&devlist_lock);
list_for_each_entry(ndev, &ndevlist, list) {
@@ -191,7 +191,7 @@ struct nitrox_device *nitrox_get_first_device(void)
break;
}
mutex_unlock(&devlist_lock);
- if (!ndev)
+ if (&ndev->list == &ndevlist)
return NULL;
refcount_inc(&ndev->refcnt);
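list_for_each_entry() never leaves its cursor NULL: when the list is exhausted the cursor points at the container computed from the list head itself, so the removed "!ndev" test could never fire. The fix detects the ran-off-the-end case by comparing the cursor's node with the head. A hedged sketch of that pattern in kernel-list terms (the predicate name is made up):

    list_for_each_entry(ndev, &ndevlist, list) {
            if (device_is_ready(ndev))          /* hypothetical match test */
                    break;
    }
    if (&ndev->list == &ndevlist)
            return NULL;                        /* loop completed without a match */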
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index b9dfae47aefd..7f5fc705503d 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -9,10 +9,9 @@ config CRYPTO_DEV_CCP_DD
config CRYPTO_DEV_SP_CCP
bool "Cryptographic Coprocessor device"
default y
- depends on CRYPTO_DEV_CCP_DD
+ depends on CRYPTO_DEV_CCP_DD && DMADEVICES
select HW_RANDOM
select DMA_ENGINE
- select DMADEVICES
select CRYPTO_SHA1
select CRYPTO_SHA256
help
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 7442b0422f8a..bd43b5c1450c 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -471,6 +471,7 @@ struct ccp_sg_workarea {
unsigned int sg_used;
struct scatterlist *dma_sg;
+ struct scatterlist *dma_sg_head;
struct device *dma_dev;
unsigned int dma_count;
enum dma_data_direction dma_dir;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 330853a2702f..20ca9c9e109e 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -67,7 +67,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp)
static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
if (wa->dma_count)
- dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
+ dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);
wa->dma_count = 0;
}
@@ -96,6 +96,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
return 0;
wa->dma_sg = sg;
+ wa->dma_sg_head = sg;
wa->dma_dev = dev;
wa->dma_dir = dma_dir;
wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
@@ -108,14 +109,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
unsigned int nbytes = min_t(u64, len, wa->bytes_left);
+ unsigned int sg_combined_len = 0;
if (!wa->sg)
return;
wa->sg_used += nbytes;
wa->bytes_left -= nbytes;
- if (wa->sg_used == wa->sg->length) {
- wa->sg = sg_next(wa->sg);
+ if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
+ /* Advance to the next DMA scatterlist entry */
+ wa->dma_sg = sg_next(wa->dma_sg);
+
+ /* In the case that the DMA mapped scatterlist has entries
+ * that have been merged, the non-DMA mapped scatterlist
+ * must be advanced multiple times for each merged entry.
+ * This ensures that the current non-DMA mapped entry
+ * corresponds to the current DMA mapped entry.
+ */
+ do {
+ sg_combined_len += wa->sg->length;
+ wa->sg = sg_next(wa->sg);
+ } while (wa->sg_used > sg_combined_len);
+
wa->sg_used = 0;
}
}
@@ -304,7 +319,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
/* Update the structures and generate the count */
buf_count = 0;
while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
- nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
+ nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
dm_wa->length - buf_count);
nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
@@ -336,11 +351,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
* and destination. The resulting len values will always be <= UINT_MAX
* because the dma length is an unsigned int.
*/
- sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
+ sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
if (dst) {
- sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
+ sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
op_len = min(sg_src_len, sg_dst_len);
} else {
@@ -370,7 +385,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
/* Enough data in the sg element, but we need to
* adjust for any previously copied data
*/
- op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
+ op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
op->src.u.dma.offset = src->sg_wa.sg_used;
op->src.u.dma.length = op_len & ~(block_size - 1);
@@ -391,7 +406,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
/* Enough room in the sg element, but we need to
* adjust for any previously used area
*/
- op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
+ op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
op->dst.u.dma.offset = dst->sg_wa.sg_used;
op->dst.u.dma.length = op->src.u.dma.length;
}
@@ -1737,7 +1752,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
break;
default:
ret = -EINVAL;
- goto e_ctx;
+ goto e_data;
}
} else {
/* Stash the context */
@@ -1783,8 +1798,9 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
LSB_ITEM_SIZE);
break;
default:
+ kfree(hmac_buf);
ret = -EINVAL;
- goto e_ctx;
+ goto e_data;
}
memset(&hmac_cmd, 0, sizeof(hmac_cmd));
@@ -2033,7 +2049,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
dst.sg_wa.sg_used = 0;
for (i = 1; i <= src.sg_wa.dma_count; i++) {
if (!dst.sg_wa.sg ||
- (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
+ (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
ret = -EINVAL;
goto e_dst;
}
@@ -2059,8 +2075,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_dst;
}
- dst.sg_wa.sg_used += src.sg_wa.sg->length;
- if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
+ dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
+ if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
dst.sg_wa.sg_used = 0;
}
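dma_map_sg() is allowed to merge adjacent entries, so a mapped segment can cover several original scatterlist entries; the work area therefore keeps a separate DMA cursor (dma_sg), and when one DMA segment is fully consumed it advances the plain cursor until the accumulated entry lengths catch up, exactly as the new do/while loop does. A standalone model of that catch-up over plain arrays:

    #include <stdio.h>

    int main(void)
    {
            /* One 96-byte DMA segment built by merging three 32-byte entries. */
            unsigned int sg_len[] = { 32, 32, 32, 16 };
            unsigned int sg_used = 96;          /* bytes consumed from the DMA segment */
            unsigned int combined = 0, i = 0;

            do {
                    combined += sg_len[i++];
            } while (sg_used > combined);       /* same condition as the hunk */

            printf("plain cursor now at entry %u (%u bytes walked)\n", i, combined);
            return 0;
    }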
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index aa6b45bc13b9..57aac15a335f 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -731,7 +731,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
dev_dbg(dev, "ASSOC buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
- areq->assoclen, NS_BIT);
+ areq_ctx->assoclen, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
areq_ctx->cryptlen > 0)
@@ -1080,9 +1080,11 @@ static void cc_proc_header_desc(struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
+
/* Hash associated data */
- if (req->assoclen > 0)
+ if (areq_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* Hash IV */
@@ -1310,7 +1312,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- unsigned int assoclen = req->assoclen;
+ unsigned int assoclen = areq_ctx->assoclen;
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
@@ -1469,7 +1471,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
idx++;
/* process assoc data */
- if (req->assoclen > 0) {
+ if (req_ctx->assoclen > 0) {
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
} else {
hw_desc_init(&desc[idx]);
@@ -1561,7 +1563,7 @@ static int config_ccm_adata(struct aead_request *req)
* NIST Special Publication 800-38C
*/
*b0 |= (8 * ((m - 2) / 2));
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
@@ -1572,7 +1574,7 @@ static int config_ccm_adata(struct aead_request *req)
/* END of "taken from crypto/ccm.c" */
/* l(a) - size of associated data. */
- req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+ req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
req->iv[15] = 1;
@@ -1604,7 +1606,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= CCM_BLOCK_IV_SIZE;
+ areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
}
static void cc_set_ghash_desc(struct aead_request *req,
@@ -1812,7 +1814,7 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
// for gcm and rfc4106.
cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
cc_set_gctr_desc(req, desc, seq_size);
/* process(gctr+ghash) */
@@ -1836,8 +1838,8 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
- dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
- __func__, cryptlen, req->assoclen, ctx->authsize);
+ dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
+ __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@@ -1853,7 +1855,7 @@ static int config_gcm_context(struct aead_request *req)
if (!req_ctx->plaintext_authenticate_only) {
__be64 temp64;
- temp64 = cpu_to_be64(req->assoclen * 8);
+ temp64 = cpu_to_be64(req_ctx->assoclen * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1863,8 +1865,8 @@ static int config_gcm_context(struct aead_request *req)
*/
__be64 temp64;
- temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
- cryptlen) * 8);
+ temp64 = cpu_to_be64((req_ctx->assoclen +
+ GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1884,7 +1886,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+ areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
static int cc_proc_aead(struct aead_request *req,
@@ -1909,7 +1911,7 @@ static int cc_proc_aead(struct aead_request *req,
/* Check data length according to mode */
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
- req->cryptlen, req->assoclen);
+ req->cryptlen, areq_ctx->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2058,8 +2060,11 @@ static int cc_aead_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@@ -2087,8 +2092,11 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@@ -2106,8 +2114,11 @@ static int cc_aead_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@@ -2133,8 +2144,11 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@@ -2250,8 +2264,11 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@@ -2273,11 +2290,14 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
//plaintext is not encrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);
@@ -2305,8 +2325,11 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@@ -2328,11 +2351,14 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
//plaintext is not decrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index 5edf3b351fa4..74bc99067f18 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -67,6 +67,7 @@ struct aead_req_ctx {
u8 backup_mac[MAX_MAC_SIZE];
u8 *backup_iv; /*store iv for generated IV flow*/
u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
+ u32 assoclen; /* internal assoclen */
dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
/* buffer for internal ccm configurations */
dma_addr_t ccm_iv0_dma_addr;
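The ccree rework snapshots req->assoclen into the request context at every encrypt/decrypt entry point; the RFC4309/RFC4543 wrappers then shrink the cached copy instead of rewriting the caller's request, and all later descriptor and DMA-mapping math reads areq_ctx->assoclen. The entry-point pattern, reduced to a hedged sketch using the field names from the hunks:

    /* Kernel context assumed; this mirrors the added lines, not new API. */
    memset(areq_ctx, 0, sizeof(*areq_ctx));
    areq_ctx->backup_iv = req->iv;
    areq_ctx->assoclen = req->assoclen;     /* private copy; req is left untouched */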
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 90b4870078fb..630020255941 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -65,7 +65,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- u32 skip = req->assoclen + req->cryptlen;
+ u32 skip = areq_ctx->assoclen + req->cryptlen;
if (areq_ctx->is_gcm4543)
skip += crypto_aead_ivsize(tfm);
@@ -460,10 +460,8 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
/* Map the src SGL */
rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
goto cipher_exit;
- }
if (mapped_nents > 1)
req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
@@ -477,12 +475,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
}
} else {
/* Map the dst sg */
- if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
- &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy, &mapped_nents)) {
- rc = -ENOMEM;
+ rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+ &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents);
+ if (rc)
goto cipher_exit;
- }
if (mapped_nents > 1)
req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
@@ -577,8 +574,8 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
- req->assoclen, req->cryptlen);
- size_to_unmap = req->assoclen + req->cryptlen;
+ areq_ctx->assoclen, req->cryptlen);
+ size_to_unmap = areq_ctx->assoclen + req->cryptlen;
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_to_unmap += areq_ctx->req_authsize;
if (areq_ctx->is_gcm4543)
@@ -720,7 +717,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
struct scatterlist *current_sg = req->src;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int sg_index = 0;
- u32 size_of_assoc = req->assoclen;
+ u32 size_of_assoc = areq_ctx->assoclen;
struct device *dev = drvdata_to_dev(drvdata);
if (areq_ctx->is_gcm4543)
@@ -731,7 +728,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
goto chain_assoc_exit;
}
- if (req->assoclen == 0) {
+ if (areq_ctx->assoclen == 0) {
areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
@@ -791,7 +788,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
- req->assoclen, 0, is_last,
+ areq_ctx->assoclen, 0, is_last,
&areq_ctx->assoc.mlli_nents);
areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
}
@@ -975,11 +972,11 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
u32 offset = 0;
/* non-inplace mode */
- unsigned int size_for_map = req->assoclen + req->cryptlen;
+ unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 sg_index = 0;
bool is_gcm4543 = areq_ctx->is_gcm4543;
- u32 size_to_skip = req->assoclen;
+ u32 size_to_skip = areq_ctx->assoclen;
if (is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
@@ -1023,9 +1020,13 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
areq_ctx->src_offset = offset;
if (req->src != req->dst) {
- size_for_map = req->assoclen + req->cryptlen;
- size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
- authsize : 0;
+ size_for_map = areq_ctx->assoclen + req->cryptlen;
+
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ size_for_map += authsize;
+ else
+ size_for_map -= authsize;
+
if (is_gcm4543)
size_for_map += crypto_aead_ivsize(tfm);
@@ -1033,10 +1034,8 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
&areq_ctx->dst.nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
&dst_mapped_nents);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
goto chain_data_exit;
- }
}
dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
@@ -1190,11 +1189,10 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
}
areq_ctx->ccm_iv0_dma_addr = dma_addr;
- if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
- &sg_data, req->assoclen)) {
- rc = -ENOMEM;
+ rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+ &sg_data, areq_ctx->assoclen);
+ if (rc)
goto aead_map_failure;
- }
}
if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
@@ -1243,10 +1241,12 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
}
- size_to_map = req->cryptlen + req->assoclen;
- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ size_to_map = req->cryptlen + areq_ctx->assoclen;
+ /* If we do in-place encryption, we also need the auth tag */
+ if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
+ (req->src == req->dst)) {
size_to_map += authsize;
-
+ }
if (is_gcm4543)
size_to_map += crypto_aead_ivsize(tfm);
rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
@@ -1254,10 +1254,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
(LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
LLI_MAX_NUM_OF_DATA_ENTRIES),
&dummy, &mapped_nents);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
goto aead_map_failure;
- }
if (areq_ctx->is_single_pass) {
/*
@@ -1341,6 +1339,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
@@ -1360,18 +1359,18 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
/*TODO: copy data in case the buffer is large enough for the operation */
/* map the previous buffer */
if (*curr_buff_cnt) {
- if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
- &sg_data)) {
- return -ENOMEM;
- }
+ rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data);
+ if (rc)
+ return rc;
}
if (src && nbytes > 0 && do_update) {
- if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
- &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy, &mapped_nents)) {
+ rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents);
+ if (rc)
goto unmap_curr_buff;
- }
if (src && mapped_nents == 1 &&
areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
memcpy(areq_ctx->buff_sg, src,
@@ -1390,7 +1389,8 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
0, true, &areq_ctx->mlli_nents);
- if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
goto fail_unmap_din;
}
/* change the buffer index for the unmap function */
@@ -1406,7 +1406,7 @@ unmap_curr_buff:
if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
- return -ENOMEM;
+ return rc;
}
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
@@ -1425,6 +1425,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
unsigned int swap_index = 0;
+ int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
@@ -1469,21 +1470,21 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
}
if (*curr_buff_cnt) {
- if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
- &sg_data)) {
- return -ENOMEM;
- }
+ rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data);
+ if (rc)
+ return rc;
/* change the buffer index for next operation */
swap_index = 1;
}
if (update_data_len > *curr_buff_cnt) {
- if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
- DMA_TO_DEVICE, &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
- &mapped_nents)) {
+ rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE, &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents);
+ if (rc)
goto unmap_curr_buff;
- }
if (mapped_nents == 1 &&
areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
/* only one entry in the SG and no previous data */
@@ -1503,7 +1504,8 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
(update_data_len - *curr_buff_cnt), 0, true,
&areq_ctx->mlli_nents);
- if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
goto fail_unmap_din;
}
areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@@ -1517,7 +1519,7 @@ unmap_curr_buff:
if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
- return -ENOMEM;
+ return rc;
}
void cc_unmap_hash_request(struct device *dev, void *ctx,
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 28a5b8b38fa2..1bcb6f0157b0 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -137,7 +137,6 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
skcipher_alg.base);
struct device *dev = drvdata_to_dev(cc_alg->drvdata);
unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
- int rc = 0;
dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
crypto_tfm_alg_name(tfm));
@@ -149,10 +148,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
ctx_p->flow_mode = cc_alg->flow_mode;
ctx_p->drvdata = cc_alg->drvdata;
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* Alloc hash tfm for essiv */
+ ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
+ if (IS_ERR(ctx_p->shash_tfm)) {
+ dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
+ return PTR_ERR(ctx_p->shash_tfm);
+ }
+ }
+
/* Allocate key buffer, cache line aligned */
ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
if (!ctx_p->user.key)
- return -ENOMEM;
+ goto free_shash;
dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
ctx_p->user.key);
@@ -164,21 +172,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
max_key_buf_size, ctx_p->user.key);
- return -ENOMEM;
+ goto free_key;
}
dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
- if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
- /* Alloc hash tfm for essiv */
- ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
- if (IS_ERR(ctx_p->shash_tfm)) {
- dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
- return PTR_ERR(ctx_p->shash_tfm);
- }
- }
+ return 0;
- return rc;
+free_key:
+ kfree(ctx_p->user.key);
+free_shash:
+ crypto_free_shash(ctx_p->shash_tfm);
+
+ return -ENOMEM;
}
static void cc_cipher_exit(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index c435f89f34e3..ee508bbbb750 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -2418,8 +2418,9 @@ int chcr_aead_dma_map(struct device *dev,
else
reqctx->b0_dma = 0;
if (req->src == req->dst) {
- error = dma_map_sg(dev, req->src, sg_nents(req->src),
- DMA_BIDIRECTIONAL);
+ error = dma_map_sg(dev, req->src,
+ sg_nents_for_len(req->src, dst_size),
+ DMA_BIDIRECTIONAL);
if (!error)
goto err;
} else {
@@ -2764,7 +2765,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
unsigned int ccm_xtra;
- unsigned char tag_offset = 0, auth_offset = 0;
+ unsigned int tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 28d24118c645..08ed3ff8b255 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -175,7 +175,7 @@ static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
{
if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
__skb_trim(skb, 0);
- refcount_add(2, &skb->users);
+ refcount_inc(&skb->users);
} else {
skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
}
@@ -581,7 +581,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
while (!skb_queue_empty(&listen_ctx->synq)) {
struct chtls_sock *csk =
- container_of((struct synq *)__skb_dequeue
+ container_of((struct synq *)skb_peek
(&listen_ctx->synq), struct chtls_sock, synq);
struct sock *child = csk->sk;
@@ -696,14 +696,13 @@ static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
if (rpl->status != CPL_ERR_NONE) {
pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
rpl->status, stid);
- return CPL_RET_BUF_DONE;
+ } else {
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
}
- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
- sock_put(listen_ctx->lsk);
- kfree(listen_ctx);
- module_put(THIS_MODULE);
-
- return 0;
+ return CPL_RET_BUF_DONE;
}
static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
@@ -720,15 +719,13 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
if (rpl->status != CPL_ERR_NONE) {
pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
rpl->status, stid);
- return CPL_RET_BUF_DONE;
+ } else {
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
}
-
- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
- sock_put(listen_ctx->lsk);
- kfree(listen_ctx);
- module_put(THIS_MODULE);
-
- return 0;
+ return CPL_RET_BUF_DONE;
}
static void chtls_purge_wr_queue(struct sock *sk)
@@ -1027,6 +1024,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
+ struct adapter *adap = pci_get_drvdata(cdev->pdev);
const struct tcphdr *tcph;
struct inet_sock *newinet;
const struct iphdr *iph;
@@ -1036,9 +1034,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
struct neighbour *n;
struct tcp_sock *tp;
struct sock *newsk;
+ bool found = false;
u16 port_id;
int rxq_idx;
- int step;
+ int step, i;
iph = (const struct iphdr *)network_hdr;
newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
@@ -1051,12 +1050,20 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
tcph = (struct tcphdr *)(iph + 1);
n = dst_neigh_lookup(dst, &iph->saddr);
- if (!n)
- goto free_sk;
+ if (!n || !n->dev)
+ goto free_dst;
ndev = n->dev;
- if (!ndev)
+ if (is_vlan_dev(ndev))
+ ndev = vlan_dev_real_dev(ndev);
+
+ for_each_port(adap, i)
+ if (cdev->ports[i] == ndev)
+ found = true;
+
+ if (!found)
goto free_dst;
+
port_id = cxgb4_port_idx(ndev);
csk = chtls_sock_create(cdev);
@@ -1079,6 +1086,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
sk_setup_caps(newsk, dst);
+ newsk->sk_prot_creator = lsk->sk_prot_creator;
csk->sk = newsk;
csk->passive_reap_next = oreq;
csk->tx_chan = cxgb4_port_chan(ndev);
@@ -1107,6 +1115,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
free_csk:
chtls_sock_release(&csk->kref);
free_dst:
+ if (n)
+ neigh_release(n);
dst_release(dst);
free_sk:
inet_csk_prepare_forced_close(newsk);
@@ -1345,7 +1355,6 @@ static void add_to_reap_list(struct sock *sk)
struct chtls_sock *csk = sk->sk_user_data;
local_bh_disable();
- bh_lock_sock(sk);
release_tcp_port(sk); /* release the port immediately */
spin_lock(&reap_list_lock);
@@ -1354,7 +1363,6 @@ static void add_to_reap_list(struct sock *sk)
if (!csk->passive_reap_next)
schedule_work(&reap_task);
spin_unlock(&reap_list_lock);
- bh_unlock_sock(sk);
local_bh_enable();
}
@@ -1423,6 +1431,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
sk_wake_async(sk, 0, POLL_OUT);
data = lookup_stid(cdev->tids, stid);
+ if (!data) {
+ /* listening server close */
+ kfree_skb(skb);
+ goto unlock;
+ }
lsk = ((struct listen_ctx *)data)->lsk;
bh_lock_sock(lsk);
@@ -1808,39 +1821,6 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
kfree_skb(skb);
}
-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
- struct chtls_dev *cdev, int status, int queue)
-{
- struct cpl_abort_req_rss *req = cplhdr(skb);
- struct sk_buff *reply_skb;
- struct chtls_sock *csk;
-
- csk = rcu_dereference_sk_user_data(sk);
-
- reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
- GFP_KERNEL);
-
- if (!reply_skb) {
- req->status = (queue << 1);
- send_defer_abort_rpl(cdev, skb);
- return;
- }
-
- set_abort_rpl_wr(reply_skb, GET_TID(req), status);
- kfree_skb(skb);
-
- set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
- if (csk_conn_inline(csk)) {
- struct l2t_entry *e = csk->l2t_entry;
-
- if (e && sk->sk_state != TCP_SYN_RECV) {
- cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
- return;
- }
- }
- cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
-}
-
/*
* Add an skb to the deferred skb queue for processing from process context.
*/
@@ -1903,9 +1883,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
queue = csk->txq_idx;
skb->sk = NULL;
+ chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+ CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(child, lsk);
- send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
- CPL_ABORT_NO_RST, queue);
}
static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
@@ -1935,8 +1915,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
if (!sock_owned_by_user(psk)) {
int queue = csk->txq_idx;
+ chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(sk, psk);
- send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
} else {
skb->sk = sk;
BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
@@ -1954,9 +1934,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
int queue = csk->txq_idx;
if (is_neg_adv(req->status)) {
- if (sk->sk_state == TCP_SYN_RECV)
- chtls_set_tcb_tflag(sk, 0, 0);
-
kfree_skb(skb);
return;
}
@@ -1982,12 +1959,11 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
return;
-
- chtls_release_resources(sk);
- chtls_conn_done(sk);
}
chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue);
+ chtls_release_resources(sk);
+ chtls_conn_done(sk);
}
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
index 4282d8a4eae4..ef7261072434 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
@@ -53,9 +53,6 @@
#define MIN_RCV_WND (24 * 1024U)
#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
-/* ulp_mem_io + ulptx_idata + payload + padding */
-#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
-
/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
#define TX_HEADER_LEN \
(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 64d24823c65a..7ea9dcfd7c49 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -368,11 +368,15 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
if (ret)
goto out_notcb;
+ if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
+ goto out_notcb;
+
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
csk->wr_credits -= DIV_ROUND_UP(len, 16);
csk->wr_unacked += DIV_ROUND_UP(len, 16);
enqueue_wr(csk, skb);
cxgb4_ofld_send(csk->egress_dev, skb);
+ skb = NULL;
chtls_set_scmd(csk);
/* Clear quiesce for Rx key */
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 1587f4ac6821..f9874da23a29 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -686,7 +686,7 @@ int chtls_push_frames(struct chtls_sock *csk, int comp)
make_tx_data_wr(sk, skb, immdlen, len,
credits_needed, completion);
tp->snd_nxt += len;
- tp->lsndtime = tcp_time_stamp(tp);
+ tp->lsndtime = tcp_jiffies32;
if (completion)
ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
} else {
@@ -914,9 +914,9 @@ static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
return (__force int)cpu_to_be16(thdr->length);
}
-static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
+static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
{
- return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
+ return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0);
}
static int csk_wait_memory(struct chtls_dev *cdev,
@@ -1217,6 +1217,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
copied = 0;
csk = rcu_dereference_sk_user_data(sk);
cdev = csk->cdev;
+ lock_sock(sk);
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
err = sk_stream_wait_connect(sk, &timeo);
@@ -1449,7 +1450,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
csk->wr_max_credits))
sk->sk_write_space(sk);
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
@@ -1482,7 +1483,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
break;
}
}
- if (sk->sk_backlog.tail) {
+ if (READ_ONCE(sk->sk_backlog.tail)) {
release_sock(sk);
lock_sock(sk);
chtls_cleanup_rbuf(sk, copied);
@@ -1548,6 +1549,7 @@ skip_copy:
tp->urg_data = 0;
if ((avail + offset) >= skb->len) {
+ struct sk_buff *next_skb;
if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
tp->copied_seq += skb->len;
hws->rcvpld = skb->hdr_len;
@@ -1557,8 +1559,10 @@ skip_copy:
chtls_free_skb(sk, skb);
buffers_freed++;
hws->copied_seq = 0;
- if (copied >= target &&
- !skb_peek(&sk->sk_receive_queue))
+ next_skb = skb_peek(&sk->sk_receive_queue);
+ if (copied >= target && !next_skb)
+ break;
+ if (ULP_SKB_CB(next_skb)->flags & ULPCB_FLAG_TLS_HDR)
break;
}
} while (len > 0);
@@ -1627,7 +1631,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
break;
}
- if (sk->sk_backlog.tail) {
+ if (READ_ONCE(sk->sk_backlog.tail)) {
/* Do not sleep, just process backlog. */
release_sock(sk);
lock_sock(sk);
@@ -1759,7 +1763,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
csk->wr_max_credits))
sk->sk_write_space(sk);
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
@@ -1790,7 +1794,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
}
- if (sk->sk_backlog.tail) {
+ if (READ_ONCE(sk->sk_backlog.tail)) {
release_sock(sk);
lock_sock(sk);
chtls_cleanup_rbuf(sk, copied);
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index bf9658800bda..3e3cc28d5cfe 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
dma_addr_t *psec_sgl,
struct scatterlist *sgl,
int count,
- struct sec_dev_info *info)
+ struct sec_dev_info *info,
+ gfp_t gfp)
{
struct sec_hw_sgl *sgl_current = NULL;
struct sec_hw_sgl *sgl_next;
@@ -190,7 +191,7 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
sge_index = i % SEC_MAX_SGE_NUM;
if (sge_index == 0) {
sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
- GFP_KERNEL, &sgl_next_dma);
+ gfp, &sgl_next_dma);
if (!sgl_next) {
ret = -ENOMEM;
goto err_free_hw_sgls;
@@ -553,14 +554,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
}
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
- int *steps)
+ int *steps, gfp_t gfp)
{
size_t *sizes;
int i;
/* Split into suitable sized blocks */
*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
- sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
+ sizes = kcalloc(*steps, sizeof(*sizes), gfp);
if (!sizes)
return -ENOMEM;
@@ -576,7 +577,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
int steps, struct scatterlist ***splits,
int **splits_nents,
int sgl_len_in,
- struct device *dev)
+ struct device *dev, gfp_t gfp)
{
int ret, count;
@@ -584,12 +585,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
if (!count)
return -EINVAL;
- *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
+ *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
if (!*splits) {
ret = -ENOMEM;
goto err_unmap_sg;
}
- *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
+ *splits_nents = kcalloc(steps, sizeof(int), gfp);
if (!*splits_nents) {
ret = -ENOMEM;
goto err_free_splits;
@@ -597,7 +598,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
/* output the scatter list before and after this */
ret = sg_split(sgl, count, 0, steps, split_sizes,
- *splits, *splits_nents, GFP_KERNEL);
+ *splits, *splits_nents, gfp);
if (ret) {
ret = -ENOMEM;
goto err_free_splits_nents;
@@ -638,13 +639,13 @@ static struct sec_request_el
int el_size, bool different_dest,
struct scatterlist *sgl_in, int n_ents_in,
struct scatterlist *sgl_out, int n_ents_out,
- struct sec_dev_info *info)
+ struct sec_dev_info *info, gfp_t gfp)
{
struct sec_request_el *el;
struct sec_bd_info *req;
int ret;
- el = kzalloc(sizeof(*el), GFP_KERNEL);
+ el = kzalloc(sizeof(*el), gfp);
if (!el)
return ERR_PTR(-ENOMEM);
el->el_length = el_size;
@@ -676,7 +677,7 @@ static struct sec_request_el
el->sgl_in = sgl_in;
ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
- n_ents_in, info);
+ n_ents_in, info, gfp);
if (ret)
goto err_free_el;
@@ -687,7 +688,7 @@ static struct sec_request_el
el->sgl_out = sgl_out;
ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
el->sgl_out,
- n_ents_out, info);
+ n_ents_out, info, gfp);
if (ret)
goto err_free_hw_sgl_in;
@@ -728,6 +729,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
int *splits_out_nents = NULL;
struct sec_request_el *el, *temp;
bool split = skreq->src != skreq->dst;
+ gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
mutex_init(&sec_req->lock);
sec_req->req_base = &skreq->base;
@@ -736,13 +738,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
sec_req->len_in = sg_nents(skreq->src);
ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
- &steps);
+ &steps, gfp);
if (ret)
return ret;
sec_req->num_elements = steps;
ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
&splits_in_nents, sec_req->len_in,
- info->dev);
+ info->dev, gfp);
if (ret)
goto err_free_split_sizes;
@@ -750,7 +752,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
sec_req->len_out = sg_nents(skreq->dst);
ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
&splits_out, &splits_out_nents,
- sec_req->len_out, info->dev);
+ sec_req->len_out, info->dev, gfp);
if (ret)
goto err_unmap_in_sg;
}
@@ -783,7 +785,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
splits_in[i], splits_in_nents[i],
split ? splits_out[i] : NULL,
split ? splits_out_nents[i] : 0,
- info);
+ info, gfp);
if (IS_ERR(el)) {
ret = PTR_ERR(el);
goto err_free_elements;
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 86c699c14f84..bc6c5cb7de23 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1066,7 +1066,7 @@ static int safexcel_probe(struct platform_device *pdev)
priv->ring[i].rdr_req = devm_kcalloc(dev,
EIP197_DEFAULT_RING_SIZE,
- sizeof(priv->ring[i].rdr_req),
+ sizeof(*priv->ring[i].rdr_req),
GFP_KERNEL);
if (!priv->ring[i].rdr_req) {
ret = -ENOMEM;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 27f7dad2d45d..9b7b8558db31 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -530,7 +530,7 @@ static void release_ixp_crypto(struct device *dev)
if (crypt_virt) {
dma_free_coherent(dev,
- NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
+ NPE_QLEN * sizeof(struct crypt_ctl),
crypt_virt, crypt_phys);
}
}
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index ee0404e27a0f..e4d7ef3bfb61 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -446,7 +446,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
{
struct mtk_ring **ring = cryp->ring;
- int i, err = ENOMEM;
+ int i;
for (i = 0; i < MTK_RING_MAX; i++) {
ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
@@ -473,14 +473,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
return 0;
err_cleanup:
- for (; i--; ) {
+ do {
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->res_base, ring[i]->res_dma);
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->cmd_base, ring[i]->cmd_dma);
kfree(ring[i]);
- }
- return err;
+ } while (i--);
+ return -ENOMEM;
}
static int mtk_crypto_probe(struct platform_device *pdev)
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index b926098f70ff..b0c592073a4a 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -25,6 +25,7 @@
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE
@@ -36,11 +37,11 @@
* Null hashes to align with hw behavior on imx6sl and ull
* these are flipped for consistency with hw output
*/
-const uint8_t sha1_null_hash[] =
+static const uint8_t sha1_null_hash[] =
"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
-const uint8_t sha256_null_hash[] =
+static const uint8_t sha256_null_hash[] =
"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
@@ -621,49 +622,46 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
- const int nents = sg_nents(req->src);
uint8_t *in_buf = sdcp->coh->sha_in_buf;
uint8_t *out_buf = sdcp->coh->sha_out_buf;
- uint8_t *src_buf;
-
struct scatterlist *src;
- unsigned int i, len, clen;
+ unsigned int i, len, clen, oft = 0;
int ret;
int fin = rctx->fini;
if (fin)
rctx->fini = 0;
- for_each_sg(req->src, src, nents, i) {
- src_buf = sg_virt(src);
- len = sg_dma_len(src);
-
- do {
- if (actx->fill + len > DCP_BUF_SZ)
- clen = DCP_BUF_SZ - actx->fill;
- else
- clen = len;
-
- memcpy(in_buf + actx->fill, src_buf, clen);
- len -= clen;
- src_buf += clen;
- actx->fill += clen;
+ src = req->src;
+ len = req->nbytes;
- /*
- * If we filled the buffer and still have some
- * more data, submit the buffer.
- */
- if (len && actx->fill == DCP_BUF_SZ) {
- ret = mxs_dcp_run_sha(req);
- if (ret)
- return ret;
- actx->fill = 0;
- rctx->init = 0;
- }
- } while (len);
+ while (len) {
+ if (actx->fill + len > DCP_BUF_SZ)
+ clen = DCP_BUF_SZ - actx->fill;
+ else
+ clen = len;
+
+ scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
+ 0);
+
+ len -= clen;
+ oft += clen;
+ actx->fill += clen;
+
+ /*
+ * If we filled the buffer and still have some
+ * more data, submit the buffer.
+ */
+ if (len && actx->fill == DCP_BUF_SZ) {
+ ret = mxs_dcp_run_sha(req);
+ if (ret)
+ return ret;
+ actx->fill = 0;
+ rctx->init = 0;
+ }
}
if (fin) {
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 9019f6b67986..a5d6e1a0192b 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1163,7 +1163,7 @@ static int omap_aes_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(dev, "%s: failed to get_sync(%d)\n",
__func__, err);
- goto err_res;
+ goto err_pm_disable;
}
omap_aes_dma_stop(dd);
@@ -1276,6 +1276,7 @@ err_engine:
omap_aes_dma_cleanup(dd);
err_irq:
tasklet_kill(&dd->done_task);
+err_pm_disable:
pm_runtime_disable(dev);
err_res:
dd = NULL;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 0641185bd82f..4d31ef472436 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -168,8 +168,6 @@ struct omap_sham_hmac_ctx {
};
struct omap_sham_ctx {
- struct omap_sham_dev *dd;
-
unsigned long flags;
/* fallback stuff */
@@ -458,6 +456,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
u32 val, mask;
+ if (likely(ctx->digcnt))
+ omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
+
/*
* Setting ALGO_CONST only for the first iteration and
* CLOSE_HASH only for the last one. Note that flags mode bits
@@ -921,27 +922,35 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
return 0;
}
+struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
+{
+ struct omap_sham_dev *dd;
+
+ if (ctx->dd)
+ return ctx->dd;
+
+ spin_lock_bh(&sham.lock);
+ dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
+ list_move_tail(&dd->list, &sham.dev_list);
+ ctx->dd = dd;
+ spin_unlock_bh(&sham.lock);
+
+ return dd;
+}
+
static int omap_sham_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- struct omap_sham_dev *dd = NULL, *tmp;
+ struct omap_sham_dev *dd;
int bs = 0;
- spin_lock_bh(&sham.lock);
- if (!tctx->dd) {
- list_for_each_entry(tmp, &sham.dev_list, list) {
- dd = tmp;
- break;
- }
- tctx->dd = dd;
- } else {
- dd = tctx->dd;
- }
- spin_unlock_bh(&sham.lock);
+ ctx->dd = NULL;
- ctx->dd = dd;
+ dd = omap_sham_find_dev(ctx);
+ if (!dd)
+ return -ENODEV;
ctx->flags = 0;
@@ -1191,8 +1200,7 @@ err1:
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct omap_sham_dev *dd = tctx->dd;
+ struct omap_sham_dev *dd = ctx->dd;
ctx->op = op;
@@ -1202,7 +1210,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
static int omap_sham_update(struct ahash_request *req)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- struct omap_sham_dev *dd = ctx->dd;
+ struct omap_sham_dev *dd = omap_sham_find_dev(ctx);
if (!req->nbytes)
return 0;
@@ -1307,21 +1315,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
struct omap_sham_hmac_ctx *bctx = tctx->base;
int bs = crypto_shash_blocksize(bctx->shash);
int ds = crypto_shash_digestsize(bctx->shash);
- struct omap_sham_dev *dd = NULL, *tmp;
int err, i;
- spin_lock_bh(&sham.lock);
- if (!tctx->dd) {
- list_for_each_entry(tmp, &sham.dev_list, list) {
- dd = tmp;
- break;
- }
- tctx->dd = dd;
- } else {
- dd = tctx->dd;
- }
- spin_unlock_bh(&sham.lock);
-
err = crypto_shash_setkey(tctx->fallback, key, keylen);
if (err)
return err;
@@ -1339,7 +1334,7 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
memset(bctx->ipad + keylen, 0, bs - keylen);
- if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
+ if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
memcpy(bctx->opad, bctx->ipad, bs);
for (i = 0; i < bs; i++) {
@@ -2142,6 +2137,7 @@ static int omap_sham_probe(struct platform_device *pdev)
}
dd->flags |= dd->pdata->flags;
+ sham.flags |= dd->pdata->flags;
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
@@ -2169,6 +2165,9 @@ static int omap_sham_probe(struct platform_device *pdev)
spin_unlock(&sham.lock);
for (i = 0; i < dd->pdata->algs_info_size; i++) {
+ if (dd->pdata->algs_info[i].registered)
+ break;
+
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
struct ahash_alg *alg;
@@ -2220,9 +2219,11 @@ static int omap_sham_remove(struct platform_device *pdev)
list_del(&dd->list);
spin_unlock(&sham.lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
- for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
crypto_unregister_ahash(
&dd->pdata->algs_info[i].algs_list[j]);
+ dd->pdata->algs_info[i].registered--;
+ }
tasklet_kill(&dd->done_task);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index e2491754c468..1ef47f7208b9 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1701,11 +1701,6 @@ static int spacc_probe(struct platform_device *pdev)
goto err_clk_put;
}
- ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
- if (ret)
- goto err_clk_disable;
-
-
/*
* Use an IRQ threshold of 50% as a default. This seems to be a
* reasonable trade off of latency against throughput but can be
@@ -1713,6 +1708,10 @@ static int spacc_probe(struct platform_device *pdev)
*/
engine->stat_irq_thresh = (engine->fifo_sz / 2);
+ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ if (ret)
+ goto err_clk_disable;
+
/*
* Configure the interrupts. We only use the STAT_CNT interrupt as we
* only submit a new packet for processing when we complete another in
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 613c7d5644ce..e87b7c466bdb 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -238,12 +238,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 278452b8ef81..a8f3f2ecae70 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -238,12 +238,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index cd1cdf5305bc..4898ef41fd9f 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -330,19 +330,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
ret = adf_isr_alloc_msix_entry_table(accel_dev);
if (ret)
- return ret;
- if (adf_enable_msix(accel_dev))
goto err_out;
- if (adf_setup_bh(accel_dev))
- goto err_out;
+ ret = adf_enable_msix(accel_dev);
+ if (ret)
+ goto err_free_msix_table;
- if (adf_request_irqs(accel_dev))
- goto err_out;
+ ret = adf_setup_bh(accel_dev);
+ if (ret)
+ goto err_disable_msix;
+
+ ret = adf_request_irqs(accel_dev);
+ if (ret)
+ goto err_cleanup_bh;
return 0;
+
+err_cleanup_bh:
+ adf_cleanup_bh(accel_dev);
+
+err_disable_msix:
+ adf_disable_msix(&accel_dev->accel_pci_dev);
+
+err_free_msix_table:
+ adf_isr_free_msix_entry_table(accel_dev);
+
err_out:
- adf_isr_resource_free(accel_dev);
- return -EFAULT;
+ return ret;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 57d2622728a5..4c0067f8c079 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -197,6 +197,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
ring->base_addr, ring->dma_addr);
+ ring->base_addr = NULL;
return -EFAULT;
}
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 4a73fc70f7a9..df9a1f35b832 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -304,17 +304,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
goto err_out;
if (adf_setup_pf2vf_bh(accel_dev))
- goto err_out;
+ goto err_disable_msi;
if (adf_setup_bh(accel_dev))
- goto err_out;
+ goto err_cleanup_pf2vf_bh;
if (adf_request_msi_irq(accel_dev))
- goto err_out;
+ goto err_cleanup_bh;
return 0;
+
+err_cleanup_bh:
+ adf_cleanup_bh(accel_dev);
+
+err_cleanup_pf2vf_bh:
+ adf_cleanup_pf2vf_bh(accel_dev);
+
+err_disable_msi:
+ adf_disable_msi(accel_dev);
+
err_out:
- adf_vf_isr_resource_free(accel_dev);
return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 1138e41d6805..883342a45be7 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -828,6 +828,11 @@ static int qat_alg_aead_dec(struct aead_request *areq)
struct icp_qat_fw_la_bulk_req *msg;
int digst_size = crypto_aead_authsize(aead_tfm);
int ret, ctr = 0;
+ u32 cipher_len;
+
+ cipher_len = areq->cryptlen - digst_size;
+ if (cipher_len % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
if (unlikely(ret))
@@ -842,7 +847,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- cipher_param->cipher_length = areq->cryptlen - digst_size;
+ cipher_param->cipher_length = cipher_len;
cipher_param->cipher_offset = areq->assoclen;
memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
@@ -871,6 +876,9 @@ static int qat_alg_aead_enc(struct aead_request *areq)
uint8_t *iv = areq->iv;
int ret, ctr = 0;
+ if (areq->cryptlen % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
if (unlikely(ret))
return ret;
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index ff149e176f64..dac130bb807a 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -1189,7 +1189,7 @@ static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
unsigned short mask;
unsigned short dr_offset = 0x10;
- status = ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 6bd8f6a2a24f..aeb03081415c 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -332,13 +332,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
}
return 0;
out_err:
+ /* Do not free the list head unless we allocated it. */
+ tail_old = tail_old->next;
+ if (flag) {
+ kfree(*init_tab_base);
+ *init_tab_base = NULL;
+ }
+
while (tail_old) {
mem_init = tail_old->next;
kfree(tail_old);
tail_old = mem_init;
}
- if (flag)
- kfree(*init_tab_base);
return -ENOMEM;
}
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 3da0f951cb59..1b954abf67fb 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -238,12 +238,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index 29d2095d9dfd..47d31335c2d4 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -28,8 +28,10 @@
/* Registers values */
#define CRC_CR_RESET BIT(0)
-#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
-#define CRC_INIT_DEFAULT 0xFFFFFFFF
+#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5))
+#define CRC_CR_REV_IN_BYTE BIT(5)
+#define CRC_CR_REV_OUT BIT(7)
+#define CRC32C_INIT_DEFAULT 0xFFFFFFFF
#define CRC_AUTOSUSPEND_DELAY 50
@@ -38,8 +40,6 @@ struct stm32_crc {
struct device *dev;
void __iomem *regs;
struct clk *clk;
- u8 pending_data[sizeof(u32)];
- size_t nb_pending_bytes;
};
struct stm32_crc_list {
@@ -59,14 +59,13 @@ struct stm32_crc_ctx {
struct stm32_crc_desc_ctx {
u32 partial; /* crc32c: partial in first 4 bytes of that struct */
- struct stm32_crc *crc;
};
static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
- mctx->key = CRC_INIT_DEFAULT;
+ mctx->key = 0;
mctx->poly = CRC32_POLY_LE;
return 0;
}
@@ -75,7 +74,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
{
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
- mctx->key = CRC_INIT_DEFAULT;
+ mctx->key = CRC32C_INIT_DEFAULT;
mctx->poly = CRC32C_POLY_LE;
return 0;
}
@@ -94,32 +93,42 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
return 0;
}
+static struct stm32_crc *stm32_crc_get_next_crc(void)
+{
+ struct stm32_crc *crc;
+
+ spin_lock_bh(&crc_list.lock);
+ crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
+ if (crc)
+ list_move_tail(&crc->list, &crc_list.dev_list);
+ spin_unlock_bh(&crc_list.lock);
+
+ return crc;
+}
+
static int stm32_crc_init(struct shash_desc *desc)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
- spin_lock_bh(&crc_list.lock);
- list_for_each_entry(crc, &crc_list.dev_list, list) {
- ctx->crc = crc;
- break;
- }
- spin_unlock_bh(&crc_list.lock);
+ crc = stm32_crc_get_next_crc();
+ if (!crc)
+ return -ENODEV;
- pm_runtime_get_sync(ctx->crc->dev);
+ pm_runtime_get_sync(crc->dev);
/* Reset, set key, poly and configure in bit reverse mode */
- writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
- writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
- writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
+ writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
+ writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
+ writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
/* Store partial result */
- ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
- ctx->crc->nb_pending_bytes = 0;
+ ctx->partial = readl_relaxed(crc->regs + CRC_DR);
- pm_runtime_mark_last_busy(ctx->crc->dev);
- pm_runtime_put_autosuspend(ctx->crc->dev);
+ pm_runtime_mark_last_busy(crc->dev);
+ pm_runtime_put_autosuspend(crc->dev);
return 0;
}
@@ -128,31 +137,49 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
unsigned int length)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc *crc = ctx->crc;
- u32 *d32;
- unsigned int i;
+ struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+ struct stm32_crc *crc;
+
+ crc = stm32_crc_get_next_crc();
+ if (!crc)
+ return -ENODEV;
pm_runtime_get_sync(crc->dev);
- if (unlikely(crc->nb_pending_bytes)) {
- while (crc->nb_pending_bytes != sizeof(u32) && length) {
- /* Fill in pending data */
- crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+ /*
+ * Restore previously calculated CRC for this context as init value
+ * Restore polynomial configuration
+ * Configure in register for word input data,
+ * Configure out register in reversed bit mode data.
+ */
+ writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
+ writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
+ writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
+
+ if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
+ /* Configure for byte data */
+ writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
+ while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
+ writeb_relaxed(*d8++, crc->regs + CRC_DR);
length--;
}
-
- if (crc->nb_pending_bytes == sizeof(u32)) {
- /* Process completed pending data */
- writel_relaxed(*(u32 *)crc->pending_data,
- crc->regs + CRC_DR);
- crc->nb_pending_bytes = 0;
- }
+ /* Configure for word data */
+ writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
}
- d32 = (u32 *)d8;
- for (i = 0; i < length >> 2; i++)
- /* Process 32 bits data */
- writel_relaxed(*(d32++), crc->regs + CRC_DR);
+ for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
+ writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
+
+ if (length) {
+ /* Configure for byte data */
+ writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
+ crc->regs + CRC_CR);
+ while (length--)
+ writeb_relaxed(*d8++, crc->regs + CRC_DR);
+ }
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
@@ -160,22 +187,6 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
- /* Check for pending data (non 32 bits) */
- length &= 3;
- if (likely(!length))
- return 0;
-
- if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
- /* Shall not happen */
- dev_err(crc->dev, "Pending data overflow\n");
- return -EINVAL;
- }
-
- d8 = (const u8 *)d32;
- for (i = 0; i < length; i++)
- /* Store pending data */
- crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
-
return 0;
}
@@ -204,6 +215,8 @@ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
}
+static unsigned int refcnt;
+static DEFINE_MUTEX(refcnt_lock);
static struct shash_alg algs[] = {
/* CRC-32 */
{
@@ -296,12 +309,18 @@ static int stm32_crc_probe(struct platform_device *pdev)
list_add(&crc->list, &crc_list.dev_list);
spin_unlock(&crc_list.lock);
- ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
- if (ret) {
- dev_err(dev, "Failed to register\n");
- clk_disable_unprepare(crc->clk);
- return ret;
+ mutex_lock(&refcnt_lock);
+ if (!refcnt) {
+ ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
+ if (ret) {
+ mutex_unlock(&refcnt_lock);
+ dev_err(dev, "Failed to register\n");
+ clk_disable_unprepare(crc->clk);
+ return ret;
+ }
}
+ refcnt++;
+ mutex_unlock(&refcnt_lock);
dev_info(dev, "Initialized\n");
@@ -322,7 +341,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
list_del(&crc->list);
spin_unlock(&crc_list.lock);
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ mutex_lock(&refcnt_lock);
+ if (!--refcnt)
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ mutex_unlock(&refcnt_lock);
pm_runtime_disable(crc->dev);
pm_runtime_put_noidle(crc->dev);
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 22e491857925..aa3d2f439965 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -34,6 +34,8 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
unsigned int ileft = areq->cryptlen;
unsigned int oleft = areq->cryptlen;
unsigned int todo;
+ unsigned long pi = 0, po = 0; /* progress for in and out */
+ bool miter_err;
struct sg_mapping_iter mi, mo;
unsigned int oi, oo; /* offset for in and out */
unsigned long flags;
@@ -53,50 +55,62 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
spin_lock_irqsave(&ss->slock, flags);
- for (i = 0; i < op->keylen; i += 4)
- writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+ for (i = 0; i < op->keylen / 4; i++)
+ writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
if (areq->iv) {
for (i = 0; i < 4 && i < ivsize / 4; i++) {
v = *(u32 *)(areq->iv + i * 4);
- writel(v, ss->base + SS_IV0 + i * 4);
+ writesl(ss->base + SS_IV0 + i * 4, &v, 1);
}
}
writel(mode, ss->base + SS_CTL);
- sg_miter_start(&mi, areq->src, sg_nents(areq->src),
- SG_MITER_FROM_SG | SG_MITER_ATOMIC);
- sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
- SG_MITER_TO_SG | SG_MITER_ATOMIC);
- sg_miter_next(&mi);
- sg_miter_next(&mo);
- if (!mi.addr || !mo.addr) {
- dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
- err = -EINVAL;
- goto release_ss;
- }
ileft = areq->cryptlen / 4;
oleft = areq->cryptlen / 4;
oi = 0;
oo = 0;
do {
- todo = min(rx_cnt, ileft);
- todo = min_t(size_t, todo, (mi.length - oi) / 4);
- if (todo) {
- ileft -= todo;
- writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
- oi += todo * 4;
- }
- if (oi == mi.length) {
- sg_miter_next(&mi);
- oi = 0;
+ if (ileft) {
+ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ if (pi)
+ sg_miter_skip(&mi, pi);
+ miter_err = sg_miter_next(&mi);
+ if (!miter_err || !mi.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
+ todo = min(rx_cnt, ileft);
+ todo = min_t(size_t, todo, (mi.length - oi) / 4);
+ if (todo) {
+ ileft -= todo;
+ writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
+ oi += todo * 4;
+ }
+ if (oi == mi.length) {
+ pi += mi.length;
+ oi = 0;
+ }
+ sg_miter_stop(&mi);
}
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
+ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+ SG_MITER_TO_SG | SG_MITER_ATOMIC);
+ if (po)
+ sg_miter_skip(&mo, po);
+ miter_err = sg_miter_next(&mo);
+ if (!miter_err || !mo.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
todo = min(tx_cnt, oleft);
todo = min_t(size_t, todo, (mo.length - oo) / 4);
if (todo) {
@@ -105,9 +119,10 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
oo += todo * 4;
}
if (oo == mo.length) {
- sg_miter_next(&mo);
oo = 0;
+ po += mo.length;
}
+ sg_miter_stop(&mo);
} while (oleft);
if (areq->iv) {
@@ -118,8 +133,6 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
}
release_ss:
- sg_miter_stop(&mi);
- sg_miter_stop(&mo);
writel(0, ss->base + SS_CTL);
spin_unlock_irqrestore(&ss->slock, flags);
return err;
@@ -148,6 +161,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
unsigned int oleft = areq->cryptlen;
unsigned int todo;
struct sg_mapping_iter mi, mo;
+ unsigned long pi = 0, po = 0; /* progress for in and out */
+ bool miter_err;
unsigned int oi, oo; /* offset for in and out */
char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
@@ -174,12 +189,12 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* we can use the SS optimized function
*/
while (in_sg && no_chunk == 1) {
- if (in_sg->length % 4)
+ if ((in_sg->length | in_sg->offset) & 3u)
no_chunk = 0;
in_sg = sg_next(in_sg);
}
while (out_sg && no_chunk == 1) {
- if (out_sg->length % 4)
+ if ((out_sg->length | out_sg->offset) & 3u)
no_chunk = 0;
out_sg = sg_next(out_sg);
}
@@ -189,28 +204,17 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
spin_lock_irqsave(&ss->slock, flags);
- for (i = 0; i < op->keylen; i += 4)
- writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+ for (i = 0; i < op->keylen / 4; i++)
+ writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
if (areq->iv) {
for (i = 0; i < 4 && i < ivsize / 4; i++) {
v = *(u32 *)(areq->iv + i * 4);
- writel(v, ss->base + SS_IV0 + i * 4);
+ writesl(ss->base + SS_IV0 + i * 4, &v, 1);
}
}
writel(mode, ss->base + SS_CTL);
- sg_miter_start(&mi, areq->src, sg_nents(areq->src),
- SG_MITER_FROM_SG | SG_MITER_ATOMIC);
- sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
- SG_MITER_TO_SG | SG_MITER_ATOMIC);
- sg_miter_next(&mi);
- sg_miter_next(&mo);
- if (!mi.addr || !mo.addr) {
- dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
- err = -EINVAL;
- goto release_ss;
- }
ileft = areq->cryptlen;
oleft = areq->cryptlen;
oi = 0;
@@ -218,6 +222,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
while (oleft) {
if (ileft) {
+ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ if (pi)
+ sg_miter_skip(&mi, pi);
+ miter_err = sg_miter_next(&mi);
+ if (!miter_err || !mi.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
/*
* todo is the number of consecutive 4byte word that we
* can read from current SG
@@ -250,31 +264,38 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
}
}
if (oi == mi.length) {
- sg_miter_next(&mi);
+ pi += mi.length;
oi = 0;
}
+ sg_miter_stop(&mi);
}
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- dev_dbg(ss->dev,
- "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
- mode,
- oi, mi.length, ileft, areq->cryptlen, rx_cnt,
- oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
if (!tx_cnt)
continue;
+ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+ SG_MITER_TO_SG | SG_MITER_ATOMIC);
+ if (po)
+ sg_miter_skip(&mo, po);
+ miter_err = sg_miter_next(&mo);
+ if (!miter_err || !mo.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
/* todo in 4bytes word */
todo = min(tx_cnt, oleft / 4);
todo = min_t(size_t, todo, (mo.length - oo) / 4);
+
if (todo) {
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
oleft -= todo * 4;
oo += todo * 4;
if (oo == mo.length) {
- sg_miter_next(&mo);
+ po += mo.length;
oo = 0;
}
} else {
@@ -299,12 +320,14 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
obo += todo;
oo += todo;
if (oo == mo.length) {
+ po += mo.length;
sg_miter_next(&mo);
oo = 0;
}
} while (obo < obl);
/* bufo must be fully used here */
}
+ sg_miter_stop(&mo);
}
if (areq->iv) {
for (i = 0; i < 4 && i < ivsize / 4; i++) {
@@ -314,8 +337,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
}
release_ss:
- sg_miter_stop(&mi);
- sg_miter_stop(&mo);
writel(0, ss->base + SS_CTL);
spin_unlock_irqrestore(&ss->slock, flags);
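A minimal sketch, outside the driver itself, of the combined alignment test the sun4i-ss hunks above rely on: OR-ing the scatterlist length and offset lets a single mask check confirm that both are multiples of 4. The helper name below is hypothetical.

#include <linux/types.h>	/* bool */

/* sg_entry_word_aligned() is illustrative only, not part of sun4i-ss. */
static bool sg_entry_word_aligned(unsigned int length, unsigned int offset)
{
	/* a value is a multiple of 4 exactly when its two low bits are clear */
	return ((length | offset) & 3u) == 0;
}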
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index db5f939f5aa3..7a55baa861e5 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -474,7 +474,7 @@ DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
/*
* locate current (offending) descriptor
*/
-static u32 current_desc_hdr(struct device *dev, int ch)
+static __be32 current_desc_hdr(struct device *dev, int ch)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int tail, iter;
@@ -492,7 +492,7 @@ static u32 current_desc_hdr(struct device *dev, int ch)
iter = tail;
while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
- priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
+ priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) {
iter = (iter + 1) & (priv->fifo_len - 1);
if (iter == tail) {
dev_err(dev, "couldn't locate current descriptor\n");
@@ -500,7 +500,7 @@ static u32 current_desc_hdr(struct device *dev, int ch)
}
}
- if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
+ if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) {
struct talitos_edesc *edesc;
edesc = container_of(priv->chan[ch].fifo[iter].desc,
@@ -515,13 +515,13 @@ static u32 current_desc_hdr(struct device *dev, int ch)
/*
* user diagnostics; report root cause of error based on execution unit status
*/
-static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
+static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int i;
if (!desc_hdr)
- desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
+ desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF));
switch (desc_hdr & DESC_HDR_SEL0_MASK) {
case DESC_HDR_SEL0_AFEU:
@@ -1066,11 +1066,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
*/
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
unsigned int offset, int datalen, int elen,
- struct talitos_ptr *link_tbl_ptr)
+ struct talitos_ptr *link_tbl_ptr, int align)
{
int n_sg = elen ? sg_count + 1 : sg_count;
int count = 0;
int cryptlen = datalen + elen;
+ int padding = ALIGN(cryptlen, align) - cryptlen;
while (cryptlen && sg && n_sg--) {
unsigned int len = sg_dma_len(sg);
@@ -1094,7 +1095,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
offset += datalen;
}
to_talitos_ptr(link_tbl_ptr + count,
- sg_dma_address(sg) + offset, len, 0);
+ sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
count++;
cryptlen -= len;
@@ -1117,10 +1118,11 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
unsigned int len, struct talitos_edesc *edesc,
struct talitos_ptr *ptr, int sg_count,
unsigned int offset, int tbl_off, int elen,
- bool force)
+ bool force, int align)
{
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
+ int aligned_len = ALIGN(len, align);
if (!src) {
to_talitos_ptr(ptr, 0, 0, is_sec1);
@@ -1128,22 +1130,22 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
}
to_talitos_ptr_ext_set(ptr, elen, is_sec1);
if (sg_count == 1 && !force) {
- to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
+ to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
return sg_count;
}
if (is_sec1) {
- to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
+ to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
return sg_count;
}
sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
- &edesc->link_tbl[tbl_off]);
+ &edesc->link_tbl[tbl_off], align);
if (sg_count == 1 && !force) {
/* Only one segment now, so no link tbl needed */
copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
return sg_count;
}
to_talitos_ptr(ptr, edesc->dma_link_tbl +
- tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
+ tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
return sg_count;
@@ -1155,7 +1157,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
unsigned int offset, int tbl_off)
{
return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
- tbl_off, 0, false);
+ tbl_off, 0, false, 1);
}
/*
@@ -1224,7 +1226,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
sg_count, areq->assoclen, tbl_off, elen,
- false);
+ false, 1);
if (ret > 1) {
tbl_off += ret;
@@ -1244,7 +1246,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
elen = 0;
ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
sg_count, areq->assoclen, tbl_off, elen,
- is_ipsec_esp && !encrypt);
+ is_ipsec_esp && !encrypt, 1);
tbl_off += ret;
/* ICV data */
@@ -1554,6 +1556,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
+ bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
+ (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;
/* first DWORD empty */
@@ -1574,8 +1578,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/*
* cipher in
*/
- sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
- &desc->ptr[3], sg_count, 0, 0);
+ sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
+ sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
if (sg_count > 1)
sync_needed = true;
@@ -2670,7 +2674,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2704,6 +2707,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
.setkey = ablkcipher_aes_setkey,
}
},
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index cb0137e131cc..16f96c57de34 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -377,6 +377,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
/* primary execution unit mode (MODE0) and derivatives */
#define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000)
+#define DESC_HDR_MODE0_AESU_MASK cpu_to_be32(0x00600000)
#define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_AESU_CTR cpu_to_be32(0x00600000)
#define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000)
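The align parameter threaded through talitos_sg_map_ext() above exists so that CTR requests can be padded up to the AES block size. A hedged sketch of that arithmetic, using only the kernel's ALIGN() macro; the helper name is made up and the driver computes this inline.

#include <linux/kernel.h>	/* ALIGN() */

/* ctr_tail_padding() is illustrative only. */
static int ctr_tail_padding(int cryptlen, int align)
{
	/* e.g. cryptlen = 20, align = 16: ALIGN(20, 16) = 32, so 12 bytes of
	 * padding are added to the last link-table entry */
	return ALIGN(cryptlen, align) - cryptlen;
}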
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 38432721069f..ab4700e4b409 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -367,13 +367,18 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
int err;
unsigned long flags;
struct scatterlist outhdr, iv_sg, status_sg, **sgs;
- int i;
u64 dst_len;
unsigned int num_out = 0, num_in = 0;
int sg_total;
uint8_t *iv;
+ struct scatterlist *sg;
src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ pr_err("Invalid number of src SG.\n");
+ return src_nents;
+ }
+
dst_nents = sg_nents(req->dst);
pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
@@ -419,6 +424,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
goto free;
}
+ dst_len = min_t(unsigned int, req->nbytes, dst_len);
pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
req->nbytes, dst_len);
@@ -459,12 +465,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
vc_sym_req->iv = iv;
/* Source data */
- for (i = 0; i < src_nents; i++)
- sgs[num_out++] = &req->src[i];
+ for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
+ sgs[num_out++] = sg;
/* Destination data */
- for (i = 0; i < dst_nents; i++)
- sgs[num_out + num_in++] = &req->dst[i];
+ for (sg = req->dst; sg; sg = sg_next(sg))
+ sgs[num_out + num_in++] = sg;
/* Status */
sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
@@ -594,10 +600,11 @@ static void virtio_crypto_ablkcipher_finalize_req(
scatterwalk_map_and_copy(req->info, req->dst,
req->nbytes - AES_BLOCK_SIZE,
AES_BLOCK_SIZE, 0);
- crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
- req, err);
kzfree(vc_sym_req->iv);
virtcrypto_clear_request(&vc_sym_req->base);
+
+ crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
+ req, err);
}
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 06768074d2d8..479d9575e124 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -80,6 +80,8 @@
#define KHZ 1000
+#define KHZ_MAX (ULONG_MAX / KHZ)
+
/* Assume that the bus is saturated if the utilization is 25% */
#define BUS_SATURATION_RATIO 25
@@ -180,7 +182,7 @@ struct tegra_actmon_emc_ratio {
};
static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
- { 1400000, ULONG_MAX },
+ { 1400000, KHZ_MAX },
{ 1200000, 750000 },
{ 1100000, 600000 },
{ 1000000, 500000 },
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 1551ca7df394..8586cc05def1 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -244,6 +244,30 @@ void dma_fence_free(struct dma_fence *fence)
}
EXPORT_SYMBOL(dma_fence_free);
+static bool __dma_fence_enable_signaling(struct dma_fence *fence)
+{
+ bool was_set;
+
+ lockdep_assert_held(fence->lock);
+
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return false;
+
+ if (!was_set && fence->ops->enable_signaling) {
+ trace_dma_fence_enable_signal(fence);
+
+ if (!fence->ops->enable_signaling(fence)) {
+ dma_fence_signal_locked(fence);
+ return false;
+ }
+ }
+
+ return true;
+}
+
/**
* dma_fence_enable_sw_signaling - enable signaling on fence
* @fence: the fence to enable
@@ -256,19 +280,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
- if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
- fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- spin_lock_irqsave(fence->lock, flags);
-
- if (!fence->ops->enable_signaling(fence))
- dma_fence_signal_locked(fence);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return;
- spin_unlock_irqrestore(fence->lock, flags);
- }
+ spin_lock_irqsave(fence->lock, flags);
+ __dma_fence_enable_signaling(fence);
+ spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
@@ -302,7 +319,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
{
unsigned long flags;
int ret = 0;
- bool was_set;
if (WARN_ON(!fence || !func))
return -EINVAL;
@@ -314,25 +330,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
spin_lock_irqsave(fence->lock, flags);
- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- ret = -ENOENT;
- else if (!was_set && fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- if (!fence->ops->enable_signaling(fence)) {
- dma_fence_signal_locked(fence);
- ret = -ENOENT;
- }
- }
-
- if (!ret) {
+ if (__dma_fence_enable_signaling(fence)) {
cb->func = func;
list_add_tail(&cb->node, &fence->cb_list);
- } else
+ } else {
INIT_LIST_HEAD(&cb->node);
+ ret = -ENOENT;
+ }
+
spin_unlock_irqrestore(fence->lock, flags);
return ret;
@@ -432,7 +437,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
struct default_wait_cb cb;
unsigned long flags;
signed long ret = timeout ? timeout : 1;
- bool was_set;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return ret;
@@ -444,21 +448,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
goto out;
}
- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (!__dma_fence_enable_signaling(fence))
goto out;
- if (!was_set && fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- if (!fence->ops->enable_signaling(fence)) {
- dma_fence_signal_locked(fence);
- goto out;
- }
- }
-
if (!timeout) {
ret = 0;
goto out;
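A hedged sketch of the calling convention the new __dma_fence_enable_signaling() helper establishes: it must run under fence->lock and returns false once the fence is already signaled or signaling cannot be enabled. The function below is illustrative only; the helper is static to dma-fence.c and the example assumes it sits alongside it.

/* Illustrative caller, not an exported API. */
static int example_arm_callbacks(struct dma_fence *fence)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(fence->lock, flags);
	if (!__dma_fence_enable_signaling(fence))
		ret = -ENOENT;	/* already signaled, nothing will fire */
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}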
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 4a748c3435d7..8d99c84361cb 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -131,11 +131,13 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
if (ret < 0) {
dev_warn(&adev->dev,
"error in parsing resource group\n");
- return;
+ break;
}
grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
}
+
+ acpi_put_table((struct acpi_table_header *)csrt);
}
/**
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index dbc51154f122..c52718b37f8f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1677,13 +1677,17 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
return NULL;
dmac_pdev = of_find_device_by_node(dma_spec->np);
+ if (!dmac_pdev)
+ return NULL;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
- if (!atslave)
+ atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
+ if (!atslave) {
+ put_device(&dmac_pdev->dev);
return NULL;
+ }
atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
/*
@@ -1712,8 +1716,11 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
atslave->dma_dev = &dmac_pdev->dev;
chan = dma_request_channel(mask, at_dma_filter, atslave);
- if (!chan)
+ if (!chan) {
+ put_device(&dmac_pdev->dev);
+ kfree(atslave);
return NULL;
+ }
atchan = to_at_dma_chan(chan);
atchan->per_if = dma_spec->args[0] & 0xff;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index edff93aacad3..0d0850b35e45 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -574,11 +574,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
enum dma_status status;
unsigned long flags;
+ spin_lock_irqsave(&jzchan->vchan.lock, flags);
+
status = dma_cookie_status(chan, cookie, txstate);
if ((status == DMA_COMPLETE) || (txstate == NULL))
- return status;
-
- spin_lock_irqsave(&jzchan->vchan.lock, flags);
+ goto out_unlock_irqrestore;
vdesc = vchan_find_desc(&jzchan->vchan, cookie);
if (vdesc) {
@@ -595,6 +595,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
&& jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
status = DMA_ERROR;
+out_unlock_irqrestore:
spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
return status;
}
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 7b7fba0c9253..e38a653dd208 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -567,8 +567,8 @@ static int dmatest_func(void *data)
flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
ktime = ktime_get();
- while (!kthread_should_stop()
- && !(params->iterations && total_tests >= params->iterations)) {
+ while (!(kthread_should_stop() ||
+ (params->iterations && total_tests >= params->iterations))) {
struct dma_async_tx_descriptor *tx = NULL;
struct dmaengine_unmap_data *um;
dma_addr_t *dsts;
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index 04b9728c1d26..070860ec0ef1 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -8,6 +8,7 @@ config DW_DMAC_CORE
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA platform driver"
+ depends on HAS_IOMEM
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller. This
@@ -16,6 +17,7 @@ config DW_DMAC
config DW_DMAC_PCI
tristate "Synopsys DesignWare AHB DMA PCI driver"
depends on PCI
+ depends on HAS_IOMEM
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller on the
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index c7568869284e..0d2c6e13a01f 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -682,6 +682,13 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
fsl_chan = &fsl_edma->chans[ch];
spin_lock(&fsl_chan->vchan.lock);
+
+ if (!fsl_chan->edesc) {
+ /* terminate_all called before */
+ spin_unlock(&fsl_chan->vchan.lock);
+ continue;
+ }
+
if (!fsl_chan->edesc->iscyclic) {
list_del(&fsl_chan->edesc->vdesc.node);
vchan_cookie_complete(&fsl_chan->edesc->vdesc);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 1117b5123a6f..e7ca3175dbc3 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1218,6 +1218,7 @@ static int fsldma_of_probe(struct platform_device *op)
{
struct fsldma_device *fdev;
struct device_node *child;
+ unsigned int i;
int err;
fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
@@ -1296,6 +1297,10 @@ static int fsldma_of_probe(struct platform_device *op)
return 0;
out_free_fdev:
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ if (fdev->chan[i])
+ fsl_dma_chan_remove(fdev->chan[i]);
+ }
irq_dispose_mapping(fdev->irq);
iounmap(fdev->regs);
out_free:
@@ -1318,6 +1323,7 @@ static int fsldma_of_remove(struct platform_device *op)
if (fdev->chan[i])
fsl_dma_chan_remove(fdev->chan[i]);
}
+ irq_dispose_mapping(fdev->irq);
iounmap(fdev->regs);
kfree(fdev);
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index ad45cd344bba..78836526d2e0 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -29,22 +29,12 @@
static irqreturn_t hsu_pci_irq(int irq, void *dev)
{
struct hsu_dma_chip *chip = dev;
- struct pci_dev *pdev = to_pci_dev(chip->dev);
u32 dmaisr;
u32 status;
unsigned short i;
int ret = 0;
int err;
- /*
- * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
- * to have different numbers, is shared between HSU DMA and UART IPs.
- * Thus on such SoCs we are expecting that IRQ handler is called in
- * UART driver only.
- */
- if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
- return IRQ_HANDLED;
-
dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
for (i = 0; i < chip->hsu->nr_channels; i++) {
if (dmaisr & 0x1) {
@@ -108,6 +98,17 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_register_irq;
+ /*
+ * On Intel Tangier B0 and Anniedale the interrupt line, regardless of
+ * having different numbers, is shared between the HSU DMA and UART IPs.
+ * Thus on such SoCs the IRQ handler is expected to be called from the
+ * UART driver only. Instead of handling the spurious interrupt from
+ * HSU DMA here, wasting CPU time and delaying HSU UART interrupt
+ * handling, disable the interrupt entirely.
+ */
+ if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
+ disable_irq_nosync(chip->irq);
+
pci_set_drvdata(pdev, chip);
return 0;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index b94cece58b98..890cadf3ec5d 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -38,6 +38,18 @@
#include "../dmaengine.h"
+int completion_timeout = 200;
+module_param(completion_timeout, int, 0644);
+MODULE_PARM_DESC(completion_timeout,
+ "set ioat completion timeout [msec] (default 200 [msec])");
+int idle_timeout = 2000;
+module_param(idle_timeout, int, 0644);
+MODULE_PARM_DESC(idle_timeout,
+ "set ioat idel timeout [msec] (default 2000 [msec])");
+
+#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
+#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)
+
static char *chanerr_str[] = {
"DMA Transfer Source Address Error",
"DMA Transfer Destination Address Error",
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 1ab42ec2b7ff..b061db201332 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -111,8 +111,6 @@ struct ioatdma_chan {
#define IOAT_RUN 5
#define IOAT_CHAN_ACTIVE 6
struct timer_list timer;
- #define COMPLETION_TIMEOUT msecs_to_jiffies(100)
- #define IDLE_TIMEOUT msecs_to_jiffies(2000)
#define RESET_DELAY msecs_to_jiffies(100)
struct ioatdma_device *ioat_dma;
dma_addr_t completion_dma;
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index b7ec56ae02a6..1d44b02831a0 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev,
"request_irq failed with err %d\n", err);
- goto err_unregister;
+ goto err_free;
}
platform_set_drvdata(pdev, hsdma);
@@ -1006,6 +1006,9 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return 0;
+err_free:
+ mtk_hsdma_hw_deinit(hsdma);
+ of_dma_controller_free(pdev->dev.of_node);
err_unregister:
dma_async_device_unregister(dd);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 13c68b6434ce..15b4a44e6006 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -362,6 +362,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
size);
tdmac->desc_arr = NULL;
+ if (tdmac->status == DMA_ERROR)
+ tdmac->status = DMA_COMPLETE;
return;
}
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 8dc0aa4d73ab..462adf7e4e95 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -777,8 +777,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
goto disable_clk;
msi_desc = first_msi_entry(&pdev->dev);
- if (!msi_desc)
+ if (!msi_desc) {
+ ret = -ENODEV;
goto free_msi_irqs;
+ }
xor_dev->msi_desc = msi_desc;
ret = devm_request_irq(&pdev->dev, msi_desc->irq,
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 91fd395c90c4..8344a60c2131 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -72,12 +72,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
return NULL;
chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
- if (chan) {
- chan->router = ofdma->dma_router;
- chan->route_data = route_data;
- } else {
+ if (IS_ERR_OR_NULL(chan)) {
ofdma->dma_router->route_free(ofdma->dma_router->dev,
route_data);
+ } else {
+ chan->router = ofdma->dma_router;
+ chan->route_data = route_data;
}
/*
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 7812a6338acd..da5050ab7f38 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -172,13 +172,11 @@ struct owl_dma_txd {
* @id: physical index to this channel
* @base: virtual memory base for the dma channel
* @vchan: the virtual channel currently being served by this physical channel
- * @lock: a lock to use when altering an instance of this struct
*/
struct owl_dma_pchan {
u32 id;
void __iomem *base;
struct owl_dma_vchan *vchan;
- spinlock_t lock;
};
/**
@@ -396,14 +394,14 @@ static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
for (i = 0; i < od->nr_pchans; i++) {
pchan = &od->pchans[i];
- spin_lock_irqsave(&pchan->lock, flags);
+ spin_lock_irqsave(&od->lock, flags);
if (!pchan->vchan) {
pchan->vchan = vchan;
- spin_unlock_irqrestore(&pchan->lock, flags);
+ spin_unlock_irqrestore(&od->lock, flags);
break;
}
- spin_unlock_irqrestore(&pchan->lock, flags);
+ spin_unlock_irqrestore(&od->lock, flags);
}
return pchan;
@@ -934,6 +932,7 @@ static int owl_dma_remove(struct platform_device *pdev)
owl_dma_free(od);
clk_disable_unprepare(od->clk);
+ dma_pool_destroy(od->lli_pool);
return 0;
}
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index afd8f27bda96..47c6e3ceac4d 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -873,6 +873,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
}
pci_set_master(pdev);
+ pd->dma.dev = &pdev->dev;
err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
if (err) {
@@ -888,7 +889,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
goto err_free_irq;
}
- pd->dma.dev = &pdev->dev;
INIT_LIST_HEAD(&pd->dma.channels);
@@ -972,7 +972,6 @@ static void pch_dma_remove(struct pci_dev *pdev)
}
/* PCI Device ID of DMA device */
-#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index bc8050c025b7..15b30d2d8f7e 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2769,14 +2769,14 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
while (burst != (1 << desc->rqcfg.brst_size))
desc->rqcfg.brst_size++;
+ desc->rqcfg.brst_len = get_burst_len(desc, len);
/*
* If burst size is smaller than bus width then make sure we only
* transfer one at a time to avoid a burst straddling an MFIFO entry.
*/
- if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
+ if (burst * 8 < pl330->pcfg.data_bus_width)
desc->rqcfg.brst_len = 1;
- desc->rqcfg.brst_len = get_burst_len(desc, len);
desc->bytes_requested = len;
desc->txd.flags = flags;
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index d64edeb6771a..f9640e37b139 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -423,6 +423,20 @@ static int __init hidma_mgmt_init(void)
hidma_mgmt_of_populate_channels(child);
}
#endif
+ /*
+ * We do not check the return value here, as it is assumed that
+ * platform_driver_register() will not fail. The reason for this is that
+ * the (potential) hidma_mgmt_of_populate_channels calls above are not
+ * cleaned up if it does fail, and doing that work is quite
+ * complicated. In particular, various calls to of_address_to_resource,
+ * of_irq_to_resource, platform_device_register_full, of_dma_configure,
+ * and of_msi_configure, which in turn call other functions, must
+ * be cleaned up - this is not a trivial exercise.
+ *
+ * Currently, this module is not intended to be unloaded, and there is
+ * no module_exit function defined which does the needed cleanup. For
+ * this reason, we have to assume success here.
+ */
platform_driver_register(&hidma_mgmt_driver);
return 0;
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 4903a408fc14..ac7af440f865 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -494,8 +494,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
- if (chan->busy) {
- stm32_dma_stop(chan);
+ if (chan->desc) {
+ vchan_terminate_vdesc(&chan->desc->vdesc);
+ if (chan->busy)
+ stm32_dma_stop(chan);
chan->desc = NULL;
}
@@ -551,6 +553,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
if (!vdesc)
return;
+ list_del(&vdesc->node);
+
chan->desc = to_stm32_dma_desc(vdesc);
chan->next_sg = 0;
}
@@ -628,7 +632,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
} else {
chan->busy = false;
if (chan->next_sg == chan->desc->num_sgs) {
- list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
chan->desc = NULL;
}
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 8c3c3e5b812a..9c6867916e89 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1137,6 +1137,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
return;
}
+ list_del(&vdesc->node);
+
chan->desc = to_stm32_mdma_desc(vdesc);
hwdesc = chan->desc->node[0].hwdesc;
chan->curr_hwdesc = 0;
@@ -1252,8 +1254,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c)
LIST_HEAD(head);
spin_lock_irqsave(&chan->vchan.lock, flags);
- if (chan->busy) {
- stm32_mdma_stop(chan);
+ if (chan->desc) {
+ vchan_terminate_vdesc(&chan->desc->vdesc);
+ if (chan->busy)
+ stm32_mdma_stop(chan);
chan->desc = NULL;
}
vchan_get_all_descriptors(&chan->vchan, &head);
@@ -1341,7 +1345,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
{
- list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
chan->desc = NULL;
chan->busy = false;
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 15481aeaeecd..5ccd24a46e38 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1225,8 +1225,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
- if (tdc->busy)
- tegra_dma_terminate_all(dc);
+ tegra_dma_terminate_all(dc);
spin_lock_irqsave(&tdc->lock, flags);
list_splice_init(&tdc->pending_sg_req, &sg_req_list);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 4f4733d831a1..86b45198fb96 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -583,6 +583,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
ret = pm_runtime_get_sync(tdc2dev(tdc));
if (ret < 0) {
+ pm_runtime_put_noidle(tdc2dev(tdc));
free_irq(tdc->irq, tdc);
return ret;
}
@@ -764,8 +765,10 @@ static int tegra_adma_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
goto rpm_disable;
+ }
ret = tegra_adma_init(tdma);
if (ret)
@@ -793,7 +796,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
ret = dma_async_device_register(&tdma->dma_dev);
if (ret < 0) {
dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
- goto irq_dispose;
+ goto rpm_put;
}
ret = of_dma_controller_register(pdev->dev.of_node,
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index d56b6b0e22a8..0c5668e897fe 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -453,8 +453,8 @@ struct xilinx_dma_device {
#define to_dma_tx_descriptor(tx) \
container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
- readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
- cond, delay_us, timeout_us)
+ readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
+ val, cond, delay_us, timeout_us)
/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
@@ -2426,7 +2426,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
has_dre = false;
if (!has_dre)
- xdev->common.copy_align = fls(width - 1);
+ xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
@@ -2529,7 +2529,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
struct device_node *node)
{
- int ret, i, nr_channels = 1;
+ int ret, i;
+ u32 nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
if ((ret < 0) && xdev->mcdma)
@@ -2713,7 +2714,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
/* Register the DMA engine with the core */
- dma_async_device_register(&xdev->common);
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(xdev->dev, "failed to register the dma device\n");
+ goto error;
+ }
err = of_dma_controller_register(node, of_dma_xilinx_xlate,
xdev);
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 73de6a6179fc..e002ff8413e2 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -127,10 +127,12 @@
/* Max transfer size per descriptor */
#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
+/* Max burst lengths */
+#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U
+#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U
+
/* Reset values for data attributes */
#define ZYNQMP_DMA_AXCACHE_VAL 0xF
-#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF
-#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
@@ -536,17 +538,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
{
- u32 val;
+ u32 val, burst_val;
val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
val |= ZYNQMP_DMA_POINT_TYPE_SG;
writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ burst_val = __ilog2_u32(chan->src_burst_len);
val = (val & ~ZYNQMP_DMA_ARLEN) |
- (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
+ ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
+ burst_val = __ilog2_u32(chan->dst_burst_len);
val = (val & ~ZYNQMP_DMA_AWLEN) |
- (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
+ ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
}
@@ -562,8 +566,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan,
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
- chan->src_burst_len = config->src_maxburst;
- chan->dst_burst_len = config->dst_maxburst;
+ chan->src_burst_len = clamp(config->src_maxburst, 1U,
+ ZYNQMP_DMA_MAX_SRC_BURST_LEN);
+ chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
+ ZYNQMP_DMA_MAX_DST_BURST_LEN);
return 0;
}
@@ -884,8 +890,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
return PTR_ERR(chan->regs);
chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
- chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
- chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
+ chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
+ chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
if (err < 0) {
dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 05d6f9c86ac3..fe25c98380ad 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -18,6 +18,9 @@ static struct msr __percpu *msrs;
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
+/* Device for the PCI component */
+static struct device *pci_ctl_dev;
+
/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
@@ -261,6 +264,8 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
if (pvt->model == 0x60)
amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+ else
+ amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
break;
case 0x17:
@@ -2209,6 +2214,15 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_base_addr_to_cs_size,
}
},
+ [F17_M30H_CPUS] = {
+ .ctl_name = "F17h_M30h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_base_addr_to_cs_size,
+ }
+ },
};
/*
@@ -2552,6 +2566,9 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
return -ENODEV;
}
+ if (!pci_ctl_dev)
+ pci_ctl_dev = &pvt->F0->dev;
+
edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
@@ -2576,6 +2593,9 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
return -ENODEV;
}
+ if (!pci_ctl_dev)
+ pci_ctl_dev = &pvt->F2->dev;
+
edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
@@ -3212,6 +3232,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
fam_type = &family_types[F17_M10H_CPUS];
pvt->ops = &family_types[F17_M10H_CPUS].ops;
break;
+ } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
+ fam_type = &family_types[F17_M30H_CPUS];
+ pvt->ops = &family_types[F17_M30H_CPUS].ops;
+ break;
}
fam_type = &family_types[F17_CPUS];
pvt->ops = &family_types[F17_CPUS].ops;
@@ -3426,21 +3450,10 @@ static void remove_one_instance(unsigned int nid)
static void setup_pci_device(void)
{
- struct mem_ctl_info *mci;
- struct amd64_pvt *pvt;
-
if (pci_ctl)
return;
- mci = edac_mc_find(0);
- if (!mci)
- return;
-
- pvt = mci->pvt_info;
- if (pvt->umc)
- pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
- else
- pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
+ pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
if (!pci_ctl) {
pr_warn("%s(): Unable to create PCI control\n", __func__);
pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
@@ -3520,6 +3533,8 @@ static int __init amd64_edac_init(void)
return 0;
err_pci:
+ pci_ctl_dev = NULL;
+
msrs_free(msrs);
msrs = NULL;
@@ -3551,6 +3566,8 @@ static void __exit amd64_edac_exit(void)
kfree(ecc_stngs);
ecc_stngs = NULL;
+ pci_ctl_dev = NULL;
+
msrs_free(msrs);
msrs = NULL;
}
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 4242f8e39c18..de8dbb0b42b5 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -117,6 +117,8 @@
#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
/*
* Function 1 - Address Map
@@ -284,6 +286,7 @@ enum amd_families {
F16_M30H_CPUS,
F17_CPUS,
F17_M10H_CPUS,
+ F17_M30H_CPUS,
NUM_FAMILIES,
};
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 0e7ea3591b78..5e7593753799 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -275,6 +275,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
/* Error exit stack */
err_kobj_reg:
+ kobject_put(&edac_dev->kobj);
module_put(edac_dev->owner);
err_out:
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 72c9eb9fdffb..53042af7262e 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -386,7 +386,7 @@ static int edac_pci_main_kobj_setup(void)
/* Error unwind stack */
kobject_init_and_add_fail:
- kfree(edac_pci_top_main_kobj);
+ kobject_put(edac_pci_top_main_kobj);
kzalloc_fail:
module_put(THIS_MODULE);
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index b506eef6b146..858ef4e15180 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -1072,16 +1072,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
PCI_DEVICE_ID_INTEL_5100_19, 0);
if (!einj) {
ret = -ENODEV;
- goto bail_einj;
+ goto bail_mc_free;
}
rc = pci_enable_device(einj);
if (rc < 0) {
ret = rc;
- goto bail_disable_einj;
+ goto bail_einj;
}
-
mci->pdev = &pdev->dev;
priv = mci->pvt_info;
@@ -1147,14 +1146,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
bail_scrub:
priv->scrub_enable = 0;
cancel_delayed_work_sync(&(priv->i5100_scrubbing));
- edac_mc_free(mci);
-
-bail_disable_einj:
pci_disable_device(einj);
bail_einj:
pci_dev_put(einj);
+bail_mc_free:
+ edac_mc_free(mci);
+
bail_disable_ch1:
pci_disable_device(ch1mm);
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index aac9b9b360b8..9e4781a807cf 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -147,6 +147,8 @@
(n << (28 + (2 * skl) - PAGE_SHIFT))
static int nr_channels;
+static struct pci_dev *mci_pdev;
+static int ie31200_registered = 1;
struct ie31200_priv {
void __iomem *window;
@@ -518,12 +520,16 @@ fail_free:
static int ie31200_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- edac_dbg(0, "MC:\n");
+ int rc;
+ edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
+ rc = ie31200_probe1(pdev, ent->driver_data);
+ if (rc == 0 && !mci_pdev)
+ mci_pdev = pci_dev_get(pdev);
- return ie31200_probe1(pdev, ent->driver_data);
+ return rc;
}
static void ie31200_remove_one(struct pci_dev *pdev)
@@ -532,6 +538,8 @@ static void ie31200_remove_one(struct pci_dev *pdev)
struct ie31200_priv *priv;
edac_dbg(0, "\n");
+ pci_dev_put(mci_pdev);
+ mci_pdev = NULL;
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
return;
@@ -583,17 +591,53 @@ static struct pci_driver ie31200_driver = {
static int __init ie31200_init(void)
{
+ int pci_rc, i;
+
edac_dbg(3, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
- return pci_register_driver(&ie31200_driver);
+ pci_rc = pci_register_driver(&ie31200_driver);
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (!mci_pdev) {
+ ie31200_registered = 0;
+ for (i = 0; ie31200_pci_tbl[i].vendor != 0; i++) {
+ mci_pdev = pci_get_device(ie31200_pci_tbl[i].vendor,
+ ie31200_pci_tbl[i].device,
+ NULL);
+ if (mci_pdev)
+ break;
+ }
+ if (!mci_pdev) {
+ edac_dbg(0, "ie31200 pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
+ if (pci_rc < 0) {
+ edac_dbg(0, "ie31200 init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+ return 0;
+
+fail1:
+ pci_unregister_driver(&ie31200_driver);
+fail0:
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
}
static void __exit ie31200_exit(void)
{
edac_dbg(3, "MC:\n");
pci_unregister_driver(&ie31200_driver);
+ if (!ie31200_registered)
+ ie31200_remove_one(mci_pdev);
}
module_init(ie31200_init);
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
index 6ac26d1b929f..324768946743 100644
--- a/drivers/edac/ti_edac.c
+++ b/drivers/edac/ti_edac.c
@@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev)
/* add EMIF ECC error handler */
error_irq = platform_get_irq(pdev, 0);
- if (!error_irq) {
+ if (error_irq < 0) {
+ ret = error_irq;
edac_printk(KERN_ERR, EDAC_MOD_NAME,
"EMIF irq number not defined.\n");
goto err;
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 18026354c332..b1fb38b7f270 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -128,7 +128,7 @@ static int adc_jack_probe(struct platform_device *pdev)
for (i = 0; data->adc_conditions[i].id != EXTCON_NONE; i++);
data->num_conditions = i;
- data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel);
+ data->chan = devm_iio_channel_get(&pdev->dev, pdata->consumer_channel);
if (IS_ERR(data->chan))
return PTR_ERR(data->chan);
@@ -170,7 +170,6 @@ static int adc_jack_remove(struct platform_device *pdev)
free_irq(data->irq, data);
cancel_work_sync(&data->handler.work);
- iio_channel_release(data->chan);
return 0;
}
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 9327479c719c..c857120c00d9 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -602,7 +602,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
struct arizona *arizona = info->arizona;
int id_gpio = arizona->pdata.hpdet_id_gpio;
unsigned int report = EXTCON_JACK_HEADPHONE;
- int ret, reading;
+ int ret, reading, state;
bool mic = false;
mutex_lock(&info->lock);
@@ -615,12 +615,11 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
}
/* If the cable was removed while measuring, ignore the result */
- ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
- if (ret < 0) {
- dev_err(arizona->dev, "Failed to check cable state: %d\n",
- ret);
+ state = extcon_get_state(info->edev, EXTCON_MECHANICAL);
+ if (state < 0) {
+ dev_err(arizona->dev, "Failed to check cable state: %d\n", state);
goto out;
- } else if (!ret) {
+ } else if (!state) {
dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
goto done;
}
@@ -673,7 +672,7 @@ done:
ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
/* If we have a mic then reenable MICDET */
- if (mic || info->mic)
+ if (state && (mic || info->mic))
arizona_start_mic(info);
if (info->hpdet_active) {
@@ -681,7 +680,9 @@ done:
info->hpdet_active = false;
}
- info->hpdet_done = true;
+ /* Do not set hpdet_done when the cable has been unplugged */
+ if (state)
+ info->hpdet_done = true;
out:
mutex_unlock(&info->lock);
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 227651ff9666..c221a0aec0f3 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -1275,4 +1275,4 @@ module_platform_driver(max77693_muic_driver);
MODULE_DESCRIPTION("Maxim MAX77693 Extcon driver");
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:extcon-max77693");
+MODULE_ALIAS("platform:max77693-muic");
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index b9d27c8fe57e..e70f21ae85ff 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -1256,6 +1256,7 @@ int extcon_dev_register(struct extcon_dev *edev)
sizeof(*edev->nh), GFP_KERNEL);
if (!edev->nh) {
ret = -ENOMEM;
+ device_unregister(&edev->dev);
goto err_dev;
}
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index a128dd1126ae..ac85e03e88e1 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -359,6 +359,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct client *client = file->private_data;
spinlock_t *client_list_lock = &client->lynx->client_list_lock;
struct nosy_stats stats;
+ int ret;
switch (cmd) {
case NOSY_IOC_GET_STATS:
@@ -373,11 +374,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
case NOSY_IOC_START:
+ ret = -EBUSY;
spin_lock_irq(client_list_lock);
- list_add_tail(&client->link, &client->lynx->client_list);
+ if (list_empty(&client->link)) {
+ list_add_tail(&client->link, &client->lynx->client_list);
+ ret = 0;
+ }
spin_unlock_irq(client_list_lock);
- return 0;
+ return ret;
case NOSY_IOC_STOP:
spin_lock_irq(client_list_lock);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index ed212c8b4108..1c419e4cea83 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -248,6 +248,7 @@ config FW_CFG_SYSFS_CMDLINE
config QCOM_SCM
bool
depends on ARM || ARM64
+ depends on HAVE_ARM_SMCCC
select RESET_CONTROLLER
config QCOM_SCM_32
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 87f737e01473..041f8152272b 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -85,7 +85,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
for (i = 0; i < num_domains; i++, scmi_pd++) {
u32 state;
- domains[i] = &scmi_pd->genpd;
+ if (handle->power_ops->state_get(handle, i, &state)) {
+ dev_warn(dev, "failed to get state for domain %d\n", i);
+ continue;
+ }
scmi_pd->domain = i;
scmi_pd->handle = handle;
@@ -94,13 +97,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
- if (handle->power_ops->state_get(handle, i, &state)) {
- dev_warn(dev, "failed to get state for domain %d\n", i);
- continue;
- }
-
pm_genpd_init(&scmi_pd->genpd, NULL,
state == SCMI_POWER_STATE_GENERIC_OFF);
+
+ domains[i] = &scmi_pd->genpd;
}
scmi_pd_data->domains = domains;
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index c7d06a36b23a..baa7280eccb3 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -563,8 +563,10 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
sizeof(le_clk_id), &rate, sizeof(rate));
+ if (ret)
+ return 0;
- return ret ? ret : le32_to_cpu(rate);
+ return le32_to_cpu(rate);
}
static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index c64c7da73829..e809f4d9a9e9 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -410,14 +410,19 @@ int sdei_event_enable(u32 event_num)
return -ENOENT;
}
- spin_lock(&sdei_list_lock);
- event->reenable = true;
- spin_unlock(&sdei_list_lock);
+ cpus_read_lock();
if (event->type == SDEI_EVENT_TYPE_SHARED)
err = sdei_api_event_enable(event->event_num);
else
err = sdei_do_cross_call(_local_event_enable, event);
+
+ if (!err) {
+ spin_lock(&sdei_list_lock);
+ event->reenable = true;
+ spin_unlock(&sdei_list_lock);
+ }
+ cpus_read_unlock();
mutex_unlock(&sdei_events_lock);
return err;
@@ -489,11 +494,6 @@ static int _sdei_event_unregister(struct sdei_event *event)
{
lockdep_assert_held(&sdei_events_lock);
- spin_lock(&sdei_list_lock);
- event->reregister = false;
- event->reenable = false;
- spin_unlock(&sdei_list_lock);
-
if (event->type == SDEI_EVENT_TYPE_SHARED)
return sdei_api_event_unregister(event->event_num);
@@ -516,6 +516,11 @@ int sdei_event_unregister(u32 event_num)
break;
}
+ spin_lock(&sdei_list_lock);
+ event->reregister = false;
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
err = _sdei_event_unregister(event);
if (err)
break;
@@ -583,26 +588,15 @@ static int _sdei_event_register(struct sdei_event *event)
lockdep_assert_held(&sdei_events_lock);
- spin_lock(&sdei_list_lock);
- event->reregister = true;
- spin_unlock(&sdei_list_lock);
-
if (event->type == SDEI_EVENT_TYPE_SHARED)
return sdei_api_event_register(event->event_num,
sdei_entry_point,
event->registered,
SDEI_EVENT_REGISTER_RM_ANY, 0);
-
err = sdei_do_cross_call(_local_event_register, event);
- if (err) {
- spin_lock(&sdei_list_lock);
- event->reregister = false;
- event->reenable = false;
- spin_unlock(&sdei_list_lock);
-
+ if (err)
sdei_do_cross_call(_local_event_unregister, event);
- }
return err;
}
@@ -630,12 +624,18 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
break;
}
+ cpus_read_lock();
err = _sdei_event_register(event);
if (err) {
sdei_event_destroy(event);
pr_warn("Failed to register event %u: %d\n", event_num,
err);
+ } else {
+ spin_lock(&sdei_list_lock);
+ event->reregister = true;
+ spin_unlock(&sdei_list_lock);
}
+ cpus_read_unlock();
} while (0);
mutex_unlock(&sdei_events_lock);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 89110dfc7127..6fbe598693aa 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -179,6 +179,17 @@ config RESET_ATTACK_MITIGATION
have been evicted, since otherwise it will trigger even on clean
reboots.
+config EFI_CUSTOM_SSDT_OVERLAYS
+ bool "Load custom ACPI SSDT overlay from an EFI variable"
+ depends on EFI_VARS && ACPI
+ default ACPI_TABLE_UPGRADE
+ help
+ Allow loading of an ACPI SSDT overlay from an EFI variable specified
+ by a kernel command line option.
+
+ See Documentation/admin-guide/acpi/ssdt-overlays.rst for more
+ information.
+
endmenu
config UEFI_CPER
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index d54fca902e64..a8180f9090fa 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -236,7 +236,7 @@ static void generic_ops_unregister(void)
efivars_unregister(&generic_efivars);
}
-#if IS_ENABLED(CONFIG_ACPI)
+#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX 16
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
@@ -359,6 +359,7 @@ static int __init efisubsys_init(void)
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
pr_err("efi: Firmware registration failed.\n");
+ destroy_workqueue(efi_rts_wq);
return -ENOMEM;
}
@@ -395,6 +396,7 @@ err_unregister:
generic_ops_unregister();
err_put:
kobject_put(efi_kobj);
+ destroy_workqueue(efi_rts_wq);
return error;
}
@@ -572,7 +574,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
}
}
- if (efi_enabled(EFI_MEMMAP))
+ if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
efi_memattr_init();
efi_tpm_eventlog_init();
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 1c65f5ac4368..6529addd1e82 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -586,8 +586,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
NULL, "%s", short_name);
kfree(short_name);
- if (ret)
+ if (ret) {
+ kobject_put(&new_var->kobj);
return ret;
+ }
kobject_uevent(&new_var->kobj, KOBJ_ADD);
if (efivar_entry_add(new_var, &efivar_sysfs_list)) {
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 5d06bd247d07..2f6204b2fdd3 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -180,7 +180,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
"entry%d", entry_num);
if (rc) {
- kfree(entry);
+ kobject_put(&entry->kobj);
return rc;
}
}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d9845099635e..d3777d754984 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -28,6 +28,7 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
-D__NO_FORTIFY \
$(call cc-option,-ffreestanding) \
$(call cc-option,-fno-stack-protector) \
+ $(call cc-option,-fno-addrsig) \
-D__DISABLE_EXPORTS
GCOV_PROFILE := n
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index cbd53cb1b2d4..9f1a913933d5 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -164,8 +164,10 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
GFP_KERNEL);
- if (!cpu_groups)
+ if (!cpu_groups) {
+ free_cpumask_var(tmp);
return -ENOMEM;
+ }
cpumask_copy(tmp, cpu_online_mask);
@@ -174,6 +176,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
topology_core_cpumask(cpumask_any(tmp));
if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+ free_cpumask_var(tmp);
free_cpu_groups(num_groups, &cpu_groups);
return -ENOMEM;
}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 98c987188835..513908a0c262 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
-#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -449,8 +448,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
struct qcom_scm_mem_map_info *mem_to_map;
phys_addr_t mem_to_map_phys;
phys_addr_t dest_phys;
- phys_addr_t ptr_phys;
- dma_addr_t ptr_dma;
+ dma_addr_t ptr_phys;
size_t mem_to_map_sz;
size_t dest_sz;
size_t src_sz;
@@ -468,10 +466,9 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
ALIGN(dest_sz, SZ_64);
- ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
+ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
- ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
/* Fill source vmid detail */
src = ptr;
@@ -501,7 +498,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
ptr_phys, src_sz, dest_phys, dest_sz);
- dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
+ dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
if (ret) {
dev_err(__scm->dev,
"Assign memory protection call failed %d.\n", ret);
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 039e0f91dba8..6945c3c96637 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
fw_cfg_sel_ko, "%d", entry->select);
- if (err)
- goto err_register;
+ if (err) {
+ kobject_put(&entry->kobj);
+ return err;
+ }
/* add raw binary content access */
err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
@@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
err_add_raw:
kobject_del(&entry->kobj);
-err_register:
kfree(entry);
return err;
}
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index c9a613dc9eb7..e056965ef97b 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -106,10 +106,10 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
region->pages);
if (pinned < 0) {
ret = pinned;
- goto put_pages;
+ goto free_pages;
} else if (pinned != npages) {
ret = -EFAULT;
- goto free_pages;
+ goto put_pages;
}
dev_dbg(dev, "%d pages pinned\n", pinned);
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 02baa6a227c0..fc048f9a99b1 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -79,7 +79,8 @@ static int port_disable(struct platform_device *pdev)
* on this port and minimum soft reset pulse width has elapsed.
* Driver polls port_soft_reset_ack to determine if reset done by HW.
*/
- if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
+ if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
+ v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
dev_err(&pdev->dev, "timeout, fail to reset device\n");
return -ETIMEDOUT;
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 4596fde16dfe..3e88e79d1053 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -292,14 +292,18 @@ static int sirf_probe(struct serdev_device *serdev)
data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
GPIOD_OUT_LOW);
- if (IS_ERR(data->on_off))
+ if (IS_ERR(data->on_off)) {
+ ret = PTR_ERR(data->on_off);
goto err_put_device;
+ }
if (data->on_off) {
data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup",
GPIOD_IN);
- if (IS_ERR(data->wakeup))
+ if (IS_ERR(data->wakeup)) {
+ ret = PTR_ERR(data->wakeup);
goto err_put_device;
+ }
/*
* Configurations where WAKEUP has been left not connected,
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index ba51ea15f379..c6cf270035b5 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -69,6 +69,7 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
ret = pm_runtime_get_sync(chip->parent);
if (ret < 0) {
dev_err(chip->parent, "Failed to resume: %d\n", ret);
+ pm_runtime_put_autosuspend(chip->parent);
return ret;
}
@@ -77,12 +78,15 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
if (ret < 0) {
dev_err(chip->parent, "Failed to drop cache: %d\n",
ret);
+ pm_runtime_put_autosuspend(chip->parent);
return ret;
}
ret = regmap_read(arizona->regmap, reg, &val);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(chip->parent);
return ret;
+ }
pm_runtime_mark_last_busy(chip->parent);
pm_runtime_put_autosuspend(chip->parent);
@@ -111,6 +115,7 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip,
ret = pm_runtime_get_sync(chip->parent);
if (ret < 0) {
dev_err(chip->parent, "Failed to resume: %d\n", ret);
+ pm_runtime_put(chip->parent);
return ret;
}
}
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 044888fd96a1..2a56efced798 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -51,7 +51,9 @@
#define GPIO_EXT_PORTC 0x58
#define GPIO_EXT_PORTD 0x5c
+#define DWAPB_DRIVER_NAME "gpio-dwapb"
#define DWAPB_MAX_PORTS 4
+
#define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */
#define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */
#define GPIO_SWPORT_DDR_STRIDE 0x0c /* register stride 3*32 bits */
@@ -400,7 +402,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
return;
err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2,
- "gpio-dwapb", handle_level_irq,
+ DWAPB_DRIVER_NAME, handle_level_irq,
IRQ_NOREQUEST, 0,
IRQ_GC_INIT_NESTED_LOCK);
if (err) {
@@ -457,7 +459,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
*/
err = devm_request_irq(gpio->dev, pp->irq[0],
dwapb_irq_handler_mfd,
- IRQF_SHARED, "gpio-dwapb-mfd", gpio);
+ IRQF_SHARED, DWAPB_DRIVER_NAME, gpio);
if (err) {
dev_err(gpio->dev, "error requesting IRQ\n");
irq_domain_remove(gpio->domain);
@@ -535,26 +537,33 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
dwapb_configure_irqs(gpio, port, pp);
err = gpiochip_add_data(&port->gc, port);
- if (err)
+ if (err) {
dev_err(gpio->dev, "failed to register gpiochip for port%d\n",
port->idx);
- else
- port->is_registered = true;
+ return err;
+ }
/* Add GPIO-signaled ACPI event support */
- if (pp->has_irq)
- acpi_gpiochip_request_interrupts(&port->gc);
+ acpi_gpiochip_request_interrupts(&port->gc);
- return err;
+ port->is_registered = true;
+
+ return 0;
}
static void dwapb_gpio_unregister(struct dwapb_gpio *gpio)
{
unsigned int m;
- for (m = 0; m < gpio->nr_ports; ++m)
- if (gpio->ports[m].is_registered)
- gpiochip_remove(&gpio->ports[m].gc);
+ for (m = 0; m < gpio->nr_ports; ++m) {
+ struct dwapb_gpio_port *port = &gpio->ports[m];
+
+ if (!port->is_registered)
+ continue;
+
+ acpi_gpiochip_free_interrupts(&port->gc);
+ gpiochip_remove(&port->gc);
+ }
}
static struct dwapb_platform_data *
@@ -842,7 +851,7 @@ static SIMPLE_DEV_PM_OPS(dwapb_gpio_pm_ops, dwapb_gpio_suspend,
static struct platform_driver dwapb_gpio_driver = {
.driver = {
- .name = "gpio-dwapb",
+ .name = DWAPB_DRIVER_NAME,
.pm = &dwapb_gpio_pm_ops,
.of_match_table = of_match_ptr(dwapb_of_match),
.acpi_match_table = ACPI_PTR(dwapb_acpi_match),
@@ -856,3 +865,4 @@ module_platform_driver(dwapb_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("Synopsys DesignWare APB GPIO driver");
+MODULE_ALIAS("platform:" DWAPB_DRIVER_NAME);
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index 4935cda5301e..4f1af323ec03 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -599,7 +599,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
- continue;
+ break;
sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(sprd_eic->base[i]))
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index a09d2f9ebacc..695c19901eff 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -148,8 +148,10 @@ static int gpio_exar_probe(struct platform_device *pdev)
mutex_init(&exar_gpio->lock);
index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
- if (index < 0)
- goto err_destroy;
+ if (index < 0) {
+ ret = index;
+ goto err_mutex_destroy;
+ }
sprintf(exar_gpio->name, "exar_gpio%d", index);
exar_gpio->gpio_chip.label = exar_gpio->name;
@@ -176,6 +178,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
err_destroy:
ida_simple_remove(&ida_index, index);
+err_mutex_destroy:
mutex_destroy(&exar_gpio->lock);
return ret;
}
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 51c7d1b84c2e..0c076dce9e17 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -31,8 +31,6 @@
#define IOH_IRQ_BASE 0
-#define PCI_VENDOR_ID_ROHM 0x10DB
-
struct ioh_reg_comn {
u32 ien;
u32 istatus;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 945bd13e5e79..cab324eb7df2 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -367,6 +367,7 @@ static int __init gpio_mockup_init(void)
err = platform_driver_register(&gpio_mockup_driver);
if (err) {
gpio_mockup_err("error registering platform driver\n");
+ debugfs_remove_recursive(gpio_mockup_dbg_dir);
return err;
}
@@ -386,6 +387,7 @@ static int __init gpio_mockup_init(void)
gpio_mockup_err("error registering device");
platform_driver_unregister(&gpio_mockup_driver);
gpio_mockup_unregister_pdevs();
+ debugfs_remove_recursive(gpio_mockup_dbg_dir);
return PTR_ERR(pdev);
}
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index adc768f908f1..874caed72390 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -650,9 +650,8 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
spin_lock_irqsave(&mvpwm->lock, flags);
- val = (unsigned long long)
- readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
- val *= NSEC_PER_SEC;
+ u = readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
+ val = (unsigned long long) u * NSEC_PER_SEC;
do_div(val, mvpwm->clk_rate);
if (val > UINT_MAX)
state->duty_cycle = UINT_MAX;
@@ -661,21 +660,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
else
state->duty_cycle = 1;
- val = (unsigned long long)
- readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
+ val = (unsigned long long) u; /* on duration */
+ /* period = on + off duration */
+ val += readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
val *= NSEC_PER_SEC;
do_div(val, mvpwm->clk_rate);
- if (val < state->duty_cycle) {
+ if (val > UINT_MAX)
+ state->period = UINT_MAX;
+ else if (val)
+ state->period = val;
+ else
state->period = 1;
- } else {
- val -= state->duty_cycle;
- if (val > UINT_MAX)
- state->period = UINT_MAX;
- else if (val)
- state->period = val;
- else
- state->period = 1;
- }
regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
if (u)
@@ -1191,6 +1186,13 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
+ /* Some MVEBU SoCs have simple PWM support for GPIO lines */
+ if (IS_ENABLED(CONFIG_PWM)) {
+ err = mvebu_pwm_probe(pdev, mvchip, id);
+ if (err)
+ return err;
+ }
+
/* Some gpio controllers do not provide irq support */
if (!have_irqs)
return 0;
@@ -1200,7 +1202,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
if (!mvchip->domain) {
dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
mvchip->chip.label);
- return -ENODEV;
+ err = -ENODEV;
+ goto err_pwm;
}
err = irq_alloc_domain_generic_chips(
@@ -1248,14 +1251,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip);
}
- /* Some MVEBU SoCs have simple PWM support for GPIO lines */
- if (IS_ENABLED(CONFIG_PWM))
- return mvebu_pwm_probe(pdev, mvchip, id);
-
return 0;
err_domain:
irq_domain_remove(mvchip->domain);
+err_pwm:
+ pwmchip_remove(&mvchip->mvpwm->chip);
return err;
}
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 68a35b65925a..d3fcc0d15eb8 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -357,7 +357,7 @@ static int pcf857x_probe(struct i2c_client *client,
* reset state. Otherwise it flags pins to be driven low.
*/
gpio->out = ~n_latch;
- gpio->status = gpio->out;
+ gpio->status = gpio->read(gpio->client);
status = devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio);
if (status < 0)
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index ffce0ab912ed..8c7f3d20e30e 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -524,7 +524,6 @@ static int pch_gpio_resume(struct pci_dev *pdev)
#define pch_gpio_resume NULL
#endif
-#define PCI_VENDOR_ID_ROHM 0x10DB
static const struct pci_device_id pch_gpio_pcidev_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
index f953541e7890..634125747a03 100644
--- a/drivers/gpio/gpio-pcie-idio-24.c
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -28,6 +28,47 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+/*
+ * PLX PEX8311 PCI LCS_INTCSR Interrupt Control/Status
+ *
+ * Bit: Description
+ * 0: Enable Interrupt Sources (Bit 0)
+ * 1: Enable Interrupt Sources (Bit 1)
+ * 2: Generate Internal PCI Bus Internal SERR# Interrupt
+ * 3: Mailbox Interrupt Enable
+ * 4: Power Management Interrupt Enable
+ * 5: Power Management Interrupt
+ * 6: Slave Read Local Data Parity Check Error Enable
+ * 7: Slave Read Local Data Parity Check Error Status
+ * 8: Internal PCI Wire Interrupt Enable
+ * 9: PCI Express Doorbell Interrupt Enable
+ * 10: PCI Abort Interrupt Enable
+ * 11: Local Interrupt Input Enable
+ * 12: Retry Abort Enable
+ * 13: PCI Express Doorbell Interrupt Active
+ * 14: PCI Abort Interrupt Active
+ * 15: Local Interrupt Input Active
+ * 16: Local Interrupt Output Enable
+ * 17: Local Doorbell Interrupt Enable
+ * 18: DMA Channel 0 Interrupt Enable
+ * 19: DMA Channel 1 Interrupt Enable
+ * 20: Local Doorbell Interrupt Active
+ * 21: DMA Channel 0 Interrupt Active
+ * 22: DMA Channel 1 Interrupt Active
+ * 23: Built-In Self-Test (BIST) Interrupt Active
+ * 24: Direct Master was the Bus Master during a Master or Target Abort
+ * 25: DMA Channel 0 was the Bus Master during a Master or Target Abort
+ * 26: DMA Channel 1 was the Bus Master during a Master or Target Abort
+ * 27: Target Abort after internal 256 consecutive Master Retries
+ * 28: PCI Bus wrote data to LCS_MBOX0
+ * 29: PCI Bus wrote data to LCS_MBOX1
+ * 30: PCI Bus wrote data to LCS_MBOX2
+ * 31: PCI Bus wrote data to LCS_MBOX3
+ */
+#define PLX_PEX8311_PCI_LCS_INTCSR 0x68
+#define INTCSR_INTERNAL_PCI_WIRE BIT(8)
+#define INTCSR_LOCAL_INPUT BIT(11)
+
/**
* struct idio_24_gpio_reg - GPIO device registers structure
* @out0_7: Read: FET Outputs 0-7
@@ -92,6 +133,7 @@ struct idio_24_gpio_reg {
struct idio_24_gpio {
struct gpio_chip chip;
raw_spinlock_t lock;
+ __u8 __iomem *plx;
struct idio_24_gpio_reg __iomem *reg;
unsigned long irq_mask;
};
@@ -360,13 +402,13 @@ static void idio_24_irq_mask(struct irq_data *data)
unsigned long flags;
const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
unsigned char new_irq_mask;
- const unsigned long bank_offset = bit_offset/8 * 8;
+ const unsigned long bank_offset = bit_offset / 8;
unsigned char cos_enable_state;
raw_spin_lock_irqsave(&idio24gpio->lock, flags);
- idio24gpio->irq_mask &= BIT(bit_offset);
- new_irq_mask = idio24gpio->irq_mask >> bank_offset;
+ idio24gpio->irq_mask &= ~BIT(bit_offset);
+ new_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
if (!new_irq_mask) {
cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
@@ -389,12 +431,12 @@ static void idio_24_irq_unmask(struct irq_data *data)
unsigned long flags;
unsigned char prev_irq_mask;
const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
- const unsigned long bank_offset = bit_offset/8 * 8;
+ const unsigned long bank_offset = bit_offset / 8;
unsigned char cos_enable_state;
raw_spin_lock_irqsave(&idio24gpio->lock, flags);
- prev_irq_mask = idio24gpio->irq_mask >> bank_offset;
+ prev_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
idio24gpio->irq_mask |= BIT(bit_offset);
if (!prev_irq_mask) {
@@ -481,6 +523,7 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct device *const dev = &pdev->dev;
struct idio_24_gpio *idio24gpio;
int err;
+ const size_t pci_plx_bar_index = 1;
const size_t pci_bar_index = 2;
const char *const name = pci_name(pdev);
@@ -494,12 +537,13 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name);
+ err = pcim_iomap_regions(pdev, BIT(pci_plx_bar_index) | BIT(pci_bar_index), name);
if (err) {
dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
return err;
}
+ idio24gpio->plx = pcim_iomap_table(pdev)[pci_plx_bar_index];
idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index];
idio24gpio->chip.label = name;
@@ -520,6 +564,12 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Software board reset */
iowrite8(0, &idio24gpio->reg->soft_reset);
+ /*
+ * enable PLX PEX8311 internal PCI wire interrupt and local interrupt
+ * input
+ */
+ iowrite8((INTCSR_INTERNAL_PCI_WIRE | INTCSR_LOCAL_INPUT) >> 8,
+ idio24gpio->plx + PLX_PEX8311_PCI_LCS_INTCSR + 1);
err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio);
if (err) {
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index 55072d2b367f..4d53347adcaf 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -149,17 +149,20 @@ static int sprd_gpio_irq_set_type(struct irq_data *data,
sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0);
sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 1);
+ sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0);
sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 0);
+ sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 1);
+ sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 91a8ef8e7f3f..1436098b1614 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -209,7 +209,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
continue;
tc3589x_gpio->oldregs[i][j] = new;
- tc3589x_reg_write(tc3589x, regmap[i] + j * 8, new);
+ tc3589x_reg_write(tc3589x, regmap[i] + j, new);
}
}
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 47dbd19751d0..57903501821e 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -357,6 +357,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
struct tegra_gpio_info *tgi = bank->tgi;
unsigned int gpio = d->hwirq;
+ tegra_gpio_irq_mask(d);
gpiochip_unlock_as_irq(&tgi->gc, gpio);
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 18f5973b9697..b018909a4e46 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -177,7 +177,7 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
int ret, value;
ret = request_threaded_irq(event->irq, NULL, event->handler,
- event->irqflags, "ACPI:Event", event);
+ event->irqflags | IRQF_ONESHOT, "ACPI:Event", event);
if (ret) {
dev_err(acpi_gpio->chip->parent,
"Failed to setup interrupt handler for %d\n",
@@ -1357,6 +1357,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
},
{
/*
+ * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FC:02 pin 12, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FC:02@12",
+ },
+ },
+ {
+ /*
* HP X2 10 models with Cherry Trail SoC + TI PMIC use an
* external embedded-controller connected via I2C + an ACPI GPIO
* event handler on INT33FF:01 pin 0, causing spurious wakeups.
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 3dbaf489a8a5..e0ccc79b239a 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -462,6 +462,8 @@ static ssize_t export_store(struct class *class,
long gpio;
struct gpio_desc *desc;
int status;
+ struct gpio_chip *gc;
+ int offset;
status = kstrtol(buf, 0, &gpio);
if (status < 0)
@@ -473,6 +475,12 @@ static ssize_t export_store(struct class *class,
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}
+ gc = desc->gdev->chip;
+ offset = gpio_chip_hwgpio(desc);
+ if (!gpiochip_line_is_valid(gc, offset)) {
+ pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
+ return -EINVAL;
+ }
/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done by on behalf of userspace, so
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e44e567bd789..a050a9aa9a5e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -192,6 +192,7 @@ source "drivers/gpu/drm/arm/Kconfig"
config DRM_RADEON
tristate "ATI Radeon"
depends on DRM && PCI && MMU
+ depends on AGP || !AGP
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 71efcf38f11b..94cd8a261091 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -276,7 +276,7 @@ static int acp_hw_init(void *handle)
u32 val = 0;
u32 count = 0;
struct device *dev;
- struct i2s_platform_data *i2s_pdata;
+ struct i2s_platform_data *i2s_pdata = NULL;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -317,20 +317,21 @@ static int acp_hw_init(void *handle)
adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
GFP_KERNEL);
- if (adev->acp.acp_cell == NULL)
- return -ENOMEM;
+ if (adev->acp.acp_cell == NULL) {
+ r = -ENOMEM;
+ goto failure;
+ }
adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
if (adev->acp.acp_res == NULL) {
- kfree(adev->acp.acp_cell);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto failure;
}
i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
if (i2s_pdata == NULL) {
- kfree(adev->acp.acp_res);
- kfree(adev->acp.acp_cell);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto failure;
}
switch (adev->asic_type) {
@@ -427,7 +428,7 @@ static int acp_hw_init(void *handle)
r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
ACP_DEVS);
if (r)
- return r;
+ goto failure;
if (adev->asic_type != CHIP_STONEY) {
for (i = 0; i < ACP_DEVS ; i++) {
@@ -435,7 +436,7 @@ static int acp_hw_init(void *handle)
r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
if (r) {
dev_err(dev, "Failed to add dev to genpd\n");
- return r;
+ goto failure;
}
}
}
@@ -454,7 +455,8 @@ static int acp_hw_init(void *handle)
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto failure;
}
udelay(100);
}
@@ -471,7 +473,8 @@ static int acp_hw_init(void *handle)
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto failure;
}
udelay(100);
}
@@ -480,6 +483,13 @@ static int acp_hw_init(void *handle)
val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
return 0;
+
+failure:
+ kfree(i2s_pdata);
+ kfree(adev->acp.acp_res);
+ kfree(adev->acp.acp_cell);
+ kfree(adev->acp.acp_genpd);
+ return r;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index a5df80d50d44..6cf3dd5edffd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -191,30 +191,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
{
- uint8_t __iomem *bios;
- size_t size;
+ phys_addr_t rom = adev->pdev->rom;
+ size_t romlen = adev->pdev->romlen;
+ void __iomem *bios;
adev->bios = NULL;
- bios = pci_platform_rom(adev->pdev, &size);
- if (!bios) {
+ if (!rom || romlen == 0)
return false;
- }
- adev->bios = kzalloc(size, GFP_KERNEL);
- if (adev->bios == NULL)
+ adev->bios = kzalloc(romlen, GFP_KERNEL);
+ if (!adev->bios)
return false;
- memcpy_fromio(adev->bios, bios, size);
+ bios = ioremap(rom, romlen);
+ if (!bios)
+ goto free_bios;
- if (!check_atom_bios(adev->bios, size)) {
- kfree(adev->bios);
- return false;
- }
+ memcpy_fromio(adev->bios, bios, romlen);
+ iounmap(bios);
- adev->bios_size = size;
+ if (!check_atom_bios(adev->bios, romlen))
+ goto free_bios;
+
+ adev->bios_size = romlen;
return true;
+free_bios:
+ kfree(adev->bios);
+ return false;
}
#ifdef CONFIG_ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index c770d73352a7..c15286858f0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -718,8 +718,10 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
+ }
}
if (encoder) {
@@ -856,8 +858,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
+ }
}
encoder = amdgpu_connector_best_single_encoder(connector);
@@ -979,8 +983,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
+ }
}
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
@@ -1329,8 +1335,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
+ }
}
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a90e83e5ab57..ee4a0b7cb452 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -239,7 +239,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;
- value = RREG32_PCIE(*pos >> 2);
+ value = RREG32_PCIE(*pos);
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
@@ -282,7 +282,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (r)
return r;
- WREG32_PCIE(*pos >> 2, value);
+ WREG32_PCIE(*pos, value);
result += 4;
buf += 4;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 686a26de50f9..5f85c9586cba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -275,7 +275,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0)
- return ret;
+ goto out;
ret = drm_crtc_helper_set_config(set, ctx);
@@ -299,6 +299,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
adev->have_disp_power_ref = false;
}
+out:
/* drop the power reference we got coming in here */
pm_runtime_put_autosuspend(dev->dev);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 5e29f14f4b30..63b1e325b45c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1085,11 +1085,12 @@ long amdgpu_drm_ioctl(struct file *filp,
dev = file_priv->minor->dev;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0)
- return ret;
+ goto out;
ret = drm_ioctl(filp, cmd, arg);
pm_runtime_mark_last_busy(dev->dev);
+out:
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 69c5d22f29bd..d55ff59584c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -297,10 +297,13 @@ out:
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
+ int i;
drm_fb_helper_unregister_fbi(&rfbdev->helper);
if (rfb->base.obj[0]) {
+ for (i = 0; i < rfb->base.format->num_planes; i++)
+ drm_gem_object_put(rfb->base.obj[0]);
amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
rfb->base.obj[0] = NULL;
drm_framebuffer_unregister_private(&rfb->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 869ff624b108..e5e51e4d4f3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -396,7 +396,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
}
amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
- amdgpu_irq_get(adev, irq_src, irq_type);
+
+ if (irq_src)
+ amdgpu_irq_get(adev, irq_src, irq_type);
ring->fence_drv.irq_src = irq_src;
ring->fence_drv.irq_type = irq_type;
@@ -508,8 +510,9 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
/* no need to trigger GPU reset as we are unloading */
amdgpu_fence_driver_force_completion(ring);
}
- amdgpu_irq_put(adev, ring->fence_drv.irq_src,
- ring->fence_drv.irq_type);
+ if (ring->fence_drv.irq_src)
+ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
drm_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
@@ -545,8 +548,9 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
}
/* disable the interrupt */
- amdgpu_irq_put(adev, ring->fence_drv.irq_src,
- ring->fence_drv.irq_type);
+ if (ring->fence_drv.irq_src)
+ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
}
}
@@ -572,8 +576,9 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
continue;
/* enable the interrupt */
- amdgpu_irq_get(adev, ring->fence_drv.irq_src,
- ring->fence_drv.irq_type);
+ if (ring->fence_drv.irq_src)
+ amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 71792d820ae0..0db05ff4a652 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -563,6 +563,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
uint64_t va_flags;
+ uint64_t vm_size;
int r = 0;
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
@@ -583,6 +584,15 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->va_address &= AMDGPU_VA_HOLE_MASK;
+ vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
+ vm_size -= AMDGPU_VA_RESERVED_SIZE;
+ if (args->va_address + args->map_size > vm_size) {
+ dev_dbg(&dev->pdev->dev,
+ "va_address 0x%llx is in top reserved area 0x%llx\n",
+ args->va_address + args->map_size, vm_size);
+ return -EINVAL;
+ }
+
if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
args->flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 1abf5b5bac9e..18402a6ba8fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -447,7 +447,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
- if (!src)
+ if (!src || !src->funcs || !src->funcs->set)
continue;
for (k = 0; k < src->num_types; k++)
amdgpu_irq_update(adev, src, k);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index ba10577569f8..dd9b8feb3a66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -524,8 +524,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
* in the bitfields */
if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
se_num = 0xffffffff;
+ else if (se_num >= AMDGPU_GFX_MAX_SE)
+ return -EINVAL;
if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
sh_num = 0xffffffff;
+ else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
+ return -EINVAL;
if (info->read_mmr_reg.count > 128)
return -EINVAL;
@@ -549,9 +553,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return n ? -EFAULT : 0;
}
case AMDGPU_INFO_DEV_INFO: {
- struct drm_amdgpu_info_device dev_info = {};
+ struct drm_amdgpu_info_device dev_info;
uint64_t vm_size;
+ memset(&dev_info, 0, sizeof(dev_info));
dev_info.device_id = dev->pdev->device;
dev_info.chip_rev = adev->rev_id;
dev_info.external_rev = adev->external_rev_id;
@@ -834,7 +839,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
r = pm_runtime_get_sync(dev->dev);
if (r < 0)
- return r;
+ goto pm_put;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
if (unlikely(!fpriv)) {
@@ -882,6 +887,7 @@ error_pasid:
out_suspend:
pm_runtime_mark_last_busy(dev->dev);
+pm_put:
pm_runtime_put_autosuspend(dev->dev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index c3df75a9f65d..cbb969a870f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -71,7 +71,8 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
adev->pm.ac_power = true;
else
adev->pm.ac_power = false;
- if (adev->powerplay.pp_funcs->enable_bapm)
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
mutex_unlock(&adev->pm.mutex);
}
@@ -616,7 +617,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
int ret;
- long level;
+ unsigned long level;
char *sub_str = NULL;
char *tmp;
char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
@@ -632,8 +633,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
while (tmp[0]) {
sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
- ret = kstrtol(sub_str, 0, &level);
- if (ret)
+ ret = kstrtoul(sub_str, 0, &level);
+ if (ret || level > 31)
return -EINVAL;
*mask |= 1 << level;
} else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 7206a0025b17..db9907fb99f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -21,7 +21,7 @@
*
*/
-#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_
#include <linux/stringify.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index fcf421263fd9..50807d621eca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -954,6 +954,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
release_sg:
kfree(ttm->sg);
+ ttm->sg = NULL;
return r;
}
@@ -970,7 +971,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
/* double check that we don't free the table twice */
- if (!ttm->sg->sgl)
+ if (!ttm->sg || !ttm->sg->sgl)
return;
/* unmap the pages mapped to the device */
@@ -1276,6 +1277,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
+ ttm->sg = NULL;
ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e5a6db6beab7..8c5f39beee7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -231,7 +231,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if ((adev->asic_type == CHIP_POLARIS10 ||
adev->asic_type == CHIP_POLARIS11) &&
(adev->uvd.fw_version < FW_1_66_16))
- DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+ DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
version_major, version_minor);
} else {
unsigned int enc_major, enc_minor, dec_minor;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f67c332b16a4..6a1f5df4bc07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2076,8 +2076,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
uint64_t eaddr;
/* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+ size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
@@ -2141,8 +2141,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
int r;
/* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+ size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
@@ -2286,7 +2286,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
after->start = eaddr + 1;
after->last = tmp->last;
after->offset = tmp->offset;
- after->offset += after->start - tmp->start;
+ after->offset += (after->start - tmp->start) << PAGE_SHIFT;
after->flags = tmp->flags;
after->bo_va = tmp->bo_va;
list_add(&after->list, &tmp->bo_va->invalids);
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index e9934de1b9cf..0222bb7ea49b 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -742,8 +742,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
cjiffies = jiffies;
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
- if ((jiffies_to_msecs(cjiffies) > 5000)) {
- DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
+ if ((jiffies_to_msecs(cjiffies) > 10000)) {
+ DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
ctx->abort = true;
}
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index d0fa2aac2388..ca66c2f79758 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1086,22 +1086,19 @@ static int cik_sdma_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 tmp = RREG32(mmSRBM_STATUS2);
+ u32 tmp;
- if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
- /* sdma0 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
- }
- if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
- /* sdma1 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
- }
+ /* sdma0 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+
+ /* sdma1 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 938d0053a820..28022d1cb0f0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -921,9 +921,9 @@ kfd_gtt_out:
return 0;
kfd_gtt_no_free_chunk:
- pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
+ pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
mutex_unlock(&kfd->gtt_sa_lock);
- kfree(mem_obj);
+ kfree(*mem_obj);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 189212cb3547..bff39f561264 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1101,6 +1101,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
dqm_unlock(dqm);
+ pm_release_ib(&dqm->packets);
+
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 01494752c36a..f3a526ed8059 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -20,6 +20,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/kconfig.h>
+
+#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
+
#include <linux/printk.h>
#include <linux/device.h>
#include <linux/slab.h>
@@ -366,3 +370,5 @@ int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
return 0;
}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
index dd23d9fdf6a8..afd420b01a0c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
@@ -23,7 +23,9 @@
#ifndef __KFD_IOMMU_H__
#define __KFD_IOMMU_H__
-#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
+#include <linux/kconfig.h>
+
+#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
#define KFD_SUPPORT_IOMMU_V2
@@ -46,6 +48,9 @@ static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
}
static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
{
+#if IS_MODULE(CONFIG_AMD_IOMMU_V2)
+ WARN_ONCE(1, "iommu_v2 module is not usable by built-in KFD");
+#endif
return 0;
}
@@ -73,6 +78,6 @@ static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
return 0;
}
-#endif /* defined(CONFIG_AMD_IOMMU_V2) */
+#endif /* IS_REACHABLE(CONFIG_AMD_IOMMU_V2) */
#endif /* __KFD_IOMMU_H__ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 0805c423a5ce..5cf499a07806 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -592,8 +592,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
ret = kobject_init_and_add(dev->kobj_node, &node_type,
sys_props.kobj_nodes, "%d", id);
- if (ret < 0)
+ if (ret < 0) {
+ kobject_put(dev->kobj_node);
return ret;
+ }
dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node);
if (!dev->kobj_mem)
@@ -640,8 +642,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
return -ENOMEM;
ret = kobject_init_and_add(mem->kobj, &mem_type,
dev->kobj_mem, "%d", i);
- if (ret < 0)
+ if (ret < 0) {
+ kobject_put(mem->kobj);
return ret;
+ }
mem->attr.name = "properties";
mem->attr.mode = KFD_SYSFS_FILE_MODE;
@@ -659,8 +663,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
return -ENOMEM;
ret = kobject_init_and_add(cache->kobj, &cache_type,
dev->kobj_cache, "%d", i);
- if (ret < 0)
+ if (ret < 0) {
+ kobject_put(cache->kobj);
return ret;
+ }
cache->attr.name = "properties";
cache->attr.mode = KFD_SYSFS_FILE_MODE;
@@ -678,8 +684,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
return -ENOMEM;
ret = kobject_init_and_add(iolink->kobj, &iolink_type,
dev->kobj_iolink, "%d", i);
- if (ret < 0)
+ if (ret < 0) {
+ kobject_put(iolink->kobj);
return ret;
+ }
iolink->attr.name = "properties";
iolink->attr.mode = KFD_SYSFS_FILE_MODE;
@@ -759,8 +767,10 @@ static int kfd_topology_update_sysfs(void)
ret = kobject_init_and_add(sys_props.kobj_topology,
&sysprops_type, &kfd_device->kobj,
"topology");
- if (ret < 0)
+ if (ret < 0) {
+ kobject_put(sys_props.kobj_topology);
return ret;
+ }
sys_props.kobj_nodes = kobject_create_and_add("nodes",
sys_props.kobj_topology);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3b07a316680c..62a2f0491117 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -668,8 +668,8 @@ static void emulated_link_detect(struct dc_link *link)
link->type = dc_connection_none;
prev_sink = link->local_sink;
- if (prev_sink != NULL)
- dc_sink_retain(prev_sink);
+ if (prev_sink)
+ dc_sink_release(prev_sink);
switch (link->connector_signal) {
case SIGNAL_TYPE_HDMI_TYPE_A: {
@@ -4732,14 +4732,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(conn_state);
if (ret)
- goto err;
+ goto out;
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
- goto err;
+ goto out;
/* force a restore */
crtc_state->mode_changed = true;
@@ -4749,17 +4749,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(plane_state);
if (ret)
- goto err;
-
+ goto out;
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
- if (!ret)
- return 0;
-err:
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+out:
drm_atomic_state_put(state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 2b2efe443c36..b64ad9e1f0c3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -996,6 +996,26 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
+static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ struct pipe_ctx *pipe;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state)
+ continue;
+
+ /* Must set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (pipe->plane_state->status.is_flip_pending)
+ return true;
+ }
+ return false;
+}
+
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
int i;
@@ -1003,6 +1023,9 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc);
+ if (is_flip_pending_in_pipes(dc, context))
+ return true;
+
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream == NULL ||
context->res_ctx.pipe_ctx[i].plane_state == NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 3abc0294c05f..c9c81090d580 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -768,6 +768,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
dc_is_dvi_signal(link->connector_signal)) {
if (prev_sink != NULL)
dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
+
+ return false;
+ }
+ /*
+ * Abort detection for DP connectors if we have
+ * no EDID and the connector is an active converter,
+ * as there is no display downstream
+ *
+ */
+ if (dc_is_dp_sst_signal(link->connector_signal) &&
+ (link->dpcd_caps.dongle_type ==
+ DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+ link->dpcd_caps.dongle_type ==
+ DISPLAY_DONGLE_DP_DVI_CONVERTER)) {
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
return false;
}
@@ -1124,6 +1142,11 @@ static bool construct(
goto ddc_create_fail;
}
+ if (!link->ddc->ddc_pin) {
+ DC_ERROR("Failed to get I2C info for connector!\n");
+ goto ddc_create_fail;
+ }
+
link->ddc_hw_inst =
dal_ddc_get_line(
dal_ddc_service_get_ddc_pin(link->ddc));
@@ -1576,8 +1599,7 @@ static void write_i2c_retimer_setting(
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
* needs to be set to 1 on every 0xA-0xC write.
@@ -1595,8 +1617,7 @@ static void write_i2c_retimer_setting(
pipe_ctx->stream->sink->link->ddc,
slave_address, &offset, 1, &value, 1);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
buffer[0] = offset;
@@ -1605,8 +1626,7 @@ static void write_i2c_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
}
}
@@ -1623,8 +1643,7 @@ static void write_i2c_retimer_setting(
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
* needs to be set to 1 on every 0xA-0xC write.
@@ -1642,8 +1661,7 @@ static void write_i2c_retimer_setting(
pipe_ctx->stream->sink->link->ddc,
slave_address, &offset, 1, &value, 1);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
buffer[0] = offset;
@@ -1652,8 +1670,7 @@ static void write_i2c_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
}
}
@@ -1668,8 +1685,7 @@ static void write_i2c_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x00 to 0x23 */
buffer[0] = 0x00;
@@ -1677,8 +1693,7 @@ static void write_i2c_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0xff to 0x00 */
buffer[0] = 0xff;
@@ -1686,10 +1701,14 @@ static void write_i2c_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set retimer failed");
}
static void write_i2c_default_retimer_setting(
@@ -1710,8 +1729,7 @@ static void write_i2c_default_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1719,8 +1737,7 @@ static void write_i2c_default_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0B to 0xDA or 0xD8 */
buffer[0] = 0x0B;
@@ -1728,8 +1745,7 @@ static void write_i2c_default_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1737,8 +1753,7 @@ static void write_i2c_default_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0C to 0x1D or 0x91 */
buffer[0] = 0x0C;
@@ -1746,8 +1761,7 @@ static void write_i2c_default_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1755,8 +1769,7 @@ static void write_i2c_default_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
if (is_vga_mode) {
@@ -1768,8 +1781,7 @@ static void write_i2c_default_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x00 to 0x23 */
buffer[0] = 0x00;
@@ -1777,8 +1789,7 @@ static void write_i2c_default_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0xff to 0x00 */
buffer[0] = 0xff;
@@ -1786,9 +1797,13 @@ static void write_i2c_default_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set default retimer failed");
}
static void write_i2c_redriver_setting(
@@ -1811,8 +1826,7 @@ static void write_i2c_redriver_setting(
buffer, sizeof(buffer));
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ DC_LOG_DEBUG("Set redriver failed");
}
static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
@@ -2018,7 +2032,7 @@ enum dc_status dc_link_validate_mode_timing(
/* A hack to avoid failing any modes for EDID override feature on
* topology change such as lower quality cable for DP or different dongle
*/
- if (link->remote_sinks[0])
+ if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
return DC_OK;
/* Passive Dongle */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 46c9cb47a96e..145af3bb2dfc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -127,22 +127,16 @@ struct aux_payloads {
struct vector payloads;
};
-static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
+static bool dal_ddc_i2c_payloads_create(
+ struct dc_context *ctx,
+ struct i2c_payloads *payloads,
+ uint32_t count)
{
- struct i2c_payloads *payloads;
-
- payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
-
- if (!payloads)
- return NULL;
-
if (dal_vector_construct(
&payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
- return payloads;
-
- kfree(payloads);
- return NULL;
+ return true;
+ return false;
}
static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
@@ -155,14 +149,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
return p->payloads.count;
}
-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
+static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
{
- if (!p || !*p)
+ if (!p)
return;
- dal_vector_destruct(&(*p)->payloads);
- kfree(*p);
- *p = NULL;
+ dal_vector_destruct(&p->payloads);
}
static struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count)
@@ -580,9 +572,13 @@ bool dal_ddc_service_query_ddc_data(
uint32_t payloads_num = write_payloads + read_payloads;
+
if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
return false;
+ if (!payloads_num)
+ return false;
+
/*TODO: len of payload data for i2c and aux is uint8!!!!,
* but we want to read 256 over i2c!!!!*/
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
@@ -613,23 +609,25 @@ bool dal_ddc_service_query_ddc_data(
dal_ddc_aux_payloads_destroy(&payloads);
} else {
- struct i2c_payloads *payloads =
- dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
+ struct i2c_command command = {0};
+ struct i2c_payloads payloads;
- struct i2c_command command = {
- .payloads = dal_ddc_i2c_payloads_get(payloads),
- .number_of_payloads = 0,
- .engine = DDC_I2C_COMMAND_ENGINE,
- .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+ if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
+ return false;
+
+ command.payloads = dal_ddc_i2c_payloads_get(&payloads);
+ command.number_of_payloads = 0;
+ command.engine = DDC_I2C_COMMAND_ENGINE;
+ command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
dal_ddc_i2c_payloads_add(
- payloads, address, write_size, write_buf, true);
+ &payloads, address, write_size, write_buf, true);
dal_ddc_i2c_payloads_add(
- payloads, address, read_size, read_buf, false);
+ &payloads, address, read_size, read_buf, false);
command.number_of_payloads =
- dal_ddc_i2c_payloads_get_count(payloads);
+ dal_ddc_i2c_payloads_get_count(&payloads);
ret = dm_helpers_submit_i2c(
ddc->ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index ab63d0d0304c..6fd57cfb112f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -429,12 +429,12 @@ static void set_clamp(
clamp_max = 0x3FC0;
break;
case COLOR_DEPTH_101010:
- /* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
- clamp_max = 0x3FFC;
+ /* 10bit MSB aligned on 14 bit bus '11 1111 1111 0000' */
+ clamp_max = 0x3FF0;
break;
case COLOR_DEPTH_121212:
- /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
- clamp_max = 0x3FFF;
+ /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
+ clamp_max = 0x3FFC;
break;
default:
clamp_max = 0x3FC0;
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 3f76e6019546..5a2f29bd3508 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -1001,6 +1001,7 @@ struct resource_pool *dce100_create_resource_pool(
if (construct(num_virtual_links, dc, pool))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index e5e9e92521e9..17d936c260d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -1344,6 +1344,7 @@ struct resource_pool *dce110_create_resource_pool(
if (construct(num_virtual_links, dc, pool, asic_id))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 288129343c77..71adab8bf31b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -1287,6 +1287,7 @@ struct resource_pool *dce112_create_resource_pool(
if (construct(num_virtual_links, dc, pool))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index d43f37d99c7d..f0f2ce6da827 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -1076,6 +1076,7 @@ struct resource_pool *dce120_create_resource_pool(
if (construct(num_virtual_links, dc, pool))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 6b44ed3697a4..e6d556881140 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1361,6 +1361,7 @@ struct resource_pool *dcn10_create_resource_pool(
if (construct(num_virtual_links, dc, pool))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index a407892905af..d4cb7db89192 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -57,7 +57,7 @@
* general debug capabilities
*
*/
-#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)
+#if defined(CONFIG_DEBUG_KERNEL_DC) && (defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB))
#define ASSERT_CRITICAL(expr) do { \
if (WARN_ON(!(expr))) { \
kgdb_breakpoint(); \
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 52a73332befb..343f869c5277 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg);
*/
static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
{
+ if (arg1.value == 0)
+ return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
+
return dc_fixpt_exp(
dc_fixpt_mul(
dc_fixpt_log(arg1),
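
The guard added above matters because dc_fixpt_pow() is computed as exp(arg2 * log(arg1)), which is undefined for a zero base; the patch pins 0^0 to one and 0^y to zero before the logarithm is taken. The same identity in plain doubles, for illustration only:

#include <math.h>
#include <stdio.h>

/* pow(x, y) == exp(y * log(x)) only holds for x > 0, so x == 0 is
 * special-cased before taking the logarithm, as the hunk above does */
static double guarded_pow(double x, double y)
{
	if (x == 0.0)
		return y == 0.0 ? 1.0 : 0.0;
	return exp(y * log(x));
}

int main(void)
{
	printf("%g %g %g\n", guarded_pow(0, 0), guarded_pow(0, 2.5),
	       guarded_pow(2, 10));	/* 1 0 1024 */
	return 0;
}
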
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 962900932bee..11ea1a0e629b 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1561,7 +1561,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
kfree(rgb_regamma);
rgb_regamma_alloc_fail:
- kvfree(rgb_user);
+ kfree(rgb_user);
rgb_user_alloc_fail:
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 925e17104f90..b9e08b06ed5d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -983,6 +983,32 @@ static int init_thermal_controller(
struct pp_hwmgr *hwmgr,
const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
+ hwmgr->thermal_controller.ucType =
+ powerplay_table->sThermalController.ucType;
+ hwmgr->thermal_controller.ucI2cLine =
+ powerplay_table->sThermalController.ucI2cLine;
+ hwmgr->thermal_controller.ucI2cAddress =
+ powerplay_table->sThermalController.ucI2cAddress;
+
+ hwmgr->thermal_controller.fanInfo.bNoFan =
+ (0 != (powerplay_table->sThermalController.ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN));
+
+ hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
+ powerplay_table->sThermalController.ucFanParameters &
+ ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+
+ hwmgr->thermal_controller.fanInfo.ulMinRPM
+ = powerplay_table->sThermalController.ucFanMinRPM * 100UL;
+ hwmgr->thermal_controller.fanInfo.ulMaxRPM
+ = powerplay_table->sThermalController.ucFanMaxRPM * 100UL;
+
+ set_hw_cap(hwmgr,
+ ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
+ PHM_PlatformCaps_ThermalController);
+
+ hwmgr->thermal_controller.use_hw_fan_control = 1;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 6bf032e81e39..d8e624d64ae3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1531,6 +1531,10 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to reset to default!", result = tmp_result);
+ tmp_result = smum_stop_smc(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to stop smc!", result = tmp_result);
+
tmp_result = smu7_force_switch_to_arbf0(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to force to switch arbf0!", result = tmp_result);
@@ -3566,7 +3570,8 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
case AMDGPU_PP_SENSOR_GPU_POWER:
return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
case AMDGPU_PP_SENSOR_VDDGFX:
- if ((data->vr_config & 0xff) == 0x2)
+ if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
+ (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
else
@@ -3788,9 +3793,12 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
{
uint32_t i;
+ /* force the trim if mclk_switching is disabled to prevent flicker */
+ bool force_trim = (low_limit == high_limit);
for (i = 0; i < dpm_table->count; i++) {
/*skip the trim if od is enabled*/
- if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
+ if ((!hwmgr->od_enabled || force_trim)
+ && (dpm_table->dpm_levels[i].value < low_limit
|| dpm_table->dpm_levels[i].value > high_limit))
dpm_table->dpm_levels[i].enabled = false;
else
@@ -3966,6 +3974,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
"Failed to populate and upload SCLK MCLK DPM levels!",
result = tmp_result);
+ /*
+ * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
+ * That effectively disables AVFS feature.
+ */
+ if (hwmgr->hardcode_pp_table != NULL)
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
tmp_result = smu7_update_avfs(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to update avfs voltages!",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index ce459ea4ec3a..da9e6923fa65 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3591,6 +3591,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
+ /*
+ * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
+ * That effectively disables AVFS feature.
+ */
+ if(hwmgr->hardcode_pp_table != NULL)
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
vega10_update_avfs(hwmgr);
data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index aa044c1955fe..a2b2a6c67cda 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -362,6 +362,9 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
+ struct phm_ppt_v2_information *pp_table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+ struct phm_tdp_table *tdp_table = pp_table_info->tdp_table;
struct amdgpu_device *adev = hwmgr->adev;
int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
@@ -371,8 +374,8 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
if (low < range->min)
low = range->min;
- if (high > range->max)
- high = range->max;
+ if (high > tdp_table->usSoftwareShutdownTemp)
+ high = tdp_table->usSoftwareShutdownTemp;
if (low > high)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
index 904eb2c9155b..601a596e94f0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -170,6 +170,8 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
struct amdgpu_device *adev = hwmgr->adev;
int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
@@ -179,8 +181,8 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
if (low < range->min)
low = range->min;
- if (high > range->max)
- high = range->max;
+ if (high > pptable_information->us_software_shutdown_temp)
+ high = pptable_information->us_software_shutdown_temp;
if (low > high)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 6ee864455a12..f59e1e737735 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -216,6 +216,7 @@ struct pp_smumgr_func {
bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting);
int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */
+ int (*stop_smc)(struct pp_hwmgr *hwmgr);
};
struct pp_hwmgr_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 82550a8a3a3f..ef4f2392e2e7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -113,4 +113,6 @@ extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_settin
extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);
+extern int smum_stop_smc(struct pp_hwmgr *hwmgr);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 924788772b07..c05bec5effb2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -238,7 +238,7 @@ static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
switch (dev_id) {
case 0x67BA:
- case 0x66B1:
+ case 0x67B1:
smu_data->power_tune_defaults = &defaults_hawaii_pro;
break;
case 0x67B8:
@@ -2931,6 +2931,29 @@ static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
return 0;
}
+static void ci_reset_smc(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL,
+ rst_reg, 1);
+}
+
+
+static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_CLOCK_CNTL_0,
+ ck_disable, 1);
+}
+
+static int ci_stop_smc(struct pp_hwmgr *hwmgr)
+{
+ ci_reset_smc(hwmgr);
+ ci_stop_smc_clock(hwmgr);
+
+ return 0;
+}
+
const struct pp_smumgr_func ci_smu_funcs = {
.smu_init = ci_smu_init,
.smu_fini = ci_smu_fini,
@@ -2954,4 +2977,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
.is_dpm_running = ci_is_dpm_running,
.update_dpm_settings = ci_update_dpm_settings,
.update_smc_table = ci_update_smc_table,
+ .stop_smc = ci_stop_smc,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index a6edd5df33b0..20ecf994d47f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -213,3 +213,11 @@ int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t tabl
return -EINVAL;
}
+
+int smum_stop_smc(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr->smumgr_funcs->stop_smc)
+ return hwmgr->smumgr_funcs->stop_smc(hwmgr);
+
+ return 0;
+}
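
smum_stop_smc() above is a thin optional-hook dispatcher: only ci_smumgr fills in .stop_smc, every other SMU backend leaves it NULL and the wrapper returns success, so smu7_disable_dpm_tasks() can call it unconditionally. The shape of that dispatch, with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

struct hwmgr;

struct smumgr_funcs {
	int (*stop_smc)(struct hwmgr *hwmgr);
};

struct hwmgr {
	const struct smumgr_funcs *smumgr_funcs;
};

static int stop_smc_dispatch(struct hwmgr *hwmgr)
{
	if (hwmgr->smumgr_funcs->stop_smc)
		return hwmgr->smumgr_funcs->stop_smc(hwmgr);
	return 0;	/* backend has no hook: treat as success */
}

int main(void)
{
	const struct smumgr_funcs no_hook = { .stop_smc = NULL };
	struct hwmgr mgr = { .smumgr_funcs = &no_hook };

	printf("%d\n", stop_smc_dispatch(&mgr));	/* 0 */
	return 0;
}
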
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 59113fdd1c1c..2cd0f8d8470e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -643,9 +643,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
/* sclk is bigger than max sclk in the dependence table */
*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i - 1].vddc -
- (uint16_t)VDDC_VDDCI_DELTA));
if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
*voltage |= (data->vbios_boot_state.vddci_bootup_value *
@@ -653,8 +650,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
else if (dep_table->entries[i - 1].vddci)
*voltage |= (dep_table->entries[i - 1].vddci *
VOLTAGE_SCALE) << VDDC_SHIFT;
- else
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i - 1].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+
*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 29409a65d864..a347b27405d8 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -446,7 +446,7 @@ int malidp_de_planes_init(struct drm_device *drm)
const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
struct malidp_plane *plane = NULL;
enum drm_plane_type plane_type;
- unsigned long crtcs = 1 << drm->mode_config.num_crtc;
+ unsigned long crtcs = BIT(drm->mode_config.num_crtc);
unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
u32 *formats;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 1b4783d45c53..3a218b56a008 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -20,13 +20,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
{
switch (fs) {
case 32000:
- *n = 4096;
+ case 48000:
+ case 96000:
+ case 192000:
+ *n = fs * 128 / 1000;
break;
case 44100:
- *n = 6272;
- break;
- case 48000:
- *n = 6144;
+ case 88200:
+ case 176400:
+ *n = fs * 128 / 900;
break;
}
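
The rewritten switch above folds the per-rate N constants into the two HDMI ACR formulas: N = 128*fs/1000 for the 32/48 kHz family and N = 128*fs/900 for the 44.1 kHz family. These reproduce the old 4096/6144/6272 values and extend cleanly to the higher rates the old code never handled. Quick arithmetic check:

#include <stdio.h>

int main(void)
{
	const unsigned fam48[] = { 32000, 48000, 96000, 192000 };
	const unsigned fam44[] = { 44100, 88200, 176400 };
	unsigned i;

	for (i = 0; i < sizeof(fam48) / sizeof(fam48[0]); i++)
		printf("fs=%-6u N=%u\n", fam48[i], fam48[i] * 128 / 1000);
	for (i = 0; i < sizeof(fam44) / sizeof(fam44[0]); i++)
		printf("fs=%-6u N=%u\n", fam44[i], fam44[i] * 128 / 900);
	/* 32000->4096, 48000->6144, 44100->6272: same as the removed constants */
	return 0;
}
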
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 2136c97aeb8e..dcf091f9d843 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -306,8 +306,12 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
const struct i2c_device_id *id)
{
struct device *dev = &stdp4028_i2c->dev;
+ int ret;
+
+ ret = ge_b850v3_lvds_init(dev);
- ge_b850v3_lvds_init(dev);
+ if (ret)
+ return ret;
ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c;
i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr);
@@ -365,8 +369,12 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
const struct i2c_device_id *id)
{
struct device *dev = &stdp2690_i2c->dev;
+ int ret;
+
+ ret = ge_b850v3_lvds_init(dev);
- ge_b850v3_lvds_init(dev);
+ if (ret)
+ return ret;
ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c;
i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index a6e8f4591e63..1ea2a1b0fe37 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -180,7 +180,7 @@ static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len)
static u8 sii8620_readb(struct sii8620 *ctx, u16 addr)
{
- u8 ret;
+ u8 ret = 0;
sii8620_read_buf(ctx, addr, &ret, 1);
return ret;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index fd7999642cf8..8b5f9241a887 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -326,7 +326,6 @@ static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
if (lpm)
val |= CMD_MODE_ALL_LP;
- dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS);
dsi_write(dsi, DSI_CMD_MODE_CFG, val);
}
@@ -488,16 +487,22 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
unsigned long mode_flags)
{
+ u32 val;
+
dsi_write(dsi, DSI_PWR_UP, RESET);
if (mode_flags & MIPI_DSI_MODE_VIDEO) {
dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE);
dw_mipi_dsi_video_mode_config(dsi);
- dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS);
} else {
dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
}
+ val = PHY_TXREQUESTCLKHS;
+ if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ val |= AUTO_CLKLANE_CTRL;
+ dsi_write(dsi, DSI_LPCLK_CTRL, val);
+
dsi_write(dsi, DSI_PWR_UP, POWERUP);
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 1a4b44923aec..281cf9cbb44c 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1702,27 +1702,6 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_connector *connector = conn_state->connector;
struct drm_crtc_state *crtc_state;
- /*
- * For compatibility with legacy users, we want to make sure that
- * we allow DPMS On<->Off modesets on unregistered connectors, since
- * legacy modesetting users will not be expecting these to fail. We do
- * not however, want to allow legacy users to assign a connector
- * that's been unregistered from sysfs to another CRTC, since doing
- * this with a now non-existent connector could potentially leave us
- * in an invalid state.
- *
- * Since the connector can be unregistered at any point during an
- * atomic check or commit, this is racy. But that's OK: all we care
- * about is ensuring that userspace can't use this connector for new
- * configurations after it's been notified that the connector is no
- * longer present.
- */
- if (!READ_ONCE(connector->registered) && crtc) {
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
- connector->base.id, connector->name);
- return -EINVAL;
- }
-
if (conn_state->crtc == crtc)
return 0;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index c22062cc9992..6060b69fa618 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -307,6 +307,26 @@ update_connector_routing(struct drm_atomic_state *state,
return 0;
}
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_connector_state->crtc);
+ /*
+ * For compatibility with legacy users, we want to make sure that
+ * we allow DPMS On->Off modesets on unregistered connectors. Modesets
+ * which would result in anything else must be considered invalid, to
+ * avoid turning on new displays on dead connectors.
+ *
+ * Since the connector can be unregistered at any point during an
+ * atomic check or commit, this is racy. But that's OK: all we care
+ * about is ensuring that userspace can't do anything but shut off the
+ * display on a connector that was destroyed after its been notified,
+ * not before.
+ */
+ if (drm_connector_is_unregistered(connector) && crtc_state->active) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
funcs = connector->helper_private;
if (funcs->atomic_best_encoder)
@@ -351,7 +371,6 @@ update_connector_routing(struct drm_atomic_state *state,
set_best_encoder(state, new_connector_state, new_encoder);
- crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
crtc_state->connectors_changed = true;
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
@@ -2919,7 +2938,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
ret = handle_conflicting_encoders(state, true);
if (ret)
- return ret;
+ goto fail;
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 6011d769d50b..7bb68ca4aa0b 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -375,7 +375,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
/* The connector should have been removed from userspace long before
* it is finally destroyed.
*/
- if (WARN_ON(connector->registered))
+ if (WARN_ON(connector->registration_state ==
+ DRM_CONNECTOR_REGISTERED))
drm_connector_unregister(connector);
if (connector->tile_group) {
@@ -432,7 +433,7 @@ int drm_connector_register(struct drm_connector *connector)
return 0;
mutex_lock(&connector->mutex);
- if (connector->registered)
+ if (connector->registration_state != DRM_CONNECTOR_INITIALIZING)
goto unlock;
ret = drm_sysfs_connector_add(connector);
@@ -452,7 +453,7 @@ int drm_connector_register(struct drm_connector *connector)
drm_mode_object_register(connector->dev, &connector->base);
- connector->registered = true;
+ connector->registration_state = DRM_CONNECTOR_REGISTERED;
goto unlock;
err_debugfs:
@@ -474,7 +475,7 @@ EXPORT_SYMBOL(drm_connector_register);
void drm_connector_unregister(struct drm_connector *connector)
{
mutex_lock(&connector->mutex);
- if (!connector->registered) {
+ if (connector->registration_state != DRM_CONNECTOR_REGISTERED) {
mutex_unlock(&connector->mutex);
return;
}
@@ -485,7 +486,7 @@ void drm_connector_unregister(struct drm_connector *connector)
drm_sysfs_connector_remove(connector);
drm_debugfs_connector_remove(connector);
- connector->registered = false;
+ connector->registration_state = DRM_CONNECTOR_UNREGISTERED;
mutex_unlock(&connector->mutex);
}
EXPORT_SYMBOL(drm_connector_unregister);
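
The drm_connector.c hunks above replace the old "registered" boolean with a three-state life cycle, which is what lets update_connector_routing() in the atomic helper tell "not registered yet" apart from "already torn down". A compilable sketch of that model; the enum and helper names mirror the ones the diff references, but the real definitions live in drm_connector.h and are not shown here:

/* three-state connector life cycle the patch introduces */
enum sketch_registration_state {
	SKETCH_CONNECTOR_INITIALIZING = 0,	/* created, not exposed to userspace yet */
	SKETCH_CONNECTOR_REGISTERED,		/* visible via sysfs/debugfs */
	SKETCH_CONNECTOR_UNREGISTERED,		/* removed; only shut-off changes allowed */
};

struct sketch_connector {
	enum sketch_registration_state registration_state;
};

/* mirrors drm_connector_is_unregistered() used by the helper and i915 MST code */
static int sketch_connector_is_unregistered(const struct sketch_connector *c)
{
	return c->registration_state == SKETCH_CONNECTOR_UNREGISTERED;
}

int main(void)
{
	struct sketch_connector c = { SKETCH_CONNECTOR_INITIALIZING };

	return sketch_connector_is_unregistered(&c);	/* 0: still initializing */
}
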
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 373bd4c2b698..84b7b22a9590 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -265,13 +265,13 @@ static ssize_t connector_write(struct file *file, const char __user *ubuf,
buf[len] = '\0';
- if (!strcmp(buf, "on"))
+ if (sysfs_streq(buf, "on"))
connector->force = DRM_FORCE_ON;
- else if (!strcmp(buf, "digital"))
+ else if (sysfs_streq(buf, "digital"))
connector->force = DRM_FORCE_ON_DIGITAL;
- else if (!strcmp(buf, "off"))
+ else if (sysfs_streq(buf, "off"))
connector->force = DRM_FORCE_OFF;
- else if (!strcmp(buf, "unspecified"))
+ else if (sysfs_streq(buf, "unspecified"))
connector->force = DRM_FORCE_UNSPECIFIED;
else
return -EINVAL;
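
strcmp() was rejecting perfectly valid writes here because `echo on > .../force` delivers "on\n"; sysfs_streq() treats a single trailing newline on either side as insignificant. A userspace stand-in for that comparison (the real helper is the kernel's sysfs_streq()):

#include <stdio.h>
#include <string.h>

/* equal if the strings match apart from one trailing newline on either side */
static int streq_nl(const char *s1, const char *s2)
{
	while (*s1 && *s1 == *s2) {
		s1++;
		s2++;
	}
	if (*s1 == *s2)
		return 1;
	if (*s1 == '\n' && !s1[1] && !*s2)
		return 1;
	if (*s2 == '\n' && !s2[1] && !*s1)
		return 1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", !strcmp("on\n", "on"), streq_nl("on\n", "on"),
	       streq_nl("off", "on"));	/* 0 1 0 */
	return 0;
}
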
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 0e4f25d63fd2..0b11210c882e 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -60,7 +60,7 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)
mutex_lock(&aux_idr_mutex);
aux_dev = idr_find(&aux_idr, index);
- if (!kref_get_unless_zero(&aux_dev->refcount))
+ if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount))
aux_dev = NULL;
mutex_unlock(&aux_idr_mutex);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 7c3c323773d3..c50fe915f5c8 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drmP.h>
+#include <linux/iopoll.h>
#include <drm/drm_fixed.h>
#include <drm/drm_atomic.h>
@@ -2115,9 +2116,9 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
int ret = 0;
- int i = 0;
struct drm_dp_mst_branch *mstb = NULL;
+ mutex_lock(&mgr->payload_lock);
mutex_lock(&mgr->lock);
if (mst_state == mgr->mst_state)
goto out_unlock;
@@ -2176,25 +2177,18 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
- mutex_lock(&mgr->payload_lock);
- memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+ memset(mgr->payloads, 0,
+ mgr->max_payloads * sizeof(mgr->payloads[0]));
+ memset(mgr->proposed_vcpis, 0,
+ mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
- for (i = 0; i < mgr->max_payloads; i++) {
- struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
-
- if (vcpi) {
- vcpi->vcpi = 0;
- vcpi->num_slots = 0;
- }
- mgr->proposed_vcpis[i] = NULL;
- }
mgr->vcpi_mask = 0;
- mutex_unlock(&mgr->payload_lock);
}
out_unlock:
mutex_unlock(&mgr->lock);
+ mutex_unlock(&mgr->payload_lock);
if (mstb)
drm_dp_put_mst_branch_device(mstb);
return ret;
@@ -2712,11 +2706,11 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
{
int ret;
- port = drm_dp_get_validated_port_ref(mgr, port);
- if (!port)
+ if (slots < 0)
return false;
- if (slots < 0)
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
return false;
if (port->vcpi.vcpi > 0) {
@@ -2731,6 +2725,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
if (ret) {
DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
+ drm_dp_put_port(port);
goto out;
}
DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
@@ -2835,6 +2830,17 @@ fail:
return ret;
}
+static int do_get_act_status(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 status;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ return status;
+}
/**
* drm_dp_check_act_status() - Check ACT handled status.
@@ -2844,33 +2850,29 @@ fail:
*/
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
- u8 status;
- int ret;
- int count = 0;
-
- do {
- ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
-
- if (ret < 0) {
- DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
- goto fail;
- }
-
- if (status & DP_PAYLOAD_ACT_HANDLED)
- break;
- count++;
- udelay(100);
-
- } while (count < 30);
-
- if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
- DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
- ret = -EINVAL;
- goto fail;
+ /*
+ * There doesn't seem to be any recommended retry count or timeout in
+ * the MST specification. Since some hubs have been observed to take
+ * over 1 second to update their payload allocations under certain
+ * conditions, we use a rather large timeout value.
+ */
+ const int timeout_ms = 3000;
+ int ret, status;
+
+ ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
+ status & DP_PAYLOAD_ACT_HANDLED || status < 0,
+ 200, timeout_ms * USEC_PER_MSEC);
+ if (ret < 0 && status >= 0) {
+ DRM_DEBUG_KMS("Failed to get ACT after %dms, last status: %02x\n",
+ timeout_ms, status);
+ return -EINVAL;
+ } else if (status < 0) {
+ DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
+ status);
+ return status;
}
+
return 0;
-fail:
- return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);
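
The rewritten drm_dp_check_act_status() above swaps a hand-rolled 30 x 100 us retry loop (about 3 ms total) for readx_poll_timeout() from <linux/iopoll.h>, with a 3 s budget and 200 us poll interval, stopping early once DP_PAYLOAD_ACT_HANDLED is set or the DPCD read itself fails. A rough userspace analog of that poll shape; the kernel macro also sleeps between reads, here an iteration budget stands in for the timeout:

#include <stdio.h>

/* poll op(arg) into *last until done(*last) holds or the budget runs out */
static int poll_until(int (*op)(void *), void *arg, int *last,
		      int (*done)(int), unsigned budget)
{
	unsigned i;

	for (i = 0; i < budget; i++) {
		*last = op(arg);
		if (done(*last))
			return 0;
	}
	return -1;	/* timed out; caller inspects *last */
}

static int fake_status(void *arg)
{
	int *reads = arg;

	return ++(*reads) >= 5 ? 0x01 : 0x00;	/* "ACT handled" on the 5th read */
}

static int handled_or_error(int status)
{
	return (status & 0x01) || status < 0;
}

int main(void)
{
	int reads = 0, status = 0;
	int ret = poll_until(fake_status, &reads, &status,
			     handled_or_error, 30);

	printf("ret=%d status=0x%02x after %d reads\n", ret, status, reads);
	return 0;
}
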
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f5926bf5dabd..108f542176b8 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -198,10 +198,11 @@ static const struct edid_quirk {
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
{ "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
- /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
+ /* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
{ "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
{ "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
{ "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
+ { "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP },
/* Windows Mixed Reality Headsets */
{ "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
@@ -4706,7 +4707,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
struct drm_display_mode *mode;
unsigned pixel_clock = (timings->pixel_clock[0] |
(timings->pixel_clock[1] << 8) |
- (timings->pixel_clock[2] << 16));
+ (timings->pixel_clock[2] << 16)) + 1;
unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index cf804389f5ec..d50a7884e69e 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -84,7 +84,7 @@ int drm_i2c_encoder_init(struct drm_device *dev,
err = encoder_drv->encoder_init(client, dev, encoder);
if (err)
- goto fail_unregister;
+ goto fail_module_put;
if (info->platform_data)
encoder->slave_funcs->set_config(&encoder->base,
@@ -92,9 +92,10 @@ int drm_i2c_encoder_init(struct drm_device *dev,
return 0;
+fail_module_put:
+ module_put(module);
fail_unregister:
i2c_unregister_device(client);
- module_put(module);
fail:
return err;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index bf90625df3c5..ac545c88a6f3 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -731,9 +731,6 @@ err:
* @file_priv: drm file-private structure
*
* Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
@@ -758,14 +755,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
- drm_gem_object_put_unlocked(obj);
if (ret)
- return ret;
+ goto err;
args->handle = handle;
args->size = obj->size;
- return 0;
+err:
+ drm_gem_object_put_unlocked(obj);
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index f8672238d444..ab8847c7dd96 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -96,6 +96,8 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
return -EFAULT;
+ memset(&v, 0, sizeof(v));
+
v = (struct drm_version) {
.name_len = v32.name_len,
.name = compat_ptr(v32.name),
@@ -134,6 +136,9 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
return -EFAULT;
+
+ memset(&uq, 0, sizeof(uq));
+
uq = (struct drm_unique){
.unique_len = uq32.unique_len,
.unique = compat_ptr(uq32.unique),
@@ -260,6 +265,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
if (copy_from_user(&c32, argp, sizeof(c32)))
return -EFAULT;
+ memset(&client, 0, sizeof(client));
+
client.idx = c32.idx;
err = drm_ioctl_kernel(file, drm_getclient, &client, DRM_UNLOCKED);
@@ -842,6 +849,8 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
+ memset(&req, 0, sizeof(req));
+
req.request.type = req32.request.type;
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
@@ -879,6 +888,8 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
struct drm_mode_fb_cmd2 req64;
int err;
+ memset(&req64, 0, sizeof(req64));
+
if (copy_from_user(&req64, argp,
offsetof(drm_mode_fb_cmd232_t, modifier)))
return -EFAULT;
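
The memset() calls added throughout drm_ioc32.c close a kernel-to-user infoleak: the native 64-bit request structs are only partially populated from their 32-bit counterparts, so uninitialized stack bytes, including struct padding, could otherwise travel back to userspace on the copy-out. An illustration of why field-by-field initialization alone is not enough (padding layout is ABI dependent):

#include <stdio.h>
#include <string.h>

struct reply {
	char type;	/* on most ABIs, 3 padding bytes follow */
	int value;
};

int main(void)
{
	struct reply r;

	memset(&r, 0, sizeof(r));	/* the step the patch adds */
	r.type = 1;
	r.value = 42;

	/* without the memset, the padding bytes between type and value
	 * would still hold whatever happened to be on the stack */
	printf("sizeof(reply)=%zu\n", sizeof(r));
	return 0;
}
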
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ba129b64b61f..b92682f037b2 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -321,7 +321,12 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CLIENT_CAP_ATOMIC:
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
return -EINVAL;
- if (req->value > 1)
+ /* The modesetting DDX has a totally broken idea of atomic. */
+ if (current->comm[0] == 'X' && req->value == 1) {
+ pr_info("broken atomic modeset userspace detected, disabling atomic\n");
+ return -EOPNOTSUPP;
+ }
+ if (req->value > 2)
return -EINVAL;
file_priv->atomic = req->value;
file_priv->universal_planes = req->value;
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 80b75501f5c6..7ed8e510565e 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -1034,11 +1034,11 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
*/
int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
{
- u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8,
- scanline & 0xff };
+ u8 payload[2] = { scanline >> 8, scanline & 0xff };
ssize_t err;
- err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_SCANLINE, payload,
+ sizeof(payload));
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index cc354b491774..652de972c3ae 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data {
int orientation;
};
-static const struct drm_dmi_panel_orientation_data acer_s1003 = {
- .width = 800,
- .height = 1280,
- .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
-};
-
static const struct drm_dmi_panel_orientation_data asus_t100ha = {
.width = 800,
.height = 1280,
@@ -100,13 +94,25 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
},
- .driver_data = (void *)&acer_s1003,
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Asus T100HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
},
.driver_data = (void *)&asus_t100ha,
+ }, { /* Asus T101HA */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Asus T103HAF */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 896e42a34895..d89a992829be 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -46,8 +46,6 @@
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
- unsigned long addr;
- size_t sz;
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
@@ -61,22 +59,13 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
return NULL;
dmah->size = size;
- dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
+ dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL);
if (dmah->vaddr == NULL) {
kfree(dmah);
return NULL;
}
- memset(dmah->vaddr, 0, size);
-
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Reserve */
- for (addr = (unsigned long)dmah->vaddr, sz = size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- SetPageReserved(virt_to_page((void *)addr));
- }
-
return dmah;
}
@@ -89,19 +78,9 @@ EXPORT_SYMBOL(drm_pci_alloc);
*/
void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
- unsigned long addr;
- size_t sz;
-
- if (dmah->vaddr) {
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Unreserve */
- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- ClearPageReserved(virt_to_page((void *)addr));
- }
+ if (dmah->vaddr)
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
- }
}
/**
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 6a859e077ea0..37ae15dc4fc6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -694,7 +694,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
ret = pm_runtime_get_sync(gpu->dev);
if (ret < 0) {
dev_err(gpu->dev, "Failed to enable GPU power domain\n");
- return ret;
+ goto pm_put;
}
etnaviv_hw_identify(gpu);
@@ -808,6 +808,7 @@ destroy_iommu:
gpu->mmu = NULL;
fail:
pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
pm_runtime_put_autosuspend(gpu->dev);
return ret;
@@ -848,7 +849,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
ret = pm_runtime_get_sync(gpu->dev);
if (ret < 0)
- return ret;
+ goto pm_put;
dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
@@ -971,6 +972,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
ret = 0;
pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
pm_runtime_put_autosuspend(gpu->dev);
return ret;
@@ -985,7 +987,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
dev_err(gpu->dev, "recover hung GPU!\n");
if (pm_runtime_get_sync(gpu->dev) < 0)
- return;
+ goto pm_put;
mutex_lock(&gpu->lock);
@@ -1005,6 +1007,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
pm_runtime_put_autosuspend(gpu->dev);
}
@@ -1278,8 +1281,10 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
if (!submit->runtime_resumed) {
ret = pm_runtime_get_sync(gpu->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(gpu->dev);
return NULL;
+ }
submit->runtime_resumed = true;
}
@@ -1296,6 +1301,7 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
ret = event_alloc(gpu, nr_events, event);
if (ret) {
DRM_ERROR("no free events\n");
+ pm_runtime_put_noidle(gpu->dev);
return NULL;
}
@@ -1459,7 +1465,7 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
if (gpu->clk_bus) {
ret = clk_prepare_enable(gpu->clk_bus);
if (ret)
- return ret;
+ goto disable_clk_reg;
}
if (gpu->clk_core) {
@@ -1482,6 +1488,9 @@ disable_clk_core:
disable_clk_bus:
if (gpu->clk_bus)
clk_disable_unprepare(gpu->clk_bus);
+disable_clk_reg:
+ if (gpu->clk_reg)
+ clk_disable_unprepare(gpu->clk_reg);
return ret;
}
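
The recurring change in the etnaviv hunks above is about reference balance: pm_runtime_get_sync() raises the device usage count even when it returns an error, so every early return after it must still drop that count, either via pm_runtime_put_noidle() or by falling through to the shared pm_put label. The pattern, written as a kernel-style fragment rather than a standalone program (do_work() is a placeholder):

#include <linux/pm_runtime.h>

static int do_work(struct device *dev);	/* placeholder */

static int do_work_with_rpm(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		/* the count was bumped anyway: drop it without idling */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	ret = do_work(dev);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return ret;
}
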
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index 4227a4006c34..b3464d2dc2b4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -4,6 +4,7 @@
* Copyright (C) 2017 Zodiac Inflight Innovations
*/
+#include "common.xml.h"
#include "etnaviv_gpu.h"
#include "etnaviv_perfmon.h"
#include "state_hi.xml.h"
@@ -31,17 +32,11 @@ struct etnaviv_pm_domain {
};
struct etnaviv_pm_domain_meta {
+ unsigned int feature;
const struct etnaviv_pm_domain *domains;
u32 nr_domains;
};
-static u32 simple_reg_read(struct etnaviv_gpu *gpu,
- const struct etnaviv_pm_domain *domain,
- const struct etnaviv_pm_signal *signal)
-{
- return gpu_read(gpu, signal->data);
-}
-
static u32 perf_reg_read(struct etnaviv_gpu *gpu,
const struct etnaviv_pm_domain *domain,
const struct etnaviv_pm_signal *signal)
@@ -75,6 +70,34 @@ static u32 pipe_reg_read(struct etnaviv_gpu *gpu,
return value;
}
+static u32 hi_total_cycle_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ u32 reg = VIVS_HI_PROFILE_TOTAL_CYCLES;
+
+ if (gpu->identity.model == chipModel_GC880 ||
+ gpu->identity.model == chipModel_GC2000 ||
+ gpu->identity.model == chipModel_GC2100)
+ reg = VIVS_MC_PROFILE_CYCLE_COUNTER;
+
+ return gpu_read(gpu, reg);
+}
+
+static u32 hi_total_idle_cycle_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ u32 reg = VIVS_HI_PROFILE_IDLE_CYCLES;
+
+ if (gpu->identity.model == chipModel_GC880 ||
+ gpu->identity.model == chipModel_GC2000 ||
+ gpu->identity.model == chipModel_GC2100)
+ reg = VIVS_HI_PROFILE_TOTAL_CYCLES;
+
+ return gpu_read(gpu, reg);
+}
+
static const struct etnaviv_pm_domain doms_3d[] = {
{
.name = "HI",
@@ -84,13 +107,13 @@ static const struct etnaviv_pm_domain doms_3d[] = {
.signal = (const struct etnaviv_pm_signal[]) {
{
"TOTAL_CYCLES",
- VIVS_HI_PROFILE_TOTAL_CYCLES,
- &simple_reg_read
+ 0,
+ &hi_total_cycle_read
},
{
"IDLE_CYCLES",
- VIVS_HI_PROFILE_IDLE_CYCLES,
- &simple_reg_read
+ 0,
+ &hi_total_idle_cycle_read
},
{
"AXI_CYCLES_READ_REQUEST_STALLED",
@@ -388,36 +411,78 @@ static const struct etnaviv_pm_domain doms_vg[] = {
static const struct etnaviv_pm_domain_meta doms_meta[] = {
{
+ .feature = chipFeatures_PIPE_3D,
.nr_domains = ARRAY_SIZE(doms_3d),
.domains = &doms_3d[0]
},
{
+ .feature = chipFeatures_PIPE_2D,
.nr_domains = ARRAY_SIZE(doms_2d),
.domains = &doms_2d[0]
},
{
+ .feature = chipFeatures_PIPE_VG,
.nr_domains = ARRAY_SIZE(doms_vg),
.domains = &doms_vg[0]
}
};
+static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu)
+{
+ unsigned int num = 0, i;
+
+ for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
+
+ if (gpu->identity.features & meta->feature)
+ num += meta->nr_domains;
+ }
+
+ return num;
+}
+
+static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu,
+ unsigned int index)
+{
+ const struct etnaviv_pm_domain *domain = NULL;
+ unsigned int offset = 0, i;
+
+ for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
+
+ if (!(gpu->identity.features & meta->feature))
+ continue;
+
+ if (index - offset >= meta->nr_domains) {
+ offset += meta->nr_domains;
+ continue;
+ }
+
+ domain = meta->domains + (index - offset);
+ }
+
+ return domain;
+}
+
int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
struct drm_etnaviv_pm_domain *domain)
{
- const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe];
+ const unsigned int nr_domains = num_pm_domains(gpu);
const struct etnaviv_pm_domain *dom;
- if (domain->iter >= meta->nr_domains)
+ if (domain->iter >= nr_domains)
return -EINVAL;
- dom = meta->domains + domain->iter;
+ dom = pm_domain(gpu, domain->iter);
+ if (!dom)
+ return -EINVAL;
domain->id = domain->iter;
domain->nr_signals = dom->nr_signals;
strncpy(domain->name, dom->name, sizeof(domain->name));
domain->iter++;
- if (domain->iter == meta->nr_domains)
+ if (domain->iter == nr_domains)
domain->iter = 0xff;
return 0;
@@ -426,14 +491,16 @@ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
struct drm_etnaviv_pm_signal *signal)
{
- const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe];
+ const unsigned int nr_domains = num_pm_domains(gpu);
const struct etnaviv_pm_domain *dom;
const struct etnaviv_pm_signal *sig;
- if (signal->domain >= meta->nr_domains)
+ if (signal->domain >= nr_domains)
return -EINVAL;
- dom = meta->domains + signal->domain;
+ dom = pm_domain(gpu, signal->domain);
+ if (!dom)
+ return -EINVAL;
if (signal->iter >= dom->nr_signals)
return -EINVAL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 2fd299a58297..cd5530bbfe2e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -267,8 +267,10 @@ static void mic_pre_enable(struct drm_bridge *bridge)
goto unlock;
ret = pm_runtime_get_sync(mic->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(mic->dev);
goto unlock;
+ }
mic_set_path(mic, 1);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 17db4b4749d5..2e8479744ca4 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -415,6 +415,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct gma_clock_t clock;
+ memset(&clock, 0, sizeof(clock));
+
switch (refclk) {
case 27000:
if (target < 200000) {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 90ed20083009..3e8b804cf7e7 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -2119,12 +2119,12 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
intel_dp->dpcd,
sizeof(intel_dp->dpcd));
cdv_intel_edp_panel_vdd_off(gma_encoder);
- if (ret == 0) {
+ if (ret <= 0) {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
cdv_intel_dp_encoder_destroy(encoder);
cdv_intel_dp_destroy(connector);
- goto err_priv;
+ goto err_connector;
} else {
DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
intel_dp->dpcd[0], intel_dp->dpcd[1],
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index e28107061148..fc9a34ed58bd 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
hdmi_dev = pci_get_drvdata(dev);
i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
- if (i2c_dev == NULL) {
- DRM_ERROR("Can't allocate interface\n");
- ret = -ENOMEM;
- goto exit;
- }
+ if (!i2c_dev)
+ return -ENOMEM;
i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
i2c_dev->status = I2C_STAT_INIT;
@@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
if (ret) {
DRM_ERROR("Failed to request IRQ for I2C controller\n");
- goto err;
+ goto free_dev;
}
/* Adapter registration */
ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
- return ret;
+ if (ret) {
+ DRM_ERROR("Failed to add I2C adapter\n");
+ goto free_irq;
+ }
-err:
+ return 0;
+
+free_irq:
+ free_irq(dev->irq, hdmi_dev);
+free_dev:
kfree(i2c_dev);
-exit:
+
return ret;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index ac32ab5aa002..2fa3c7fc4b6d 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -316,6 +316,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_err;
+ ret = -ENOMEM;
+
dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
if (!dev_priv->mmu)
goto out_err;
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 78eb10902809..076b6da44f46 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -350,6 +350,7 @@ int psb_irq_postinstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
+ unsigned int i;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -362,20 +363,12 @@ int psb_irq_postinstall(struct drm_device *dev)
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank[0].enabled)
- psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
- else
- psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[1].enabled)
- psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
- else
- psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[2].enabled)
- psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
- else
- psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ for (i = 0; i < dev->num_crtcs; ++i) {
+ if (dev->vblank[i].enabled)
+ psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ else
+ psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ }
if (dev_priv->ops->hotplug_enable)
dev_priv->ops->hotplug_enable(dev, true);
@@ -388,6 +381,7 @@ void psb_irq_uninstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
+ unsigned int i;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -396,14 +390,10 @@ void psb_irq_uninstall(struct drm_device *dev)
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank[0].enabled)
- psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[1].enabled)
- psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[2].enabled)
- psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ for (i = 0; i < dev->num_crtcs; ++i) {
+ if (dev->vblank[i].enabled)
+ psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ }
dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
_PSB_IRQ_MSVDX_FLAG |
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 3019dbc39aef..83f30d7b6abe 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -206,14 +206,41 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
- vgpu_vreg_t(vgpu, LCPLL1_CTL) |=
- LCPLL_PLL_ENABLE |
- LCPLL_PLL_LOCK;
- vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
-
+ /*
+ * Only 1 PIPE enabled in current vGPU display and PIPE_A is
+ * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A,
+ * TRANSCODER_A can be enabled. PORT_x depends on the input of
+ * setup_virtual_dp_monitor, we can bind DPLL0 to any PORT_x
+ * so we fixed to DPLL0 here.
+ * Setup DPLL0: DP link clk 1620 MHz, non SSC, DP Mode
+ */
+ vgpu_vreg_t(vgpu, DPLL_CTRL1) =
+ DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0);
+ vgpu_vreg_t(vgpu, DPLL_CTRL1) |=
+ DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0);
+ vgpu_vreg_t(vgpu, LCPLL1_CTL) =
+ LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK;
+ vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0);
+ /*
+ * Golden M/N are calculated based on:
+ * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID),
+ * DP link clk 1620 MHz and non-constant_n.
+ * TODO: calculate DP link symbol clk and stream clk m/n.
+ */
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT;
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e;
+ vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000;
+ vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e;
+ vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_B);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B);
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
@@ -234,6 +261,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_C);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
@@ -254,6 +287,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_D);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 46c8b720e336..3e3876d141ce 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -128,7 +128,7 @@ static bool intel_get_gvt_attrs(struct attribute ***type_attrs,
return true;
}
-static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
int i, j;
struct intel_vgpu_type *type;
@@ -146,7 +146,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
gvt_vgpu_type_groups[i] = group;
}
- return true;
+ return 0;
unwind:
for (j = 0; j < i; j++) {
@@ -154,7 +154,7 @@ unwind:
kfree(group);
}
- return false;
+ return -ENOMEM;
}
static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
@@ -416,7 +416,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
goto out_clean_thread;
ret = intel_gvt_init_vgpu_type_groups(gvt);
- if (ret == false) {
+ if (ret) {
gvt_err("failed to init vgpu type groups: %d\n", ret);
goto out_clean_types;
}
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index e4b9eb1f6b60..f6b81f3256cf 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -570,6 +570,9 @@ struct drm_i915_reg_descriptor {
#define REG32(_reg, ...) \
{ .addr = (_reg), __VA_ARGS__ }
+#define REG32_IDX(_reg, idx) \
+ { .addr = _reg(idx) }
+
/*
* Convenience macro for adding 64-bit registers.
*
@@ -667,6 +670,7 @@ static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
REG32(BCS_SWCTRL),
REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+ REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE),
REG64_IDX(BCS_GPR, 0),
REG64_IDX(BCS_GPR, 1),
REG64_IDX(BCS_GPR, 2),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index db2e9af49ae6..37c80cfecd09 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -33,6 +33,8 @@
#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>
+#include <asm/hypervisor.h>
+
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -2683,7 +2685,9 @@ static inline bool intel_vtd_active(void)
if (intel_iommu_gfx_mapped)
return true;
#endif
- return false;
+
+ /* Running as a guest, we assume the host is enforcing VT'd */
+ return !hypervisor_is_type(X86_HYPER_NATIVE);
}
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f08c54740cbe..8b5b147cdfd1 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -380,7 +380,7 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
return true;
if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
- (vma->node.start + vma->node.size - 1) >> 32)
+ (vma->node.start + vma->node.size + 4095) >> 32)
return true;
if (flags & __EXEC_OBJECT_NEEDS_MAP &&
@@ -1604,7 +1604,9 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
* happened we would make the mistake of assuming that the
* relocations were valid.
*/
- user_access_begin();
+ if (!user_access_begin(VERIFY_WRITE, urelocs, size))
+ goto end_user;
+
for (copied = 0; copied < nreloc; copied++)
unsafe_put_user(-1,
&urelocs[copied].presumed_offset,
@@ -2649,7 +2651,17 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
unsigned int i;
/* Copy the new buffer offsets back to the user's exec list. */
- user_access_begin();
+ /*
+ * Note: count * sizeof(*user_exec_list) does not overflow,
+ * because we checked 'count' in check_buffer_count().
+ *
+ * And this range already got effectively checked earlier
+ * when we did the "copy_from_user()" above.
+ */
+ if (!user_access_begin(VERIFY_WRITE, user_exec_list,
+ count * sizeof(*user_exec_list)))
+ goto end_user;
+
for (i = 0; i < args->buffer_count; i++) {
if (!(exec2_list[i].offset & UPDATE))
continue;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a262a64f5625..ba24ac698e8b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -268,6 +268,8 @@ static int compress_page(struct compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
+
+ cond_resched();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
@@ -347,6 +349,7 @@ static int compress_page(struct compress *c,
if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
+ cond_resched();
return 0;
}
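Editor's note: the GPU error-capture path above compresses or copies one page per iteration with no sleeping calls in between, so the added cond_resched() keeps a large capture from stalling other tasks on non-preemptible kernels. The general idiom, sketched with illustrative names:

#include <linux/sched.h>

/* Illustrative: walk a large buffer page by page without monopolising the CPU. */
static void process_buffer(void (*process_one)(void *page), void **pages,
			   unsigned long nr_pages)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		process_one(pages[i]);
		cond_resched();	/* voluntary preemption point between pages */
	}
}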
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 29877969310d..b7c398232136 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3821,6 +3821,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
val = I915_READ(GEN11_DE_HPD_IMR);
val &= ~hotplug_irqs;
+ val |= ~enabled_irqs & hotplug_irqs;
I915_WRITE(GEN11_DE_HPD_IMR, val);
POSTING_READ(GEN11_DE_HPD_IMR);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 20cd4c8acecc..77a2f7fc2b37 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -6288,11 +6288,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_connector_get_hw_state;
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
- (port == PORT_B || port == PORT_C ||
- port == PORT_D || port == PORT_F))
- intel_dp_mst_encoder_init(intel_dig_port,
- intel_connector->base.base.id);
+ intel_dp_mst_encoder_init(intel_dig_port,
+ intel_connector->base.base.id);
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 58ba14966d4f..8a19cfcfc4f1 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -77,7 +77,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->pbn = mst_pbn;
/* Zombie connectors can't have VCPI slots */
- if (READ_ONCE(connector->registered)) {
+ if (!drm_connector_is_unregistered(connector)) {
slots = drm_dp_atomic_find_vcpi_slots(state,
&intel_dp->mst_mgr,
port,
@@ -317,7 +317,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- if (!READ_ONCE(connector->registered))
+ if (drm_connector_is_unregistered(connector))
return intel_connector_update_modes(connector, NULL);
edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
@@ -333,7 +333,7 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- if (!READ_ONCE(connector->registered))
+ if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr,
intel_connector->port);
@@ -376,7 +376,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
int bpp = 24; /* MST uses fixed bpp */
int max_rate, mode_rate, max_lanes, max_link_clock;
- if (!READ_ONCE(connector->registered))
+ if (drm_connector_is_unregistered(connector))
return MODE_ERROR;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -588,21 +588,31 @@ intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
int
intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ enum port port = intel_dig_port->base.port;
int ret;
- intel_dp->can_mst = true;
+ if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
+ return 0;
+
+ if (INTEL_GEN(i915) < 12 && port == PORT_A)
+ return 0;
+
+ if (INTEL_GEN(i915) < 11 && port == PORT_E)
+ return 0;
+
intel_dp->mst_mgr.cbs = &mst_cbs;
/* create encoders */
intel_dp_create_fake_mst_encoders(intel_dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev,
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
&intel_dp->aux, 16, 3, conn_base_id);
- if (ret) {
- intel_dp->can_mst = false;
+ if (ret)
return ret;
- }
+
+ intel_dp->can_mst = true;
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8d731eb1de69..aa8d2aca0f02 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2951,7 +2951,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
const char *name,
- const uint16_t wm[8])
+ const uint16_t wm[])
{
int level, max_level = ilk_wm_max_level(dev_priv);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 42daa5c9ff8e..a4dba034dca4 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -206,6 +206,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
+ if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+ dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+ return;
+ }
+
drm_panel_prepare(imx_ldb_ch->panel);
if (dual) {
@@ -264,6 +269,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
u32 bus_format = imx_ldb_ch->bus_format;
+ if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+ dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+ return;
+ }
+
if (mode->clock > 170000) {
dev_warn(ldb->dev,
"%s: mode exceeds 170 MHz pixel clock\n", __func__);
@@ -311,18 +321,19 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
{
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
int mux, ret;
drm_panel_disable(imx_ldb_ch->panel);
- if (imx_ldb_ch == &ldb->channel[0])
+ if (imx_ldb_ch == &ldb->channel[0] || dual)
ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
- else if (imx_ldb_ch == &ldb->channel[1])
+ if (imx_ldb_ch == &ldb->channel[1] || dual)
ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
- if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
+ if (dual) {
clk_disable_unprepare(ldb->clk[0]);
clk_disable_unprepare(ldb->clk[1]);
}
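Editor's note: drm_of_encoder_active_port_id() can return a negative errno, and imx-ldb was using that value directly as an index into ldb->clk_sel[], so both the enable and mode_set paths above now reject an out-of-range mux before touching the array. A short sketch of the guard, with an illustrative structure:

#include <linux/clk.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */

struct example_mux {
	struct clk *clk_sel[4];	/* illustrative: one parent clock per mux input */
};

static struct clk *example_mux_to_clk(struct example_mux *m, int mux)
{
	/* mux may be a negative errno from the port-id lookup helper */
	if (mux < 0 || mux >= ARRAY_SIZE(m->clk_sel))
		return NULL;

	return m->clk_sel[mux];
}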
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index cffd3310240e..c19c1dfbfcdc 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -498,6 +498,13 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
return 0;
}
+static void imx_tve_disable_regulator(void *data)
+{
+ struct imx_tve *tve = data;
+
+ regulator_disable(tve->dac_reg);
+}
+
static bool imx_tve_readable_reg(struct device *dev, unsigned int reg)
{
return (reg % 4 == 0) && (reg <= 0xdc);
@@ -622,6 +629,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
ret = regulator_enable(tve->dac_reg);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, imx_tve_disable_regulator, tve);
+ if (ret)
+ return ret;
}
tve->clk = devm_clk_get(dev, "tve");
@@ -668,18 +678,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
return 0;
}
-static void imx_tve_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct imx_tve *tve = dev_get_drvdata(dev);
-
- if (!IS_ERR(tve->dac_reg))
- regulator_disable(tve->dac_reg);
-}
-
static const struct component_ops imx_tve_ops = {
.bind = imx_tve_bind,
- .unbind = imx_tve_unbind,
};
static int imx_tve_probe(struct platform_device *pdev)
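Editor's note: the imx-tve change drops the manual .unbind handler in favour of devm_add_action_or_reset(), so the regulator is disabled automatically both when a later bind step fails and when the device is unbound. A minimal sketch of that devm cleanup idiom (the driver struct is illustrative):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

struct example_priv {
	struct regulator *supply;
};

static void example_disable_regulator(void *data)
{
	struct example_priv *priv = data;

	regulator_disable(priv->supply);
}

static int example_enable_regulator(struct device *dev, struct example_priv *priv)
{
	int ret;

	ret = regulator_enable(priv->supply);
	if (ret)
		return ret;

	/* Undo the enable automatically if any later step fails, and again
	 * when the device is unbound.
	 */
	return devm_add_action_or_reset(dev, example_disable_regulator, priv);
}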
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 947bc6d62302..d14321763607 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -600,8 +600,13 @@ err_pm:
pm_runtime_disable(dev);
err_node:
of_node_put(private->mutex_node);
- for (i = 0; i < DDP_COMPONENT_ID_MAX; i++)
+ for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) {
of_node_put(private->comp_node[i]);
+ if (private->ddp_comp[i]) {
+ put_device(private->ddp_comp[i]->larb_dev);
+ private->ddp_comp[i] = NULL;
+ }
+ }
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index f7e6aa1b5b7d..83d4a710601d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -108,6 +108,16 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
true, true);
}
+static void mtk_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+ state->pending.enable = false;
+ wmb(); /* Make sure the above parameter is set before update */
+ state->pending.dirty = true;
+}
+
static void mtk_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -122,6 +132,11 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return;
+ if (!plane->state->visible) {
+ mtk_plane_atomic_disable(plane, old_state);
+ return;
+ }
+
gem = fb->obj[0];
mtk_gem = to_mtk_gem_obj(gem);
addr = mtk_gem->dma_addr;
@@ -143,16 +158,6 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
state->pending.dirty = true;
}
-static void mtk_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
-
- state->pending.enable = false;
- wmb(); /* Make sure the above parameter is set before update */
- state->pending.dirty = true;
-}
-
static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
.atomic_check = mtk_plane_atomic_check,
.atomic_update = mtk_plane_atomic_update,
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 62444a3a5742..331fb0c12929 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1476,25 +1476,30 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
dev_err(dev,
"Failed to get system configuration registers: %d\n",
ret);
- return ret;
+ goto put_device;
}
hdmi->sys_regmap = regmap;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdmi->regs = devm_ioremap_resource(dev, mem);
- if (IS_ERR(hdmi->regs))
- return PTR_ERR(hdmi->regs);
+ if (IS_ERR(hdmi->regs)) {
+ ret = PTR_ERR(hdmi->regs);
+ goto put_device;
+ }
remote = of_graph_get_remote_node(np, 1, 0);
- if (!remote)
- return -EINVAL;
+ if (!remote) {
+ ret = -EINVAL;
+ goto put_device;
+ }
if (!of_device_is_compatible(remote, "hdmi-connector")) {
hdmi->next_bridge = of_drm_find_bridge(remote);
if (!hdmi->next_bridge) {
dev_err(dev, "Waiting for external bridge\n");
of_node_put(remote);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto put_device;
}
}
@@ -1503,7 +1508,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n",
remote);
of_node_put(remote);
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_device;
}
of_node_put(remote);
@@ -1511,10 +1517,14 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
of_node_put(i2c_np);
if (!hdmi->ddc_adpt) {
dev_err(dev, "Failed to get ddc i2c adapter by node\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_device;
}
return 0;
+put_device:
+ put_device(hdmi->cec_dev);
+ return ret;
}
/*
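Editor's note: mtk_hdmi_dt_parse_pdata() takes a reference on the CEC device earlier in the function, and the hunks above funnel every subsequent failure through a single put_device label so that reference is never leaked. The unwinding idiom, sketched with placeholder names:

#include <linux/device.h>
#include <linux/err.h>

/* Illustrative: drop a previously taken device reference on every error path
 * through one label instead of at each early return.
 */
static int example_parse(struct device *provider, void __iomem *regs)
{
	int ret;

	get_device(provider);		/* reference that must not be leaked */

	if (IS_ERR(regs)) {
		ret = PTR_ERR(regs);
		goto put_provider;
	}

	return 0;

put_provider:
	put_device(provider);
	return ret;
}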
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 588b3b0c8315..1887473cdd79 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -384,6 +384,17 @@ static int meson_probe_remote(struct platform_device *pdev,
return count;
}
+static void meson_drv_shutdown(struct platform_device *pdev)
+{
+ struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
+
+ if (!priv)
+ return;
+
+ drm_kms_helper_poll_fini(priv->drm);
+ drm_atomic_helper_shutdown(priv->drm);
+}
+
static int meson_drv_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
@@ -428,6 +439,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
+ .shutdown = meson_drv_shutdown,
.driver = {
.name = "meson-drm",
.of_match_table = dt_match,
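Editor's note: wiring a platform .shutdown callback to drm_atomic_helper_shutdown() disables CRTCs and powers panels/bridges down cleanly on reboot or kexec; the msm driver later in this series gains the same callback. Hedged sketch of the wiring (driver and struct names are placeholders):

#include <drm/drm_atomic_helper.h>
#include <linux/platform_device.h>

struct example_drm {
	struct drm_device *drm;
};

static void example_drv_shutdown(struct platform_device *pdev)
{
	struct example_drm *priv = platform_get_drvdata(pdev);

	if (!priv)		/* probe may not have completed */
		return;

	drm_atomic_helper_shutdown(priv->drm);
}

static struct platform_driver example_platform_driver = {
	/* .probe and .remove omitted for brevity */
	.shutdown = example_drv_shutdown,
	.driver = {
		.name = "example-drm",
	},
};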
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index ba6f3c14495c..ba513018534e 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -681,8 +681,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
- gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
-
/* Enable USE_RETENTION_FLOPS */
gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
@@ -1196,8 +1194,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
- *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
- REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+ REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
return 0;
}
@@ -1474,18 +1472,31 @@ static const struct adreno_gpu_funcs funcs = {
static void check_speed_bin(struct device *dev)
{
struct nvmem_cell *cell;
- u32 bin, val;
+ u32 val;
+
+ /*
+ * If the OPP table specifies an opp-supported-hw property then we have
+ * to set something with dev_pm_opp_set_supported_hw() or the table
+ * doesn't get populated, so pick an arbitrary value that should
+ * ensure the default frequencies are selected without conflicting with
+ * any actual bins.
+ */
+ val = 0x80;
cell = nvmem_cell_get(dev, "speed_bin");
- /* If a nvmem cell isn't defined, nothing to do */
- if (IS_ERR(cell))
- return;
+ if (!IS_ERR(cell)) {
+ void *buf = nvmem_cell_read(cell, NULL);
+
+ if (!IS_ERR(buf)) {
+ u8 bin = *((u8 *) buf);
- bin = *((u32 *) nvmem_cell_read(cell, NULL));
- nvmem_cell_put(cell);
+ val = (1 << bin);
+ kfree(buf);
+ }
- val = (1 << bin);
+ nvmem_cell_put(cell);
+ }
dev_pm_opp_set_supported_hw(dev, &val, 1);
}
@@ -1518,7 +1529,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
check_speed_bin(&pdev->dev);
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+ /* Restricting nr_rings to 1 to temporarily disable preemption */
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
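Editor's note: check_speed_bin() above now treats the fuse as optional, reads it as a single byte, frees the buffer that nvmem_cell_read() allocates, and always programs dev_pm_opp_set_supported_hw() with either the fused bin or a default mask so the OPP table still populates. A hedged sketch of that read-with-fallback idiom (function name is illustrative; the cell id and default mirror the hunk):

#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/types.h>

static void example_set_supported_hw(struct device *dev)
{
	struct nvmem_cell *cell;
	u32 supported = 0x80;	/* arbitrary default bucket, as in the hunk */

	cell = nvmem_cell_get(dev, "speed_bin");
	if (!IS_ERR(cell)) {
		size_t len;
		u8 *buf = nvmem_cell_read(cell, &len);

		if (!IS_ERR(buf)) {
			if (len >= 1 && buf[0] < 32)	/* keep the shift defined */
				supported = 1u << buf[0];
			kfree(buf);	/* nvmem_cell_read() returns a kmalloc'd copy */
		}
		nvmem_cell_put(cell);
	}

	dev_pm_opp_set_supported_hw(dev, &supported, 1);
}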
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 9cde79a7335c..739ca9c2081a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -117,12 +117,22 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
int ret;
u32 val;
+ u32 mask, reset_val;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
+ if (val <= 0x20010004) {
+ mask = 0xffffffff;
+ reset_val = 0xbabeface;
+ } else {
+ mask = 0x1ff;
+ reset_val = 0x100;
+ }
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
- val == 0xbabeface, 100, 10000);
+ (val & mask) == reset_val, 100, 10000);
if (ret)
dev_err(gmu->dev, "GMU firmware initialization timed out\n");
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index c629f742a1d1..c280fdc44939 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -713,8 +713,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
/* Force the GPU power on so we can read this register */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
- *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
- REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+ *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
return 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 93d70f4a2154..c9f831604558 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -221,7 +221,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
ring->next = ring->start;
/* reset completed fence seqno: */
- ring->memptrs->fence = ring->seqno;
+ ring->memptrs->fence = ring->fctx->completed_fence;
ring->memptrs->rptr = 0;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 4752f08f0884..3c3b7f7013e8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -659,7 +659,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
if (!fevent) {
- DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+ DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index ec3fd67378c1..19e2753ffe07 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2412,7 +2412,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
if (!dpu_enc)
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
drm_enc_mode, NULL);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index d6f79dc755b4..14e2ce87bab1 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -78,9 +78,17 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
| MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
+ /*
+ * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
+ * the vsync_clk, equating to roughly half the desired panel refresh rate.
+ * This is only necessary as a stability fallback if interrupts from the
+ * panel arrive too late or not at all, but it is currently used by default
+ * because these panel interrupts are not wired up yet.
+ */
mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
mdp5_write(mdp5_kms,
- REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
+ REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
+
mdp5_write(mdp5_kms,
REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index bddd625ab91b..25691b7cece1 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -1021,7 +1021,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
return 0;
fail:
- mdp5_destroy(pdev);
+ if (mdp5_kms)
+ mdp5_destroy(pdev);
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index 1ca6c69516f5..4c037d855b27 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -147,7 +147,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.disable = dsi_20nm_phy_disable,
.init = msm_dsi_phy_init_common,
},
- .io_start = { 0xfd998300, 0xfd9a0300 },
+ .io_start = { 0xfd998500, 0xfd9a0500 },
.num_dsi_phy = 2,
};
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 21a69b046625..d15511b521cb 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -554,6 +554,7 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
u32 val;
+ int ret;
val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
val &= ~0x3;
@@ -568,6 +569,13 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
val |= cached->pll_mux;
pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
+ ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate);
+ if (ret) {
+ DRM_DEV_ERROR(&pll_10nm->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
DBG("DSI PLL%d", pll_10nm->id);
return 0;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 6f81de85fb86..08ff9d7645d7 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -483,20 +483,22 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
drm_mode_config_init(ddev);
- /* Bind all our sub-components: */
- ret = component_bind_all(dev, ddev);
+ ret = msm_init_vram(ddev);
if (ret)
goto err_destroy_mdss;
- ret = msm_init_vram(ddev);
+ /* Bind all our sub-components: */
+ ret = component_bind_all(dev, ddev);
if (ret)
- goto err_msm_uninit;
+ goto err_destroy_mdss;
if (!dev->dma_parms) {
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
+ if (!dev->dma_parms) {
+ ret = -ENOMEM;
+ goto err_msm_uninit;
+ }
}
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
@@ -1358,6 +1360,17 @@ static int msm_pdev_remove(struct platform_device *pdev)
return 0;
}
+static void msm_pdev_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+ struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
+
+ if (!priv || !priv->kms)
+ return;
+
+ drm_atomic_helper_shutdown(drm);
+}
+
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
@@ -1369,6 +1382,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver msm_platform_driver = {
.probe = msm_pdev_probe,
.remove = msm_pdev_remove,
+ .shutdown = msm_pdev_shutdown,
.driver = {
.name = "msm",
.of_match_table = dt_match,
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 349c12f670eb..6c11be79574e 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -56,7 +56,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
int ret;
if (fence > fctx->last_fence) {
- DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
+ DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n",
fctx->name, fence, fctx->last_fence);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index e53b7cb2211d..7c0b30c955c3 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -61,7 +61,7 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
- if (get_dma_ops(dev)) {
+ if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
} else {
@@ -74,7 +74,7 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
- if (get_dma_ops(dev)) {
+ if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
} else {
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 5115f75b5b7f..325da440264a 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -78,8 +78,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
queue->flags = flags;
if (priv->gpu) {
- if (prio >= priv->gpu->nr_rings)
+ if (prio >= priv->gpu->nr_rings) {
+ kfree(queue);
return -EINVAL;
+ }
queue->prio = prio;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 10107e551fac..fbe156302ee8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -131,7 +131,7 @@ nv50_dmac_destroy(struct nv50_dmac *dmac)
int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
- const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
+ const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
struct nv50_dmac *dmac)
{
struct nouveau_cli *cli = (void *)device->object.client;
@@ -166,7 +166,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
- if (!syncbuf)
+ if (syncbuf < 0)
return 0;
ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
@@ -909,8 +909,10 @@ nv50_mstc_detect(struct drm_connector *connector, bool force)
return connector_status_disconnected;
ret = pm_runtime_get_sync(connector->dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
+ }
conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
mstc->port);
@@ -1920,8 +1922,10 @@ nv50_disp_atomic_commit(struct drm_device *dev,
int ret, i;
ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(dev->dev);
return ret;
+ }
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
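Editor's note: this is the first of many hunks in this series (nouveau, then radeon) fixing the same bug: pm_runtime_get_sync() raises the device usage count even when it returns an error, so bailing out without a put leaves the count unbalanced and blocks runtime suspend forever. Minimal sketch of the corrected pattern:

#include <linux/pm_runtime.h>

static int do_work_with_device_awake(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* The usage count was bumped even on failure; balance it. */
		pm_runtime_put_autosuspend(dev);
		return ret;
	}

	/* ... access the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}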
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 66c125a6b0b3..55205d23360c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -68,7 +68,7 @@ struct nv50_dmac {
int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size,
- u64 syncbuf, struct nv50_dmac *dmac);
+ s64 syncbuf, struct nv50_dmac *dmac);
void nv50_dmac_destroy(struct nv50_dmac *);
u32 *evo_wait(struct nv50_dmac *, int nr);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index f7dbd965e4e7..b49a212af4d8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -68,7 +68,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
int ret;
ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
- &oclass, 0, &args, sizeof(args), 0,
+ &oclass, 0, &args, sizeof(args), -1,
&wndw->wimm);
if (ret) {
NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fb0094fc5583..b71afde8f115 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -551,8 +551,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
pm_runtime_get_noresume(dev->dev);
} else {
ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(dev->dev);
return conn_status;
+ }
}
nv_encoder = nouveau_connector_ddc_detect(connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 9635704a1d86..4561a786fab0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -161,8 +161,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
}
ret = pm_runtime_get_sync(drm->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(drm->dev);
return ret;
+ }
+
ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
pm_runtime_put_autosuspend(drm->dev);
if (ret < 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2b7a54cc3c9e..81999bed1e4a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -899,8 +899,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
/* need to bring up power immediately if opening device */
ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(dev->dev);
return ret;
+ }
get_task_comm(tmpname, current);
snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
@@ -980,8 +982,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
long ret;
ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(dev->dev);
return ret;
+ }
switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
case DRM_NOUVEAU_NVIF:
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0f64c0a1d4b3..d4fe52ec4c96 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -189,8 +189,10 @@ nouveau_fbcon_open(struct fb_info *info, int user)
struct nouveau_fbdev *fbcon = info->par;
struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
int ret = pm_runtime_get_sync(drm->dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put(drm->dev->dev);
return ret;
+ }
return 0;
}
@@ -315,7 +317,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct nouveau_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd = {};
int ret;
mode_cmd.width = sizes->surface_width;
@@ -599,6 +601,7 @@ fini:
drm_fb_helper_fini(&fbcon->helper);
free:
kfree(fbcon);
+ drm->fbcon = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index b56524d343c3..a98fccb0d32f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -46,8 +46,10 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
int ret;
ret = pm_runtime_get_sync(dev);
- if (WARN_ON(ret < 0 && ret != -EACCES))
+ if (WARN_ON(ret < 0 && ret != -EACCES)) {
+ pm_runtime_put_autosuspend(dev);
return;
+ }
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -80,8 +82,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
return ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(dev);
goto out;
+ }
ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index c002f8968507..9682f30ab6f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -176,6 +176,8 @@ void
nouveau_mem_del(struct ttm_mem_reg *reg)
{
struct nouveau_mem *mem = nouveau_mem(reg);
+ if (!mem)
+ return;
nouveau_mem_fini(mem);
kfree(reg->mm_node);
reg->mm_node = NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 8ebdc74cc0ad..326948b65542 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -96,12 +96,9 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
else
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
- if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
- /*
- * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
- * and thus our nouveau_sgdma_destroy() hook, so we don't need
- * to free nvbe here.
- */
+ if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
+ kfree(nvbe);
return NULL;
+ }
return &nvbe->ttm.ttm;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 7deb81b6dbac..4b571cc6bc70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
image.base, image.type, image.size);
- if (!shadow_fetch(bios, mthd, image.size)) {
+ if (!shadow_fetch(bios, mthd, image.base + image.size)) {
nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
index 9b91da09dc5f..8d9812a51ef6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
@@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name)
else
return ERR_PTR(-ENODEV);
+ if (!pdev->rom || pdev->romlen == 0)
+ return ERR_PTR(-ENODEV);
+
if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
+ priv->size = pdev->romlen;
if (ret = -ENODEV,
- (priv->rom = pci_platform_rom(pdev, &priv->size)))
+ (priv->rom = ioremap(pdev->rom, pdev->romlen)))
return priv;
kfree(priv);
}
@@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(ret);
}
+static void
+platform_fini(void *data)
+{
+ struct priv *priv = data;
+
+ iounmap(priv->rom);
+ kfree(priv);
+}
+
const struct nvbios_source
nvbios_platform = {
.name = "PLATFORM",
.init = platform_init,
- .fini = (void(*)(void *))kfree,
+ .fini = platform_fini,
.read = pcirom_read,
.rw = true,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index c8ab1b5741a3..db7769cb33eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (retries)
udelay(400);
- /* transaction request, wait up to 1ms for it to complete */
+ /* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
- timeout = 1000;
+ timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
udelay(1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index 7ef60895f43a..d0e80ad52684 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -33,7 +33,7 @@ static void
gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
{
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000);
}
static int
@@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
return -EBUSY;
}
- } while (ctrl & 0x03010000);
+ } while (ctrl & 0x07010000);
/* set some magic, and wait up to 1ms for it to appear */
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq);
timeout = 1000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
@@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
gm200_i2c_aux_fini(aux);
return -EBUSY;
}
- } while ((ctrl & 0x03000000) != urep);
+ } while ((ctrl & 0x07000000) != urep);
return 0;
}
@@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (retries)
udelay(400);
- /* transaction request, wait up to 1ms for it to complete */
+ /* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
- timeout = 1000;
+ timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + base);
udelay(1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index d80dbc8f09b2..55a4ea4393c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include <subdev/timer.h>
static void
gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
@@ -42,7 +42,6 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
@@ -53,7 +52,6 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
}
void
@@ -90,6 +88,12 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
intr1 &= ~stat;
}
}
+
+ nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f))
+ break;
+ );
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index 9025ed1bd2a9..4caf3ef087e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
+#include <subdev/timer.h>
static void
gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
}
static void
@@ -42,7 +42,6 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
}
static void
@@ -53,7 +52,6 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
- nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
}
void
@@ -90,6 +88,12 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
intr1 &= ~stat;
}
}
+
+ nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
+ break;
+ );
}
static int
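Editor's note: rather than acking a per-unit status bit, the gf100/gk104 ibus handlers above now poke the low bits of the arbiter control register and poll for up to 2 ms until the field reads back as clear. A generic sketch of the same write-then-poll-until-clear idiom using the common iopoll helper; the offset and masks are illustrative, and the atomic variant is used because the originals run from an interrupt handler:

#include <linux/io.h>
#include <linux/iopoll.h>

static int example_kick_and_wait_idle(void __iomem *regs)
{
	u32 ctrl = readl(regs + 0x4c);
	u32 val;

	writel((ctrl & ~0x3f) | 0x02, regs + 0x4c);

	/* busy-poll every 100 us, give up after 2 ms */
	return readl_poll_timeout_atomic(regs + 0x4c, val,
					 !(val & 0x3f), 100, 2000);
}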
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index ee11ccaf0563..cb51e248cb41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -316,9 +316,9 @@ nvkm_mmu_vram(struct nvkm_mmu *mmu)
{
struct nvkm_device *device = mmu->subdev.device;
struct nvkm_mm *mm = &device->fb->ram->vram;
- const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
- const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
- const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+ const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+ const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+ const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
u8 heap = NVKM_MEM_VRAM;
int heapM, heapN, heapU;
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 3bfb95d230e0..d8fb686c1fda 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -193,7 +193,7 @@ static int __init omapdss_boot_init(void)
dss = of_find_matching_node(NULL, omapdss_of_match);
if (dss == NULL || !of_device_is_available(dss))
- return 0;
+ goto put_node;
omapdss_walk_device(dss, true);
@@ -218,6 +218,8 @@ static int __init omapdss_boot_init(void)
kfree(n);
}
+put_node:
+ of_node_put(dss);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index e884183c018a..cb5ce73f7269 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -763,6 +763,7 @@ static int omap_dmm_probe(struct platform_device *dev)
&omap_dmm->refill_pa, GFP_KERNEL);
if (!omap_dmm->refill_va) {
dev_err(&dev->dev, "could not allocate refill memory\n");
+ ret = -ENOMEM;
goto fail;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 654fea2b4312..8814aa38c5e7 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1503,7 +1503,7 @@ static const struct drm_display_mode lg_lb070wv8_mode = {
static const struct panel_desc lg_lb070wv8 = {
.modes = &lg_lb070wv8_mode,
.num_modes = 1,
- .bpc = 16,
+ .bpc = 8,
.size = {
.width = 151,
.height = 91,
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 208af9f37914..5177e37d81e6 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -472,9 +472,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
return ret;
ret = qxl_release_reserve_list(release, true);
- if (ret)
+ if (ret) {
+ qxl_release_free(qdev, release);
return ret;
-
+ }
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
@@ -500,8 +501,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
/* no need to add a release to the fence for this surface bo,
since it is only released when we ask to destroy the surface
and it would never signal otherwise */
- qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
surf->hw_surf_alloc = true;
spin_lock(&qdev->surf_id_idr_lock);
@@ -543,9 +544,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
cmd->surface_id = id;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
return 0;
}
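Editor's note: every qxl hunk in this group swaps the same two calls: the release's buffer objects are fenced before the command is pushed to the ring, because once the push notifies the host the release can be processed and freed at any time. Sketch of the corrected ordering, mirroring the driver's own helpers (assumes the driver-internal qxl_drv.h declarations, as used in the files above):

#include "qxl_drv.h"

static void example_submit(struct qxl_device *qdev,
			   struct qxl_release *release, uint32_t type)
{
	/* Fence first: after the push the host may complete and free the
	 * release asynchronously.
	 */
	qxl_release_fence_buffer_objects(release);
	qxl_push_command_ring_release(qdev, release, type, false);
}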
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 0570c6826bff..769e12b4c13c 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -532,8 +532,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
cmd->u.set.visible = 1;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
return ret;
@@ -694,8 +694,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
if (old_cursor_bo)
qxl_bo_unref(&old_cursor_bo);
@@ -740,8 +740,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
cmd->type = QXL_CURSOR_HIDE;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
}
static int qxl_plane_prepare_fb(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 4d8681e84e68..d009f2bc28e9 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -241,8 +241,8 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
qxl_bo_physical_address(qdev, dimage->bo, 0);
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
out_free_palette:
if (palette_bo)
@@ -348,9 +348,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
goto out_release_backoff;
rects = drawable_set_clipping(qdev, num_clips, clips_bo);
- if (!rects)
+ if (!rects) {
+ ret = -EINVAL;
goto out_release_backoff;
-
+ }
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
@@ -381,8 +382,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
}
qxl_bo_kunmap(clips_bo);
- qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
out_release_backoff:
if (ret)
@@ -432,8 +433,8 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
drawable->u.copy_bits.src_pos.y = sy;
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
out_free_release:
if (ret)
@@ -476,8 +477,8 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
out_free_release:
if (ret)
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index 7fbcc35e8ad3..c89c10055641 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -210,7 +210,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
break;
default:
DRM_ERROR("unsupported image bit depth\n");
- return -EINVAL; /* TODO: cleanup */
+ qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+ return -EINVAL;
}
image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
image->u.bitmap.x = width;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 6cc9f3367fa0..5536b2f7b7b0 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -257,11 +257,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
apply_surf_reloc(qdev, &reloc_info[i]);
}
+ qxl_release_fence_buffer_objects(release);
ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
- if (ret)
- qxl_release_backoff_reserve_list(release);
- else
- qxl_release_fence_buffer_objects(release);
out_free_bos:
out_free_release:
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 771250aed78d..2412d317d992 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -181,7 +181,7 @@ int qxl_device_init(struct qxl_device *qdev,
&(qdev->ram_header->cursor_ring_hdr),
sizeof(struct qxl_command),
QXL_CURSOR_RING_SIZE,
- qdev->io_base + QXL_IO_NOTIFY_CMD,
+ qdev->io_base + QXL_IO_NOTIFY_CURSOR,
false,
&qdev->cursor_event);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index a97294ac96d5..90c1afe498be 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -4364,7 +4364,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev,
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
}
j++;
- if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
if (!pi->mem_gddr5) {
@@ -5574,6 +5574,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
if (!rdev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
+ rdev->pm.dpm.num_ps = 0;
for (i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
@@ -5583,10 +5584,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
if (!rdev->pm.power_state[i].clock_info)
return -EINVAL;
ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(rdev->pm.dpm.ps);
+ if (ps == NULL)
return -ENOMEM;
- }
rdev->pm.dpm.ps[i].ps_priv = ps;
ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info,
@@ -5608,8 +5607,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
k++;
}
power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ rdev->pm.dpm.num_ps = i + 1;
}
- rdev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 0fd8d6ba9828..f86ca163dcf3 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -2126,7 +2126,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev)
if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
ret = -EINVAL;
- if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+ if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
ret = -EINVAL;
if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1a6f6edb3515..62e6e3b73e4e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1558,6 +1558,7 @@ struct radeon_dpm {
void *priv;
u32 new_active_crtcs;
int new_active_crtc_count;
+ int high_pixelclock_count;
u32 current_active_crtcs;
int current_active_crtc_count;
bool single_display;
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f422a8d6aec4..821b03d6142b 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2133,11 +2133,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
return state_index;
/* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
- rdev->pm.power_state[state_index].clock_info =
- kcalloc(1, sizeof(struct radeon_pm_clock_info),
- GFP_KERNEL);
+ /* avoid memory leaks from invalid modes or unknown frev. */
+ if (!rdev->pm.power_state[state_index].clock_info) {
+ rdev->pm.power_state[state_index].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info),
+ GFP_KERNEL);
+ }
if (!rdev->pm.power_state[state_index].clock_info)
- return state_index;
+ goto out;
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
@@ -2256,17 +2259,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
break;
}
}
+out:
+ /* free any unused clock_info allocation. */
+ if (state_index && state_index < num_modes) {
+ kfree(rdev->pm.power_state[state_index].clock_info);
+ rdev->pm.power_state[state_index].clock_info = NULL;
+ }
+
/* last mode is usually default */
- if (rdev->pm.default_power_state_index == -1) {
+ if (state_index && rdev->pm.default_power_state_index == -1) {
rdev->pm.power_state[state_index - 1].type =
POWER_STATE_TYPE_DEFAULT;
rdev->pm.default_power_state_index = state_index - 1;
rdev->pm.power_state[state_index - 1].default_clock_mode =
&rdev->pm.power_state[state_index - 1].clock_info[0];
- rdev->pm.power_state[state_index].flags &=
+ rdev->pm.power_state[state_index - 1].flags &=
~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = 0;
- rdev->pm.power_state[state_index].misc2 = 0;
+ rdev->pm.power_state[state_index - 1].misc = 0;
+ rdev->pm.power_state[state_index - 1].misc2 = 0;
}
return state_index;
}
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 04c0ed41374f..dd0528cf9818 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -104,25 +104,33 @@ static bool radeon_read_bios(struct radeon_device *rdev)
static bool radeon_read_platform_bios(struct radeon_device *rdev)
{
- uint8_t __iomem *bios;
- size_t size;
+ phys_addr_t rom = rdev->pdev->rom;
+ size_t romlen = rdev->pdev->romlen;
+ void __iomem *bios;
rdev->bios = NULL;
- bios = pci_platform_rom(rdev->pdev, &size);
- if (!bios) {
+ if (!rom || romlen == 0)
return false;
- }
- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+ rdev->bios = kzalloc(romlen, GFP_KERNEL);
+ if (!rdev->bios)
return false;
- }
- rdev->bios = kmemdup(bios, size, GFP_KERNEL);
- if (rdev->bios == NULL) {
- return false;
- }
+
+ bios = ioremap(rom, romlen);
+ if (!bios)
+ goto free_bios;
+
+ memcpy_fromio(rdev->bios, bios, romlen);
+ iounmap(bios);
+
+ if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa)
+ goto free_bios;
return true;
+free_bios:
+ kfree(rdev->bios);
+ return false;
}
#ifdef CONFIG_ACPI
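Editor's note: with pci_platform_rom() gone, both the nouveau shadowpci source earlier and this radeon path map the firmware-provided ROM window themselves from pdev->rom / pdev->romlen, copy it out with memcpy_fromio(), and unmap it again. A hedged sketch of that copy-out pattern; the signature check mirrors the radeon hunk:

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/slab.h>

/* Illustrative: copy a platform-provided ROM image (pdev->rom/romlen) into RAM. */
static void *example_copy_platform_rom(struct pci_dev *pdev, size_t *len)
{
	void __iomem *rom;
	u8 *copy;

	if (!pdev->rom || pdev->romlen == 0)
		return NULL;

	copy = kzalloc(pdev->romlen, GFP_KERNEL);
	if (!copy)
		return NULL;

	rom = ioremap(pdev->rom, pdev->romlen);
	if (!rom)
		goto free_copy;

	memcpy_fromio(copy, rom, pdev->romlen);
	iounmap(rom);

	if (copy[0] != 0x55 || copy[1] != 0xaa)	/* expansion ROM signature */
		goto free_copy;

	*len = pdev->romlen;
	return copy;

free_copy:
	kfree(copy);
	return NULL;
}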
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index de656f555383..b9927101e845 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -882,8 +882,10 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
+ }
}
if (encoder) {
@@ -1028,8 +1030,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
+ }
}
encoder = radeon_best_single_encoder(connector);
@@ -1166,8 +1170,10 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
+ }
}
encoder = radeon_best_single_encoder(connector);
@@ -1250,8 +1256,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
+ }
}
if (radeon_connector->detected_hpd_without_ddc) {
@@ -1665,8 +1673,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
+ }
}
if (!force && radeon_check_hpd_status_unchanged(connector)) {
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7d1e14f0140a..3f0f3a578ddf 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -625,8 +625,10 @@ radeon_crtc_set_config(struct drm_mode_set *set,
dev = set->crtc->dev;
ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
return ret;
+ }
ret = drm_crtc_helper_set_config(set, ctx);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index c26f09b47ecb..0cd33289c2b6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -168,12 +168,7 @@ int radeon_no_wb;
int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
-#ifdef __powerpc__
-/* Default to PCI on PowerPC (fdo #95017) */
int radeon_agpmode = -1;
-#else
-int radeon_agpmode = 0;
-#endif
int radeon_vram_limit = 0;
int radeon_gart_size = -1; /* auto */
int radeon_benchmarking = 0;
@@ -523,8 +518,10 @@ long radeon_drm_ioctl(struct file *filp,
long ret;
dev = file_priv->minor->dev;
ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
return ret;
+ }
ret = drm_ioctl(filp, cmd, arg);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 3ff835767ac5..3f75b4be7fa4 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -501,6 +501,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
*value = rdev->config.si.backend_enable_mask;
} else {
DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
+ return -EINVAL;
}
break;
case RADEON_INFO_MAX_SCLK:
@@ -627,8 +628,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
return r;
+ }
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4b6542538ff9..c213a5c9227e 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1715,6 +1715,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
+ struct radeon_connector *radeon_connector;
if (!rdev->pm.dpm_enabled)
return;
@@ -1724,6 +1725,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
/* update active crtc counts */
rdev->pm.dpm.new_active_crtcs = 0;
rdev->pm.dpm.new_active_crtc_count = 0;
+ rdev->pm.dpm.high_pixelclock_count = 0;
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
@@ -1731,6 +1733,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
if (crtc->enabled) {
rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
rdev->pm.dpm.new_active_crtc_count++;
+ if (!radeon_crtc->connector)
+ continue;
+
+ radeon_connector = to_radeon_connector(radeon_crtc->connector);
+ if (radeon_connector->pixelclock_for_modeset > 297000)
+ rdev->pm.dpm.high_pixelclock_count++;
}
}
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index db2d8b84e137..aac18d527c01 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3000,6 +3000,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6605)) {
max_sclk = 75000;
}
+
+ if (rdev->pm.dpm.high_pixelclock_count > 1)
+ disable_sclk_switching = true;
}
if (rps->vce_active) {
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index edde8d4b87a3..ddda84cdeb47 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -21,6 +21,7 @@ config DRM_RCAR_DW_HDMI
config DRM_RCAR_LVDS
tristate "R-Car DU LVDS Encoder Support"
depends on DRM && DRM_BRIDGE && OF
+ select DRM_KMS_HELPER
select DRM_PANEL
select OF_FLATTREE
select OF_OVERLAY
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index b685ee11623d..a13e14dd7746 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -152,7 +152,7 @@
#define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE 3
#define SUN4I_HDMI_DDC_CLK_REG 0x528
-#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0x7) << 3)
+#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0xf) << 3)
#define SUN4I_HDMI_DDC_CLK_N(n) ((n) & 0x7)
#define SUN4I_HDMI_DDC_LINE_CTRL_REG 0x540
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
index e826da34e919..14b6762567fb 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
@@ -37,7 +37,7 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
unsigned long best_rate = 0;
u8 best_m = 0, best_n = 0, _m, _n;
- for (_m = 0; _m < 8; _m++) {
+ for (_m = 0; _m < 16; _m++) {
for (_n = 0; _n < 8; _n++) {
unsigned long tmp_rate;
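
Together with the SUN4I_HDMI_DDC_CLK_M() change a few hunks up, the wider loop bound lets the divider search cover all 16 values a 4-bit M field can hold; with the old 3-bit mask, values 8-15 would simply have folded back onto 0-7, and the old loop never tried them at all. A small table generated from the old and new field macros (userspace sketch, macro bodies copied from the header) makes the difference visible:

#include <stdio.h>

#define OLD_CLK_M(m)	(((m) & 0x7) << 3)
#define NEW_CLK_M(m)	(((m) & 0xf) << 3)

int main(void)
{
	for (int m = 0; m < 16; m++)
		printf("m=%2d old=0x%02x new=0x%02x%s\n", m,
		       (unsigned)OLD_CLK_M(m), (unsigned)NEW_CLK_M(m),
		       OLD_CLK_M(m) != NEW_CLK_M(m) ? "  (old mask would have truncated this divider)" : "");
	return 0;
}
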
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 8ad36f574df8..8ba19a8ca40f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -242,9 +242,8 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
unsigned long reg;
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg,
- reg & SUN4I_HDMI_HPD_HIGH,
- 0, 500000)) {
+ reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
+ if (!(reg & SUN4I_HDMI_HPD_HIGH)) {
cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
}
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 79eb11cd185d..9a5584efd5e7 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -761,7 +761,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
- bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL);
+ bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL);
if (!bounce)
return -ENOMEM;
@@ -772,7 +772,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc));
len += sizeof(crc);
- regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len);
+ regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, DIV_ROUND_UP(len, 4));
regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1);
kfree(bounce);
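
The two sun6i_mipi_dsi changes above size the bounce buffer and the register write in whole 32-bit words: regmap_bulk_write() takes a count of registers rather than bytes, so the payload-plus-CRC length has to be rounded up to a multiple of 4 and then converted to a word count. The arithmetic, with the standard kernel ALIGN()/DIV_ROUND_UP() definitions reproduced locally and a hypothetical 7-byte payload plus the 16-bit packet CRC:

#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))	/* kernel ALIGN() for power-of-two a */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	size_t tx_len = 7;			/* hypothetical DCS payload length */
	size_t crc    = 2;			/* 16-bit long-packet checksum */
	size_t len    = tx_len + crc;		/* 9 bytes actually written */

	printf("bounce buffer: %zu bytes\n", ALIGN(len, (size_t)4));		/* 12 */
	printf("bulk write:    %zu words\n", DIV_ROUND_UP(len, (size_t)4));	/*  3 */
	return 0;
}
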
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h
index 880e8fbb0855..242752b2d328 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.h
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.h
@@ -14,7 +14,7 @@ struct sun8i_mixer;
/* VI channel CSC units offsets */
#define CCSC00_OFFSET 0xAA050
-#define CCSC01_OFFSET 0xFA000
+#define CCSC01_OFFSET 0xFA050
#define CCSC10_OFFSET 0xA0000
#define CCSC11_OFFSET 0xF0000
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 31875b636434..5073622cbb56 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -140,6 +140,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
phy_node = of_parse_phandle(dev->of_node, "phys", 0);
if (!phy_node) {
dev_err(dev, "Can't found PHY phandle\n");
+ ret = -EINVAL;
goto err_disable_clk_tmds;
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 71a798e5d559..649b57e5e4b7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -364,7 +364,7 @@ static struct regmap_config sun8i_mixer_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .max_register = 0xbfffc, /* guessed */
+ .max_register = 0xffffc, /* guessed */
};
static int sun8i_mixer_of_get_id(struct device_node *node)
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 965088afcfad..03adb4cf325b 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1670,6 +1670,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
dev_err(dc->dev,
"failed to set clock rate to %lu Hz\n",
state->pclk);
+
+ err = clk_set_rate(dc->clk, state->pclk);
+ if (err < 0)
+ dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
+ dc->clk, state->pclk, err);
}
DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
@@ -1680,11 +1685,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
}
-
- err = clk_set_rate(dc->clk, state->pclk);
- if (err < 0)
- dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
- dc->clk, state->pclk, err);
}
static void tegra_dc_stop(struct tegra_dc *dc)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index a2bd5876c633..00808a3d6783 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -242,7 +242,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
if (!fpriv)
return -ENOMEM;
- idr_init(&fpriv->contexts);
+ idr_init_base(&fpriv->contexts, 1);
mutex_init(&fpriv->lock);
filp->driver_priv = fpriv;
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index bb97cad1eb69..b08ce1125996 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -143,7 +143,9 @@ int tegra_display_hub_prepare(struct tegra_display_hub *hub)
for (i = 0; i < hub->soc->num_wgrps; i++) {
struct tegra_windowgroup *wgrp = &hub->wgrps[i];
- tegra_windowgroup_enable(wgrp);
+ /* Skip orphaned window group whose parent DC is disabled */
+ if (wgrp->parent)
+ tegra_windowgroup_enable(wgrp);
}
return 0;
@@ -160,7 +162,9 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
for (i = 0; i < hub->soc->num_wgrps; i++) {
struct tegra_windowgroup *wgrp = &hub->wgrps[i];
- tegra_windowgroup_disable(wgrp);
+ /* Skip orphaned window group whose parent DC is disabled */
+ if (wgrp->parent)
+ tegra_windowgroup_disable(wgrp);
}
}
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 89cb70da2bfe..83108e243050 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2668,17 +2668,23 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0) {
dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
err);
+ clk_disable_unprepare(sor->clk);
return err;
}
}
err = clk_prepare_enable(sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
+ clk_disable_unprepare(sor->clk);
return err;
+ }
err = clk_prepare_enable(sor->clk_dp);
- if (err < 0)
+ if (err < 0) {
+ clk_disable_unprepare(sor->clk_safe);
+ clk_disable_unprepare(sor->clk);
return err;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index a1acab39d87f..096a33f12c61 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -150,12 +150,16 @@ static int panel_connector_get_modes(struct drm_connector *connector)
int i;
for (i = 0; i < timings->num_timings; i++) {
- struct drm_display_mode *mode = drm_mode_create(dev);
+ struct drm_display_mode *mode;
struct videomode vm;
if (videomode_from_timings(timings, &vm, i))
break;
+ mode = drm_mode_create(dev);
+ if (!mode)
+ break;
+
drm_display_mode_from_videomode(&vm, mode);
mode->type = DRM_MODE_TYPE_DRIVER;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 268f5a3b3122..81e076662c7a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -671,7 +671,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
/* Don't evict this BO if it's outside of the
* requested placement range
*/
- if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
+ if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
(place->lpfn && place->lpfn <= bo->mem.start))
return false;
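
The ttm_bo_eviction_valuable() change above is a units fix: place->fpfn/lpfn and bo->mem.start are page frame numbers, while bo->mem.size is in bytes, so the old comparison mixed pages with bytes and made almost every BO look like it overlapped the requested range. A hypothetical example assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12				/* 4 KiB pages assumed for the example */

int main(void)
{
	unsigned long start     = 256;			/* BO placement, in pages */
	unsigned long num_pages = 16;			/* BO length, in pages */
	unsigned long size      = num_pages << PAGE_SHIFT; /* same length, in bytes */
	unsigned long fpfn      = 300;			/* requested range start, in pages */

	/* old check: byte count accidentally added to a page number */
	printf("old: %lu >= %lu ? %s\n", fpfn, start + size,
	       fpfn >= start + size ? "outside range, skip" : "treated as overlapping, evict");
	/* new check: pages compared with pages */
	printf("new: %lu >= %lu ? %s\n", fpfn, start + num_pages,
	       fpfn >= start + num_pages ? "outside range, skip" : "treated as overlapping, evict");
	return 0;
}
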
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e3a0691582ff..68cfa25674e5 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -241,7 +241,6 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
ttm_tt_init_fields(ttm, bo, page_flags);
if (ttm_tt_alloc_page_directory(ttm)) {
- ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
@@ -265,7 +264,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&ttm_dma->pages_list);
if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
- ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
@@ -287,7 +285,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
else
ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
if (ret) {
- ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index e8723a2412a6..c0b113ba329c 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -17,6 +17,7 @@
#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/of_graph.h>
+#include <linux/delay.h>
#include <drm/drmP.h>
#include <drm/drm_panel.h>
@@ -132,9 +133,25 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
struct drm_connector *connector = priv->connector;
u32 format = fb->format->format;
u32 ctrl1 = 0;
+ int retries;
clk_prepare_enable(priv->clk);
+ /* Reset the TVE200 and wait for it to come back online */
+ writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
+ for (retries = 0; retries < 5; retries++) {
+ usleep_range(30000, 50000);
+ if (readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET)
+ continue;
+ else
+ break;
+ }
+ if (retries == 5 &&
+ readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) {
+ dev_err(drm->dev, "can't get hardware out of reset\n");
+ return;
+ }
+
/* Function 1 */
ctrl1 |= TVE200_CTRL_CSMODE;
/* Interlace mode for CCIR656: parameterize? */
@@ -231,8 +248,9 @@ static void tve200_display_disable(struct drm_simple_display_pipe *pipe)
drm_crtc_vblank_off(crtc);
- /* Disable and Power Down */
+ /* Disable, put into reset and power down */
writel(0, priv->regs + TVE200_CTRL);
+ writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
clk_disable_unprepare(priv->clk);
}
@@ -280,6 +298,8 @@ static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe)
struct drm_device *drm = crtc->dev;
struct tve200_drm_dev_private *priv = drm->dev_private;
+ /* Clear any IRQs and enable */
+ writel(0xFF, priv->regs + TVE200_INT_CLR);
writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN);
return 0;
}
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index ac344ddb23bc..f93384c23206 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -223,8 +223,8 @@ static int tve200_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- ret = -EINVAL;
+ if (irq < 0) {
+ ret = irq;
goto clk_disable;
}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 04270a14fcaa..868dd1ef3b69 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -312,6 +312,7 @@ unbind_all:
component_unbind_all(dev, drm);
gem_destroy:
vc4_gem_destroy(drm);
+ drm_mode_config_cleanup(drm);
vc4_bo_cache_destroy(drm);
dev_put:
drm_dev_put(drm);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index fd5522fd179e..116166266457 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -698,11 +698,23 @@ static enum drm_mode_status
vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc,
const struct drm_display_mode *mode)
{
- /* HSM clock must be 108% of the pixel clock. Additionally,
- * the AXI clock needs to be at least 25% of pixel clock, but
- * HSM ends up being the limiting factor.
+ /*
+ * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
+ * be faster than pixel clock, infinitesimally faster, tested in
+ * simulation. Otherwise, exact value is unimportant for HDMI
+ * operation." This conflicts with bcm2835's vc4 documentation, which
+ * states HSM's clock has to be at least 108% of the pixel clock.
+ *
+ * Real life tests reveal that vc4's firmware statement holds up, and
+ * users are able to use pixel clocks closer to HSM's, namely for
+ * 1920x1200@60Hz. So it was decided to leave a 1% margin between
+ * both clocks, which, for RPi0-3, implies a maximum pixel clock of
+ * 162MHz.
+ *
+ * Additionally, the AXI clock needs to be at least 25% of
+ * pixel clock, but HSM ends up being the limiting factor.
*/
- if (mode->clock > HSM_CLOCK_FREQ / (1000 * 108 / 100))
+ if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100))
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -1122,6 +1134,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
card->num_links = 1;
card->name = "vc4-hdmi";
card->dev = dev;
+ card->owner = THIS_MODULE;
/*
* Be careful, snd_soc_register_card() calls dev_set_drvdata() and
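
The vc4_hdmi_encoder_mode_valid() hunk earlier in this file's diff relaxes the mode filter from a 108% to a 1% HSM margin; since mode->clock is in kHz, the expression now divides the fixed HSM rate by 1010 instead of 1080. Assuming the driver's HSM_CLOCK_FREQ of 163682864 Hz (a value taken on trust from the same file), the arithmetic gives the ~162 MHz ceiling the new comment mentions:

#include <stdio.h>

#define HSM_CLOCK_FREQ 163682864UL	/* Hz; assumed from vc4_hdmi.c */

int main(void)
{
	/* mode->clock is in kHz, hence the extra factor of 1000 in the divisor */
	unsigned long old_limit_khz = HSM_CLOCK_FREQ / (1000 * 108 / 100);	/* 8% margin */
	unsigned long new_limit_khz = HSM_CLOCK_FREQ / (1000 * 101 / 100);	/* 1% margin */

	printf("old max pixel clock: %lu kHz (~%lu MHz)\n", old_limit_khz, old_limit_khz / 1000);
	printf("new max pixel clock: %lu kHz (~%lu MHz)\n", new_limit_khz, new_limit_khz / 1000);
	return 0;
}
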
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 4709f08f39e4..1c1a435d354b 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -219,32 +219,6 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
return 0;
}
-static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
-{
- struct drm_gem_object *obj;
- int ret;
-
- obj = drm_gem_object_lookup(file, handle);
- if (!obj)
- return -ENOENT;
-
- if (!obj->filp) {
- ret = -EINVAL;
- goto unref;
- }
-
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto unref;
-
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
-unref:
- drm_gem_object_put_unlocked(obj);
-
- return ret;
-}
-
static struct drm_ioctl_desc vgem_ioctls[] = {
DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -438,7 +412,6 @@ static struct drm_driver vgem_driver = {
.fops = &vgem_driver_fops,
.dumb_create = vgem_gem_dumb_create,
- .dumb_map_offset = vgem_gem_dumb_map,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 65060c08522d..22397a23780c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -113,8 +113,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
vgdev->capsets[i].id > 0, 5 * HZ);
if (ret == 0) {
DRM_ERROR("timed out waiting for cap set %d\n", i);
+ spin_lock(&vgdev->display_info_lock);
kfree(vgdev->capsets);
vgdev->capsets = NULL;
+ spin_unlock(&vgdev->display_info_lock);
return;
}
DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 608906f06ced..6e45d6b0dad9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -566,9 +566,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
int i = le32_to_cpu(cmd->capset_index);
spin_lock(&vgdev->display_info_lock);
- vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
- vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
- vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
+ if (vgdev->capsets) {
+ vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
+ vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
+ vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
+ } else {
+ DRM_ERROR("invalid capset memory.");
+ }
spin_unlock(&vgdev->display_info_lock);
wake_up(&vgdev->resp_wq);
}
@@ -864,9 +868,9 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
}
/* gets freed when the ring has consumed it */
- ents = kmalloc_array(obj->pages->nents,
- sizeof(struct virtio_gpu_mem_entry),
- GFP_KERNEL);
+ ents = kvmalloc_array(obj->pages->nents,
+ sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
if (!ents) {
DRM_ERROR("failed to allocate ent list\n");
return -ENOMEM;
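
The two virtio-gpu hunks above are two halves of one fix: the timeout path frees and clears vgdev->capsets under display_info_lock, and the response callback re-checks the pointer under the same lock before writing into it, so a response arriving after the timeout can no longer touch freed memory. A toy pthread model of that ordering (hypothetical struct and helper names, not the driver's API), walked through single-threaded:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct state {
	pthread_mutex_t lock;		/* stands in for display_info_lock */
	int *capsets;			/* stands in for vgdev->capsets */
};

static void teardown_on_timeout(struct state *s)
{
	pthread_mutex_lock(&s->lock);
	free(s->capsets);		/* free and clear under the lock ... */
	s->capsets = NULL;
	pthread_mutex_unlock(&s->lock);
}

static void capset_response(struct state *s, int idx, int value)
{
	pthread_mutex_lock(&s->lock);
	if (s->capsets)			/* ... so a late response can detect teardown */
		s->capsets[idx] = value;
	else
		printf("response %d arrived after teardown, ignored\n", idx);
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct state s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	s.capsets = calloc(4, sizeof(*s.capsets));
	capset_response(&s, 0, 42);	/* normal response */
	teardown_on_timeout(&s);	/* wait timed out, array freed */
	capset_response(&s, 1, 7);	/* stale response: now safely ignored */
	return 0;
}
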
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index e018752d57bb..b46c31e25c27 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -62,11 +62,6 @@ int vkms_output_init(struct vkms_device *vkmsdev);
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev);
/* Gem stuff */
-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
- struct drm_file *file,
- u32 *handle,
- u64 size);
-
int vkms_gem_fault(struct vm_fault *vmf);
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index ce394009a36c..f731683c932e 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -93,10 +93,10 @@ int vkms_gem_fault(struct vm_fault *vmf)
return ret;
}
-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
- struct drm_file *file,
- u32 *handle,
- u64 size)
+static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+ struct drm_file *file,
+ u32 *handle,
+ u64 size)
{
struct vkms_gem_object *obj;
int ret;
@@ -109,7 +109,6 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->gem, handle);
- drm_gem_object_put_unlocked(&obj->gem);
if (ret)
return ERR_PTR(ret);
@@ -138,6 +137,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_obj->size;
args->pitch = pitch;
+ drm_gem_object_put_unlocked(gem_obj);
+
DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 6a712a8d59e9..e486b6517ac5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2861,7 +2861,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
++i;
}
- if (i != unit) {
+ if (&con->head == &dev_priv->dev->mode_config.connector_list) {
DRM_ERROR("Could not find initial display unit.\n");
ret = -EINVAL;
goto out_unlock;
@@ -2885,13 +2885,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
break;
}
- if (mode->type & DRM_MODE_TYPE_PREFERRED)
- *p_mode = mode;
- else {
+ if (&mode->head == &con->modes) {
WARN_ONCE(true, "Could not find initial preferred mode.\n");
*p_mode = list_first_entry(&con->modes,
struct drm_display_mode,
head);
+ } else {
+ *p_mode = mode;
}
out_unlock:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 723578117191..0743a7311700 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -79,7 +79,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
struct vmw_legacy_display_unit *entry;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
- int i = 0;
+ int i;
/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
@@ -90,12 +90,11 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
crtc = &entry->base.crtc;
w = max(w, crtc->x + crtc->mode.hdisplay);
h = max(h, crtc->y + crtc->mode.vdisplay);
- i++;
}
if (crtc == NULL)
return 0;
- fb = entry->base.crtc.primary->state->fb;
+ fb = crtc->primary->state->fb;
return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
fb->format->cpp[0] * 8,
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 6b6d5ab82ec3..1f6c91496d93 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -410,7 +410,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
args->size = args->pitch * args->height;
obj = xen_drm_front_gem_create(dev, args->size);
- if (IS_ERR_OR_NULL(obj)) {
+ if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto fail;
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 802662839e7e..cba7852123d6 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -85,7 +85,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
size = round_up(size, PAGE_SIZE);
xen_obj = gem_create_obj(dev, size);
- if (IS_ERR_OR_NULL(xen_obj))
+ if (IS_ERR(xen_obj))
return xen_obj;
if (drm_info->front_info->cfg.be_alloc) {
@@ -119,7 +119,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
*/
xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
- if (IS_ERR_OR_NULL(xen_obj->pages)) {
+ if (IS_ERR(xen_obj->pages)) {
ret = PTR_ERR(xen_obj->pages);
xen_obj->pages = NULL;
goto fail;
@@ -138,7 +138,7 @@ struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
struct xen_gem_object *xen_obj;
xen_obj = gem_create(dev, size);
- if (IS_ERR_OR_NULL(xen_obj))
+ if (IS_ERR(xen_obj))
return ERR_CAST(xen_obj);
return &xen_obj->base;
@@ -196,7 +196,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
size = attach->dmabuf->size;
xen_obj = gem_create_obj(dev, size);
- if (IS_ERR_OR_NULL(xen_obj))
+ if (IS_ERR(xen_obj))
return ERR_CAST(xen_obj);
ret = gem_alloc_pages_array(xen_obj, size);
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index a3479eb72d79..d9700c69e5b7 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -59,7 +59,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
int ret;
fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
- if (IS_ERR_OR_NULL(fb))
+ if (IS_ERR(fb))
return fb;
gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 0121fe7a4548..02f896b50ed0 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -632,8 +632,17 @@ EXPORT_SYMBOL(host1x_driver_register_full);
*/
void host1x_driver_unregister(struct host1x_driver *driver)
{
+ struct host1x *host1x;
+
driver_unregister(&driver->driver);
+ mutex_lock(&devices_lock);
+
+ list_for_each_entry(host1x, &devices, list)
+ host1x_detach_driver(host1x, driver);
+
+ mutex_unlock(&devices_lock);
+
mutex_lock(&drivers_lock);
list_del_init(&driver->list);
mutex_unlock(&drivers_lock);
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index 329e4a3d8ae7..6c9ad4533999 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -25,6 +25,8 @@
#include "debug.h"
#include "channel.h"
+static DEFINE_MUTEX(debug_lock);
+
unsigned int host1x_debug_trace_cmdbuf;
static pid_t host1x_debug_force_timeout_pid;
@@ -61,12 +63,14 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
struct output *o = data;
mutex_lock(&ch->cdma.lock);
+ mutex_lock(&debug_lock);
if (show_fifo)
host1x_hw_show_channel_fifo(m, ch, o);
host1x_hw_show_channel_cdma(m, ch, o);
+ mutex_unlock(&debug_lock);
mutex_unlock(&ch->cdma.lock);
return 0;
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index 91653adc41cc..cdaf1d74e31a 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -998,9 +998,10 @@ done:
return IRQ_WAKE_THREAD;
}
-static irqreturn_t norotate_irq(int irq, void *data)
+static irqreturn_t eof_irq(int irq, void *data)
{
struct ipu_image_convert_chan *chan = data;
+ struct ipu_image_convert_priv *priv = chan->priv;
struct ipu_image_convert_ctx *ctx;
struct ipu_image_convert_run *run;
unsigned long flags;
@@ -1017,45 +1018,26 @@ static irqreturn_t norotate_irq(int irq, void *data)
ctx = run->ctx;
- if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
- /* this is a rotation operation, just ignore */
- spin_unlock_irqrestore(&chan->irqlock, flags);
- return IRQ_HANDLED;
- }
-
- ret = do_irq(run);
-out:
- spin_unlock_irqrestore(&chan->irqlock, flags);
- return ret;
-}
-
-static irqreturn_t rotate_irq(int irq, void *data)
-{
- struct ipu_image_convert_chan *chan = data;
- struct ipu_image_convert_priv *priv = chan->priv;
- struct ipu_image_convert_ctx *ctx;
- struct ipu_image_convert_run *run;
- unsigned long flags;
- irqreturn_t ret;
-
- spin_lock_irqsave(&chan->irqlock, flags);
-
- /* get current run and its context */
- run = chan->current_run;
- if (!run) {
+ if (irq == chan->out_eof_irq) {
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ /* this is a rotation op, just ignore */
+ ret = IRQ_HANDLED;
+ goto out;
+ }
+ } else if (irq == chan->rot_out_eof_irq) {
+ if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ /* this was NOT a rotation op, shouldn't happen */
+ dev_err(priv->ipu->dev,
+ "Unexpected rotation interrupt\n");
+ ret = IRQ_HANDLED;
+ goto out;
+ }
+ } else {
+ dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq);
ret = IRQ_NONE;
goto out;
}
- ctx = run->ctx;
-
- if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
- /* this was NOT a rotation operation, shouldn't happen */
- dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
- spin_unlock_irqrestore(&chan->irqlock, flags);
- return IRQ_HANDLED;
- }
-
ret = do_irq(run);
out:
spin_unlock_irqrestore(&chan->irqlock, flags);
@@ -1148,7 +1130,7 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan)
chan->out_chan,
IPU_IRQ_EOF);
- ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
+ ret = request_threaded_irq(chan->out_eof_irq, eof_irq, do_bh,
0, "ipu-ic", chan);
if (ret < 0) {
dev_err(priv->ipu->dev, "could not acquire irq %d\n",
@@ -1161,7 +1143,7 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan)
chan->rotation_out_chan,
IPU_IRQ_EOF);
- ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
+ ret = request_threaded_irq(chan->rot_out_eof_irq, eof_irq, do_bh,
0, "ipu-ic", chan);
if (ret < 0) {
dev_err(priv->ipu->dev, "could not acquire irq %d\n",
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index 895f49b565ee..3eddd8f73b57 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -29,6 +29,7 @@
#define U1_MOUSE_REPORT_ID 0x01 /* Mouse data ReportID */
#define U1_ABSOLUTE_REPORT_ID 0x03 /* Absolute data ReportID */
+#define U1_ABSOLUTE_REPORT_ID_SECD 0x02 /* FW-PTP Absolute data ReportID */
#define U1_FEATURE_REPORT_ID 0x05 /* Feature ReportID */
#define U1_SP_ABSOLUTE_REPORT_ID 0x06 /* Feature ReportID */
@@ -372,6 +373,7 @@ static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size)
case U1_FEATURE_REPORT_ID:
break;
case U1_ABSOLUTE_REPORT_ID:
+ case U1_ABSOLUTE_REPORT_ID_SECD:
for (i = 0; i < hdata->max_fingers; i++) {
u8 *contact = &data[i * 5];
@@ -764,6 +766,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (input_register_device(data->input2)) {
input_free_device(input2);
+ ret = -ENOENT;
goto exit;
}
}
@@ -806,6 +809,7 @@ static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id)
break;
case HID_DEVICE_ID_ALPS_U1_DUAL:
case HID_DEVICE_ID_ALPS_U1:
+ case HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY:
data->dev_type = U1;
break;
default:
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 8ab8f2350bbc..b58ab769aa7b 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -57,6 +57,7 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
struct apple_sc {
unsigned long quirks;
unsigned int fn_on;
+ unsigned int fn_found;
DECLARE_BITMAP(pressed_numlock, KEY_CNT);
};
@@ -342,12 +343,15 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
+ struct apple_sc *asc = hid_get_drvdata(hdev);
+
if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
usage->hid == (HID_UP_MSVENDOR | 0x0003) ||
usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) {
/* The fn key on Apple USB keyboards */
set_bit(EV_REP, hi->input->evbit);
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
+ asc->fn_found = true;
apple_setup_input(hi->input);
return 1;
}
@@ -374,6 +378,19 @@ static int apple_input_mapped(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
+static int apple_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
+{
+ struct apple_sc *asc = hid_get_drvdata(hdev);
+
+ if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) {
+ hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n");
+ asc->quirks = 0;
+ }
+
+ return 0;
+}
+
static int apple_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -588,6 +605,7 @@ static struct hid_driver apple_driver = {
.event = apple_event,
.input_mapping = apple_input_mapping,
.input_mapped = apple_input_mapped,
+ .input_configured = apple_input_configured,
};
module_hid_driver(apple_driver);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2c85d075daee..9b66eb1d42c2 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -93,7 +93,7 @@ EXPORT_SYMBOL_GPL(hid_register_report);
* Register a new field for this report.
*/
-static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
+static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
{
struct hid_field *field;
@@ -104,7 +104,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
field = kzalloc((sizeof(struct hid_field) +
usages * sizeof(struct hid_usage) +
- values * sizeof(unsigned)), GFP_KERNEL);
+ usages * sizeof(unsigned)), GFP_KERNEL);
if (!field)
return NULL;
@@ -300,7 +300,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
usages = max_t(unsigned, parser->local.usage_index,
parser->global.report_count);
- field = hid_register_field(report, usages, parser->global.report_count);
+ field = hid_register_field(report, usages);
if (!field)
return 0;
@@ -1128,6 +1128,9 @@ EXPORT_SYMBOL_GPL(hid_open_report);
static s32 snto32(__u32 value, unsigned n)
{
+ if (!value || !n)
+ return 0;
+
switch (n) {
case 8: return ((__s8)value);
case 16: return ((__s16)value);
@@ -1426,6 +1429,17 @@ static void hid_output_field(const struct hid_device *hid,
}
/*
+ * Compute the size of a report.
+ */
+static size_t hid_compute_report_size(struct hid_report *report)
+{
+ if (report->size)
+ return ((report->size - 1) >> 3) + 1;
+
+ return 0;
+}
+
+/*
* Create a report. 'data' has to be allocated using
* hid_alloc_report_buf() so that it has proper size.
*/
@@ -1437,7 +1451,7 @@ void hid_output_report(struct hid_report *report, __u8 *data)
if (report->id > 0)
*data++ = report->id;
- memset(data, 0, ((report->size - 1) >> 3) + 1);
+ memset(data, 0, hid_compute_report_size(report));
for (n = 0; n < report->maxfield; n++)
hid_output_field(report->device, report->field[n], data);
}
@@ -1564,7 +1578,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
csize--;
}
- rsize = ((report->size - 1) >> 3) + 1;
+ rsize = hid_compute_report_size(report);
if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
rsize = HID_MAX_BUFFER_SIZE - 1;
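
Several of the hid-core hunks above guard degenerate descriptor values; in particular, the new hid_compute_report_size() avoids applying the bits-to-bytes expression ((size - 1) >> 3) + 1 when report->size is 0, where the unsigned underflow would otherwise turn a zero-bit report into a ~512 MiB buffer length. A small demonstration of the arithmetic:

#include <stdio.h>

static size_t old_report_bytes(unsigned int size_bits)
{
	return ((size_bits - 1) >> 3) + 1;	/* underflows when size_bits == 0 */
}

static size_t new_report_bytes(unsigned int size_bits)
{
	if (size_bits)
		return ((size_bits - 1) >> 3) + 1;
	return 0;				/* zero-sized report -> no payload */
}

int main(void)
{
	unsigned int sizes[] = { 0, 1, 8, 9, 64 };

	for (int i = 0; i < 5; i++)
		printf("%3u bits -> old %zu bytes, new %zu bytes\n",
		       sizes[i], old_report_bytes(sizes[i]), new_report_bytes(sizes[i]));
	return 0;
}
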
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 1689568b597d..12c5d7c96527 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -26,19 +26,17 @@
#define CP_2WHEEL_MOUSE_HACK 0x02
#define CP_2WHEEL_MOUSE_HACK_ON 0x04
+#define VA_INVAL_LOGICAL_BOUNDARY 0x08
+
/*
* Some USB barcode readers from cypress have usage min and usage max in
* the wrong order
*/
-static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static __u8 *cp_rdesc_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
unsigned int i;
- if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
- return rdesc;
-
if (*rsize < 4)
return rdesc;
@@ -51,6 +49,40 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
+static __u8 *va_logical_boundary_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ /*
+ * Varmilo VA104M (with VID Cypress and device ID 07B1) incorrectly
+ * reports Logical Minimum of its Consumer Control device as 572
+ * (0x02 0x3c). Fix this by setting its Logical Minimum to zero.
+ */
+ if (*rsize == 25 &&
+ rdesc[0] == 0x05 && rdesc[1] == 0x0c &&
+ rdesc[2] == 0x09 && rdesc[3] == 0x01 &&
+ rdesc[6] == 0x19 && rdesc[7] == 0x00 &&
+ rdesc[11] == 0x16 && rdesc[12] == 0x3c && rdesc[13] == 0x02) {
+ hid_info(hdev,
+ "fixing up varmilo VA104M consumer control report descriptor\n");
+ rdesc[12] = 0x00;
+ rdesc[13] = 0x00;
+ }
+ return rdesc;
+}
+
+static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+
+ if (quirks & CP_RDESC_SWAPPED_MIN_MAX)
+ rdesc = cp_rdesc_fixup(hdev, rdesc, rsize);
+ if (quirks & VA_INVAL_LOGICAL_BOUNDARY)
+ rdesc = va_logical_boundary_fixup(hdev, rdesc, rsize);
+
+ return rdesc;
+}
+
static int cp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
@@ -131,6 +163,8 @@ static const struct hid_device_id cp_devices[] = {
.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
.driver_data = CP_2WHEEL_MOUSE_HACK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1),
+ .driver_data = VA_INVAL_LOGICAL_BOUNDARY },
{ }
};
MODULE_DEVICE_TABLE(hid, cp_devices);
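
The bytes matched by the new va_logical_boundary_fixup() form a two-byte Logical Minimum item: prefix 0x16 is a Logical Minimum global item carrying two data bytes, and the little-endian payload 0x3c 0x02 decodes to the bogus 572 that the fixup zeroes out. A short decode of just that item:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* bytes 11..13 of the 25-byte consumer-control descriptor */
	uint8_t item[3] = { 0x16, 0x3c, 0x02 };		/* Logical Minimum, 2 data bytes */
	int16_t before = (int16_t)(item[1] | (item[2] << 8));

	item[1] = 0x00;					/* what the fixup writes ... */
	item[2] = 0x00;
	int16_t after = (int16_t)(item[1] | (item[2] << 8));

	printf("Logical Minimum: %d -> %d\n", before, after);	/* 572 -> 0 */
	return 0;
}
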
diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
index 07e26c3567eb..6346282e0ff0 100644
--- a/drivers/hid/hid-elan.c
+++ b/drivers/hid/hid-elan.c
@@ -192,6 +192,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER);
if (ret) {
hid_err(hdev, "Failed to init elan MT slots: %d\n", ret);
+ input_free_device(input);
return ret;
}
@@ -202,6 +203,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (ret) {
hid_err(hdev, "Failed to register elan input device: %d\n",
ret);
+ input_mt_destroy_slots(input);
input_free_device(input);
return ret;
}
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index fab8fd7082e0..3e58d4c3cf2c 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -119,6 +119,8 @@ static int hammer_input_configured(struct hid_device *hdev,
static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b2fff44c8109..75342f3dfb86 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -81,10 +81,10 @@
#define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F
#define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220
#define HID_DEVICE_ID_ALPS_U1 0x1215
+#define HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY 0x121E
#define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C
#define HID_DEVICE_ID_ALPS_1222 0x1222
-
#define USB_VENDOR_ID_AMI 0x046b
#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
@@ -330,6 +330,8 @@
#define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81
#define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
+#define USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1 0X07b1
+
#define USB_VENDOR_ID_DATA_MODUL 0x7374
#define USB_VENDOR_ID_DATA_MODUL_EASYMAXTOUCH 0x1201
@@ -356,6 +358,7 @@
#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803
#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE1 0x1843
#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE2 0x1844
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE3 0x1846
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
@@ -378,11 +381,13 @@
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002
#define USB_VENDOR_ID_ELAN 0x04f3
#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
+#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -441,6 +446,10 @@
#define USB_VENDOR_ID_FRUCTEL 0x25B6
#define USB_DEVICE_ID_GAMETEL_MT_MODE 0x0002
+#define USB_VENDOR_ID_GAMEVICE 0x27F8
+#define USB_DEVICE_ID_GAMEVICE_GV186 0x0BBE
+#define USB_DEVICE_ID_GAMEVICE_KISHI 0x0BBF
+
#define USB_VENDOR_ID_GAMERON 0x0810
#define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001
#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
@@ -469,6 +478,7 @@
#define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
+#define USB_DEVICE_ID_GOOGLE_DON 0x5050
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f
@@ -476,6 +486,7 @@
#define USB_DEVICE_ID_PENPOWER 0x00f4
#define USB_VENDOR_ID_GREENASIA 0x0e8f
+#define USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR 0x3010
#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013
#define USB_VENDOR_ID_GRETAGMACBETH 0x0971
@@ -608,6 +619,7 @@
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2 0xa0c2
#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096 0xa096
+#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293 0xa293
#define USB_VENDOR_ID_IMATION 0x0718
#define USB_DEVICE_ID_DISC_STAKKA 0xd000
@@ -724,6 +736,7 @@
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
#define USB_DEVICE_ID_LOGITECH_T651 0xb00c
+#define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD 0xb309
#define USB_DEVICE_ID_LOGITECH_C007 0xc007
#define USB_DEVICE_ID_LOGITECH_C077 0xc077
#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
@@ -754,6 +767,7 @@
#define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b
#define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c
#define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a
+#define USB_DEVICE_ID_LOGITECH_GROUP_AUDIO 0x0882
#define USB_DEVICE_ID_S510_RECEIVER 0xc50c
#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
@@ -890,6 +904,7 @@
#define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003
#define USB_VENDOR_ID_PLANTRONICS 0x047f
+#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056
#define USB_VENDOR_ID_PANASONIC 0x04da
#define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
@@ -971,6 +986,8 @@
#define USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO 0x3232
#define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a
+#define USB_VENDOR_ID_SAI 0x17dd
+
#define USB_VENDOR_ID_SAITEK 0x06a3
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
#define USB_DEVICE_ID_SAITEK_PS1000 0x0621
@@ -980,6 +997,8 @@
#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
#define USB_DEVICE_ID_SAITEK_X52 0x075c
+#define USB_DEVICE_ID_SAITEK_X52_2 0x0255
+#define USB_DEVICE_ID_SAITEK_X52_PRO 0x0762
#define USB_VENDOR_ID_SAMSUNG 0x0419
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
@@ -1063,6 +1082,9 @@
#define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
#define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200
+#define I2C_VENDOR_ID_SYNAPTICS 0x06cb
+#define I2C_PRODUCT_ID_SYNAPTICS_SYNA2393 0x7a13
+
#define USB_VENDOR_ID_SYNAPTICS 0x06cb
#define USB_DEVICE_ID_SYNAPTICS_TP 0x0001
#define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002
@@ -1077,6 +1099,7 @@
#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
@@ -1116,6 +1139,9 @@
#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882 0x8882
#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883 0x8883
+#define USB_VENDOR_ID_TRUST 0x145f
+#define USB_DEVICE_ID_TRUST_PANORA_TABLET 0x0212
+
#define USB_VENDOR_ID_TURBOX 0x062a
#define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
#define USB_DEVICE_ID_ASUS_MD_5110 0x5110
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index dbb0cbe65fc9..4dd151b2924e 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -331,6 +331,11 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
@@ -362,13 +367,13 @@ static int hidinput_query_battery_capacity(struct hid_device *dev)
u8 *buf;
int ret;
- buf = kmalloc(2, GFP_KERNEL);
+ buf = kmalloc(4, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2,
+ ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 4,
dev->battery_report_type, HID_REQ_GET_REPORT);
- if (ret != 2) {
+ if (ret < 2) {
kfree(buf);
return -ENODATA;
}
@@ -796,7 +801,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x3b: /* Battery Strength */
hidinput_setup_battery(device, HID_INPUT_REPORT, field);
usage->type = EV_PWR;
- goto ignore;
+ return;
case 0x3c: /* Invert */
map_key_clear(BTN_TOOL_RUBBER);
@@ -1052,7 +1057,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case HID_DC_BATTERYSTRENGTH:
hidinput_setup_battery(device, HID_INPUT_REPORT, field);
usage->type = EV_PWR;
- goto ignore;
+ return;
}
goto unknown;
@@ -1125,6 +1130,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
mapped:
+ /* Mapping failed, bail out */
+ if (!bit)
+ return;
+
if (device->driver->input_mapped &&
device->driver->input_mapped(device, hidinput, field, usage,
&bit, &max) < 0) {
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index b454c4386157..8af62696f2ca 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -452,6 +452,12 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(MSC_RAW, input->mscbit);
}
+ /*
+ * hid-input may mark device as using autorepeat, but neither
+ * the trackpad, nor the mouse actually want it.
+ */
+ __clear_bit(EV_REP, input->evbit);
+
return 0;
}
diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c
index 03f10516131d..a41202d38509 100644
--- a/drivers/hid/hid-mf.c
+++ b/drivers/hid/hid-mf.c
@@ -161,6 +161,8 @@ static const struct hid_device_id mf_devices[] = {
.driver_data = HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2),
.driver_data = 0 }, /* No quirk required */
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3),
+ .driver_data = HID_QUIRK_MULTI_INPUT },
{ }
};
MODULE_DEVICE_TABLE(hid, mf_devices);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 19dfd8acd0da..ccda72f748ee 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -841,6 +841,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
code = BTN_0 + ((usage->hid - 1) & HID_USAGE);
hid_map_usage(hi, usage, bit, max, EV_KEY, code);
+ if (!*bit)
+ return -1;
input_set_capability(hi->input, EV_KEY, code);
return 1;
@@ -1909,6 +1911,9 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_EGALAX_SERIAL,
MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
+ { .driver_data = MT_CLS_EGALAX,
+ MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
/* Elitegroup panel */
{ .driver_data = MT_CLS_SERIAL,
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
index 584b10d3fc3d..460711c1124a 100644
--- a/drivers/hid/hid-plantronics.c
+++ b/drivers/hid/hid-plantronics.c
@@ -16,6 +16,7 @@
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/jiffies.h>
#define PLT_HID_1_0_PAGE 0xffa00000
#define PLT_HID_2_0_PAGE 0xffa20000
@@ -39,6 +40,16 @@
#define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
(usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
+#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
+
+#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
+
+struct plt_drv_data {
+ unsigned long device_type;
+ unsigned long last_volume_key_ts;
+ u32 quirks;
+};
+
static int plantronics_input_mapping(struct hid_device *hdev,
struct hid_input *hi,
struct hid_field *field,
@@ -46,7 +57,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
unsigned long **bit, int *max)
{
unsigned short mapped_key;
- unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
+ struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
+ unsigned long plt_type = drv_data->device_type;
/* special case for PTT products */
if (field->application == HID_GD_JOYSTICK)
@@ -108,6 +120,30 @@ mapped:
return 1;
}
+static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
+
+ if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
+ unsigned long prev_ts, cur_ts;
+
+ /* Usages are filtered in plantronics_usages. */
+
+ if (!value) /* Handle key presses only. */
+ return 0;
+
+ prev_ts = drv_data->last_volume_key_ts;
+ cur_ts = jiffies;
+ if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
+ return 1; /* Ignore the repeated key. */
+
+ drv_data->last_volume_key_ts = cur_ts;
+ }
+
+ return 0;
+}
+
static unsigned long plantronics_device_type(struct hid_device *hdev)
{
unsigned i, col_page;
@@ -136,15 +172,24 @@ exit:
static int plantronics_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
+ struct plt_drv_data *drv_data;
int ret;
+ drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
goto err;
}
- hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev));
+ drv_data->device_type = plantronics_device_type(hdev);
+ drv_data->quirks = id->driver_data;
+ drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
+
+ hid_set_drvdata(hdev, drv_data);
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE);
@@ -156,15 +201,26 @@ err:
}
static const struct hid_device_id plantronics_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
+ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, plantronics_devices);
+static const struct hid_usage_id plantronics_usages[] = {
+ { HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
+ { HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
+ { HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
+};
+
static struct hid_driver plantronics_driver = {
.name = "plantronics",
.id_table = plantronics_devices,
+ .usage_table = plantronics_usages,
.input_mapping = plantronics_input_mapping,
+ .event = plantronics_event,
.probe = plantronics_probe,
};
module_hid_driver(plantronics_driver);
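
The new plantronics_event() hook drops the second of two identical volume-key presses that certain headsets send within a few milliseconds; only presses are filtered, and the timestamp is primed at probe time so the very first press is never swallowed. A userspace sketch of the same debounce logic using plain millisecond timestamps (the driver itself works in jiffies):

#include <stdio.h>
#include <stdbool.h>

#define DOUBLE_KEY_TIMEOUT_MS 5

static unsigned long last_key_ms;	/* the driver primes this at probe */

/* returns true when the event should be dropped as a duplicate */
static bool filter_volume_key(unsigned long now_ms, int value)
{
	if (!value)					/* key releases pass through */
		return false;
	if (now_ms - last_key_ms <= DOUBLE_KEY_TIMEOUT_MS)
		return true;				/* second report of the same press */
	last_key_ms = now_ms;
	return false;
}

int main(void)
{
	unsigned long t[] = { 100, 102, 250 };		/* ms: press, duplicate, new press */

	for (int i = 0; i < 3; i++)
		printf("event at %lu ms: %s\n", t[i],
		       filter_volume_key(t[i], 1) ? "dropped" : "delivered");
	return 0;
}
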
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index b9529bed4d76..8fbe7b9cd84a 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -74,6 +74,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_REDRAGON_SEYMUR2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER), HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
@@ -85,11 +86,17 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD), HID_QUIRK_MULTI_INPUT },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_GV186),
+ HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_KISHI),
+ HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
@@ -146,6 +153,8 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
@@ -163,10 +172,12 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TRUST, USB_DEVICE_ID_TRUST_PANORA_TABLET), HID_QUIRK_MULTI_INPUT | HID_QUIRK_HIDINPUT_FORCE },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
@@ -176,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_GROUP_AUDIO), HID_QUIRK_NOGET },
{ 0 }
};
@@ -398,9 +410,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
#endif
-#if IS_ENABLED(CONFIG_HID_ITE)
- { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
-#endif
#if IS_ENABLED(CONFIG_HID_ICADE)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
#endif
@@ -490,6 +499,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3) },
#endif
#if IS_ENABLED(CONFIG_HID_MICROSOFT)
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
@@ -876,6 +886,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAI, USB_DEVICE_ID_CYPRESS_HIDCOM) },
#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index bf4675a27396..9be8c31f613f 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -297,31 +297,40 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0, difference, old_profile;
+ struct kone_settings *settings = (struct kone_settings *)buf;
/* I need to get my data in one piece */
if (off != 0 || count != sizeof(struct kone_settings))
return -EINVAL;
mutex_lock(&kone->kone_lock);
- difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
+ difference = memcmp(settings, &kone->settings,
+ sizeof(struct kone_settings));
if (difference) {
- retval = kone_set_settings(usb_dev,
- (struct kone_settings const *)buf);
- if (retval) {
- mutex_unlock(&kone->kone_lock);
- return retval;
+ if (settings->startup_profile < 1 ||
+ settings->startup_profile > 5) {
+ retval = -EINVAL;
+ goto unlock;
}
+ retval = kone_set_settings(usb_dev, settings);
+ if (retval)
+ goto unlock;
+
old_profile = kone->settings.startup_profile;
- memcpy(&kone->settings, buf, sizeof(struct kone_settings));
+ memcpy(&kone->settings, settings, sizeof(struct kone_settings));
kone_profile_activated(kone, kone->settings.startup_profile);
if (kone->settings.startup_profile != old_profile)
kone_profile_report(kone, kone->settings.startup_profile);
}
+unlock:
mutex_unlock(&kone->kone_lock);
+ if (retval)
+ return retval;
+
return sizeof(struct kone_settings);
}
static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
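
The hid-roccat-kone hunk above treats the incoming sysfs blob as a settings struct, rejects an out-of-range startup_profile before touching the hardware, and funnels every exit through one unlock label. A minimal stand-alone C sketch of the same validate-before-commit shape (struct fake_settings and write_settings() are illustrative stand-ins, not the driver's API):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct fake_settings {
	unsigned char startup_profile;	/* valid range 1..5, as in the driver */
	unsigned char payload[31];
};

static struct fake_settings current_settings;

/* Validate the caller-supplied blob first, then commit it in one go. */
static int write_settings(const void *buf, size_t count)
{
	const struct fake_settings *s = buf;

	if (count != sizeof(*s))
		return -EINVAL;		/* whole struct or nothing */
	if (s->startup_profile < 1 || s->startup_profile > 5)
		return -EINVAL;		/* reject before any side effect */

	memcpy(&current_settings, s, sizeof(*s));
	return (int)sizeof(*s);
}

int main(void)
{
	struct fake_settings bad = { .startup_profile = 9 };
	struct fake_settings good = { .startup_profile = 2 };

	printf("bad:  %d\n", write_settings(&bad, sizeof(bad)));	/* -22 */
	printf("good: %d\n", write_settings(&good, sizeof(good)));	/* 32 */
	return 0;
}
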
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 4256fdc5cd6d..21fbdcde1faa 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -496,7 +496,8 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
return 1;
ptr = raw_data;
- ptr++; /* Skip report id */
+ if (report->id)
+ ptr++; /* Skip report id */
spin_lock_irqsave(&pdata->lock, flags);
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index d05c387a588e..3c6eda0c5596 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -869,6 +869,23 @@ static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
if (sc->quirks & PS3REMOTE)
return ps3remote_fixup(hdev, rdesc, rsize);
+ /*
+ * Some knock-off USB dongles incorrectly report their button count
+	 * as 13 instead of 16, causing three non-functional buttons.
+ */
+ if ((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize >= 45 &&
+ /* Report Count (13) */
+ rdesc[23] == 0x95 && rdesc[24] == 0x0D &&
+ /* Usage Maximum (13) */
+ rdesc[37] == 0x29 && rdesc[38] == 0x0D &&
+ /* Report Count (3) */
+ rdesc[43] == 0x95 && rdesc[44] == 0x03) {
+ hid_info(hdev, "Fixing up USB dongle report descriptor\n");
+ rdesc[24] = 0x10;
+ rdesc[38] = 0x10;
+ rdesc[44] = 0x00;
+ }
+
return rdesc;
}
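
The hid-sony hunk patches report-descriptor bytes only after checking that every targeted offset still holds the expected opcode, so an unrelated descriptor is never corrupted. A hedged stand-alone sketch of that verify-then-patch pattern (the descriptor fragment and offsets below are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Patch desc[off] from old_val to new_val, but only if the byte really
 * holds old_val; returns true when the patch was applied. */
static bool patch_byte(uint8_t *desc, size_t len, size_t off,
		       uint8_t old_val, uint8_t new_val)
{
	if (off >= len || desc[off] != old_val)
		return false;	/* descriptor does not match, leave it alone */
	desc[off] = new_val;
	return true;
}

int main(void)
{
	/* Illustrative fragment: 0x95 0x0d encodes "Report Count (13)". */
	uint8_t desc[] = { 0x05, 0x09, 0x95, 0x0d, 0x29, 0x0d };

	/* Bump the count from 13 to 16 only if the opcode is where we expect. */
	if (patch_byte(desc, sizeof(desc), 3, 0x0d, 0x10))
		printf("patched, new count byte: 0x%02x\n", desc[3]);
	else
		printf("descriptor did not match, left untouched\n");
	return 0;
}
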
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 6286204d4c56..a3b151b29bd7 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -526,7 +526,8 @@ static int steam_register(struct steam_device *steam)
steam_battery_register(steam);
mutex_lock(&steam_devices_lock);
- list_add(&steam->list, &steam_devices);
+ if (list_empty(&steam->list))
+ list_add(&steam->list, &steam_devices);
mutex_unlock(&steam_devices_lock);
}
@@ -552,7 +553,7 @@ static void steam_unregister(struct steam_device *steam)
hid_info(steam->hdev, "Steam Controller '%s' disconnected",
steam->serial_no);
mutex_lock(&steam_devices_lock);
- list_del(&steam->list);
+ list_del_init(&steam->list);
mutex_unlock(&steam_devices_lock);
steam->serial_no[0] = 0;
}
@@ -738,6 +739,7 @@ static int steam_probe(struct hid_device *hdev,
mutex_init(&steam->mutex);
steam->quirks = id->driver_data;
INIT_WORK(&steam->work_connect, steam_work_connect_cb);
+ INIT_LIST_HEAD(&steam->list);
steam->client_hdev = steam_create_client_hid(hdev);
if (IS_ERR(steam->client_hdev)) {
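
The hid-steam fix leans on the kernel idiom where INIT_LIST_HEAD() makes a node point at itself, list_empty() on the node says whether it is currently linked, and list_del_init() leaves it re-initialised so removing it again is harmless. A self-contained approximation of that idiom (the node_* helpers are illustrative, not list.h):

#include <assert.h>
#include <stdbool.h>

struct node { struct node *prev, *next; };

static void node_init(struct node *n) { n->prev = n->next = n; }
static bool node_unlinked(const struct node *n) { return n->next == n; }

static void node_add(struct node *n, struct node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void node_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	node_init(n);			/* leave the node self-pointing */
}

int main(void)
{
	struct node head, a;

	node_init(&head);
	node_init(&a);

	if (node_unlinked(&a))		/* mirrors the list_empty() check in steam_register() */
		node_add(&a, &head);

	node_del_init(&a);		/* calling this twice is harmless */
	node_del_init(&a);
	assert(node_unlinked(&a));
	return 0;
}
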
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index f2c8c59fc582..1f8d403d3db4 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -187,6 +187,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_BOGUS_IRQ },
{ USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
I2C_HID_QUIRK_RESET_ON_RESUME },
+ { I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393,
+ I2C_HID_QUIRK_RESET_ON_RESUME },
{ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
I2C_HID_QUIRK_BAD_INPUT_SIZE },
{ 0, 0 }
@@ -442,6 +444,19 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
dev_err(&client->dev, "failed to change power setting.\n");
set_pwr_exit:
+
+ /*
+ * The HID over I2C specification states that if a DEVICE needs time
+ * after the PWR_ON request, it should utilise CLOCK stretching.
+	 * However, it has been observed that the Windows driver provides a
+	 * 1ms sleep between the PWR_ON and RESET requests.
+	 * According to Goodix, Windows even waits 60 ms after (other?)
+	 * PWR_ON requests. Testing has confirmed that several devices
+ * will not work properly without a delay after a PWR_ON request.
+ */
+ if (!ret && power_state == I2C_HID_PWR_ON)
+ msleep(60);
+
return ret;
}
@@ -463,15 +478,6 @@ static int i2c_hid_hwreset(struct i2c_client *client)
if (ret)
goto out_unlock;
- /*
- * The HID over I2C specification states that if a DEVICE needs time
- * after the PWR_ON request, it should utilise CLOCK stretching.
- * However, it has been observered that the Windows driver provides a
- * 1ms sleep between the PWR_ON and RESET requests and that some devices
- * rely on this.
- */
- usleep_range(1000, 5000);
-
i2c_hid_dbg(ihid, "resetting...\n");
ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index 95052373a828..58a753ef2717 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -374,6 +374,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
.driver_data = (void *)&sipodev_desc
},
{
+ .ident = "Mediacom FlexBook edge 13",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook_edge13-M-FBE13"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
.ident = "Odys Winbook 13",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AXDIA International GmbH"),
@@ -381,6 +389,22 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
},
.driver_data = (void *)&sipodev_desc
},
+ {
+ .ident = "Schneider SCL142ALM",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SCHNEIDER"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SCL142ALM"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Vero K147",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VERO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "K147"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
{ } /* Terminate list */
};
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 11103efebbaa..1e6f8b0d00fb 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -685,16 +685,21 @@ static int usbhid_open(struct hid_device *hid)
struct usbhid_device *usbhid = hid->driver_data;
int res;
+ mutex_lock(&usbhid->mutex);
+
set_bit(HID_OPENED, &usbhid->iofl);
- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
- return 0;
+ if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
+ res = 0;
+ goto Done;
+ }
res = usb_autopm_get_interface(usbhid->intf);
/* the device must be awake to reliably request remote wakeup */
if (res < 0) {
clear_bit(HID_OPENED, &usbhid->iofl);
- return -EIO;
+ res = -EIO;
+ goto Done;
}
usbhid->intf->needs_remote_wakeup = 1;
@@ -728,6 +733,9 @@ static int usbhid_open(struct hid_device *hid)
msleep(50);
clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
+
+ Done:
+ mutex_unlock(&usbhid->mutex);
return res;
}
@@ -735,6 +743,8 @@ static void usbhid_close(struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
+ mutex_lock(&usbhid->mutex);
+
/*
* Make sure we don't restart data acquisition due to
* a resumption we no longer care about by avoiding racing
@@ -746,12 +756,13 @@ static void usbhid_close(struct hid_device *hid)
clear_bit(HID_IN_POLLING, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
- return;
+ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
+ hid_cancel_delayed_stuff(usbhid);
+ usb_kill_urb(usbhid->urbin);
+ usbhid->intf->needs_remote_wakeup = 0;
+ }
- hid_cancel_delayed_stuff(usbhid);
- usb_kill_urb(usbhid->urbin);
- usbhid->intf->needs_remote_wakeup = 0;
+ mutex_unlock(&usbhid->mutex);
}
/*
@@ -1060,6 +1071,8 @@ static int usbhid_start(struct hid_device *hid)
unsigned int n, insize = 0;
int ret;
+ mutex_lock(&usbhid->mutex);
+
clear_bit(HID_DISCONNECTED, &usbhid->iofl);
usbhid->bufsize = HID_MIN_BUFFER_SIZE;
@@ -1180,6 +1193,8 @@ static int usbhid_start(struct hid_device *hid)
usbhid_set_leds(hid);
device_set_wakeup_enable(&dev->dev, 1);
}
+
+ mutex_unlock(&usbhid->mutex);
return 0;
fail:
@@ -1190,6 +1205,7 @@ fail:
usbhid->urbout = NULL;
usbhid->urbctrl = NULL;
hid_free_buffers(dev, hid);
+ mutex_unlock(&usbhid->mutex);
return ret;
}
@@ -1205,6 +1221,8 @@ static void usbhid_stop(struct hid_device *hid)
usbhid->intf->needs_remote_wakeup = 0;
}
+ mutex_lock(&usbhid->mutex);
+
clear_bit(HID_STARTED, &usbhid->iofl);
spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
set_bit(HID_DISCONNECTED, &usbhid->iofl);
@@ -1225,6 +1243,8 @@ static void usbhid_stop(struct hid_device *hid)
usbhid->urbout = NULL;
hid_free_buffers(hid_to_usb_dev(hid), hid);
+
+ mutex_unlock(&usbhid->mutex);
}
static int usbhid_power(struct hid_device *hid, int lvl)
@@ -1385,6 +1405,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
INIT_WORK(&usbhid->reset_work, hid_reset);
timer_setup(&usbhid->io_retry, hid_retry_timeout, 0);
spin_lock_init(&usbhid->lock);
+ mutex_init(&usbhid->mutex);
ret = hid_add_device(hid);
if (ret) {
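
The usbhid changes serialise start/stop/open/close on a new per-device mutex and route every early return through a label so the lock is always dropped. A small pthread sketch of that shape, assuming a toy device struct rather than the usbhid API:

#include <pthread.h>
#include <stdio.h>

struct toy_dev {
	pthread_mutex_t mutex;	/* serialises open/close/start/stop */
	int opened;
	int always_poll;	/* quirk: URBs keep running, skip power handling */
};

static int toy_open(struct toy_dev *d)
{
	int res = 0;

	pthread_mutex_lock(&d->mutex);
	d->opened = 1;
	if (d->always_poll)
		goto done;	/* nothing more to do, but still unlock */

	/* ... resume I/O, request remote wakeup, etc. ... */
done:
	pthread_mutex_unlock(&d->mutex);
	return res;
}

int main(void)
{
	struct toy_dev d = { .mutex = PTHREAD_MUTEX_INITIALIZER, .always_poll = 1 };

	printf("open -> %d, opened=%d\n", toy_open(&d), d.opened);
	return 0;
}
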
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index c34ef95d7cef..2dff663847c6 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -532,12 +532,16 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
switch (cmd) {
case HIDIOCGUSAGE:
+ if (uref->usage_index >= field->report_count)
+ goto inval;
uref->value = field->value[uref->usage_index];
if (copy_to_user(user_arg, uref, sizeof(*uref)))
goto fault;
goto goodreturn;
case HIDIOCSUSAGE:
+ if (uref->usage_index >= field->report_count)
+ goto inval;
field->value[uref->usage_index] = uref->value;
goto goodreturn;
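
The hiddev fix is a straight bounds check: usage_index comes from user space and must be validated against field->report_count before it indexes field->value[]. The same shape in stand-alone C (struct toy_field is an illustrative stand-in):

#include <errno.h>
#include <stdio.h>

struct toy_field {
	unsigned report_count;		/* number of valid entries in value[] */
	int value[4];
};

static int get_usage(const struct toy_field *f, unsigned index, int *out)
{
	if (index >= f->report_count)
		return -EINVAL;		/* user-controlled index: reject first */
	*out = f->value[index];
	return 0;
}

int main(void)
{
	struct toy_field f = { .report_count = 2, .value = { 10, 20, 0, 0 } };
	int v;

	printf("index 1: %d\n", get_usage(&f, 1, &v) ? -1 : v);	/* 20 */
	printf("index 3: %d\n", get_usage(&f, 3, &v));		/* -22 */
	return 0;
}
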
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index da9c61d54be6..caa0ee639581 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -93,6 +93,7 @@ struct usbhid_device {
dma_addr_t outbuf_dma; /* Output buffer dma */
unsigned long last_out; /* record of last output for timeouts */
+ struct mutex mutex; /* start/stop/open/close */
spinlock_t lock; /* fifo spinlock */
unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
struct timer_list io_retry; /* Retry timer */
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 3038c975e417..8006732b8f42 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -150,9 +150,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
}
if (flush)
- wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+ wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
else if (insert)
- wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
+ wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
raw_data, report_size);
return insert && !flush;
@@ -290,9 +290,11 @@ static void wacom_feature_mapping(struct hid_device *hdev,
data[0] = field->report->id;
ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
data, n, WAC_CMD_RETRIES);
- if (ret == n) {
+ if (ret == n && features->type == HID_GENERIC) {
ret = hid_report_raw_event(hdev,
HID_FEATURE_REPORT, data, n, 0);
+ } else if (ret == 2 && features->type != HID_GENERIC) {
+ features->touch_max = data[1];
} else {
features->touch_max = 16;
hid_warn(hdev, "wacom_feature_mapping: "
@@ -1239,6 +1241,38 @@ static int wacom_devm_sysfs_create_group(struct wacom *wacom,
group);
}
+static void wacom_devm_kfifo_release(struct device *dev, void *res)
+{
+ struct kfifo_rec_ptr_2 *devres = res;
+
+ kfifo_free(devres);
+}
+
+static int wacom_devm_kfifo_alloc(struct wacom *wacom)
+{
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct kfifo_rec_ptr_2 *pen_fifo;
+ int error;
+
+ pen_fifo = devres_alloc(wacom_devm_kfifo_release,
+ sizeof(struct kfifo_rec_ptr_2),
+ GFP_KERNEL);
+
+ if (!pen_fifo)
+ return -ENOMEM;
+
+ error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ if (error) {
+ devres_free(pen_fifo);
+ return error;
+ }
+
+ devres_add(&wacom->hdev->dev, pen_fifo);
+ wacom_wac->pen_fifo = pen_fifo;
+
+ return 0;
+}
+
enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
{
struct wacom *wacom = led->wacom;
@@ -2695,7 +2729,7 @@ static int wacom_probe(struct hid_device *hdev,
goto fail;
}
- error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ error = wacom_devm_kfifo_alloc(wacom);
if (error)
goto fail;
@@ -2762,8 +2796,6 @@ static void wacom_remove(struct hid_device *hdev)
if (wacom->wacom_wac.features.type != REMOTE)
wacom_release_resources(wacom);
- kfifo_free(&wacom_wac->pen_fifo);
-
hid_set_drvdata(hdev, NULL);
}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 77bb46948eea..10524c93f8b6 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2496,7 +2496,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
!wacom_wac->shared->is_touch_on) {
if (!wacom_wac->shared->touch_down)
return;
- prox = 0;
+ prox = false;
}
wacom_wac->hid_data.num_received++;
@@ -2557,7 +2557,12 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
wacom_wac->hid_data.tipswitch = value;
break;
case HID_DG_CONTACTMAX:
- features->touch_max = value;
+ if (!features->touch_max) {
+ features->touch_max = value;
+ } else {
+ hid_warn(hdev, "%s: ignoring attempt to overwrite non-zero touch_max "
+ "%d -> %d\n", __func__, features->touch_max, value);
+ }
return;
}
@@ -2729,7 +2734,9 @@ static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *repo
if (report->type != HID_INPUT_REPORT)
return -1;
- if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ if (WACOM_PAD_FIELD(field))
+ return 0;
+ else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
wacom_wac_pen_report(hdev, report);
else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
wacom_wac_finger_report(hdev, report);
@@ -3521,8 +3528,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
if (!(features->device_type & WACOM_DEVICETYPE_PEN))
return -ENODEV;
@@ -3537,6 +3542,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
return 0;
}
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
@@ -3687,8 +3693,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
return -ENODEV;
@@ -3701,6 +3705,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
/* setup has already been done */
return 0;
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);
if (features->touch_max == 1) {
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index f67d871841c0..46da97162ef4 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -344,7 +344,7 @@ struct wacom_wac {
struct input_dev *pen_input;
struct input_dev *touch_input;
struct input_dev *pad_input;
- struct kfifo_rec_ptr_2 pen_fifo;
+ struct kfifo_rec_ptr_2 *pen_fifo;
int pid;
int num_contacts_left;
u8 bt_features;
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 41a09f506803..129c5e6bc654 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -389,7 +389,7 @@ static int ssi_add_controller(struct hsi_controller *ssi,
err = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL);
if (err < 0)
- goto out_err;
+ return err;
ssi->id = err;
ssi->owner = THIS_MODULE;
diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
index 9065efd21851..71895da63810 100644
--- a/drivers/hsi/hsi_core.c
+++ b/drivers/hsi/hsi_core.c
@@ -223,8 +223,6 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
if (err)
goto err;
- dev_set_name(&cl->device, "%s", name);
-
err = hsi_of_property_parse_mode(client, "hsi-mode", &mode);
if (err) {
err = hsi_of_property_parse_mode(client, "hsi-rx-mode",
@@ -306,6 +304,7 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
cl->device.release = hsi_client_release;
cl->device.of_node = client;
+ dev_set_name(&cl->device, "%s", name);
if (device_register(&cl->device) < 0) {
pr_err("hsi: failed to register client: %s\n", name);
put_device(&cl->device);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 16eb9b3f1cb1..cdd4392c589d 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -773,13 +773,19 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
free_cpumask_var(available_mask);
}
+#define UNLOAD_DELAY_UNIT_MS 10 /* 10 milliseconds */
+#define UNLOAD_WAIT_MS (100*1000) /* 100 seconds */
+#define UNLOAD_WAIT_LOOPS (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
+#define UNLOAD_MSG_MS (5*1000) /* Every 5 seconds */
+#define UNLOAD_MSG_LOOPS (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
+
static void vmbus_wait_for_unload(void)
{
int cpu;
void *page_addr;
struct hv_message *msg;
struct vmbus_channel_message_header *hdr;
- u32 message_type;
+ u32 message_type, i;
/*
* CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
@@ -789,10 +795,18 @@ static void vmbus_wait_for_unload(void)
* functional and vmbus_unload_response() will complete
* vmbus_connection.unload_event. If not, the last thing we can do is
* read message pages for all CPUs directly.
+ *
+	 * Wait up to 100 seconds since an Azure host must write back any dirty
+ * data in its disk cache before the VMbus UNLOAD request will
+ * complete. This flushing has been empirically observed to take up
+ * to 50 seconds in cases with a lot of dirty data, so allow additional
+	 * leeway to account for inaccuracies in mdelay(). But eventually time out so
+ * that the panic path can't get hung forever in case the response
+ * message isn't seen.
*/
- while (1) {
+ for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
if (completion_done(&vmbus_connection.unload_event))
- break;
+ goto completed;
for_each_online_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
@@ -815,9 +829,18 @@ static void vmbus_wait_for_unload(void)
vmbus_signal_eom(msg, message_type);
}
- mdelay(10);
+ /*
+ * Give a notice periodically so someone watching the
+ * serial output won't think it is completely hung.
+ */
+ if (!(i % UNLOAD_MSG_LOOPS))
+ pr_notice("Waiting for VMBus UNLOAD to complete\n");
+
+ mdelay(UNLOAD_DELAY_UNIT_MS);
}
+ pr_err("Continuing even though VMBus UNLOAD did not complete\n");
+completed:
/*
* We're crashing and already got the UNLOAD_RESPONSE, cleanup all
* maybe-pending messages on all CPUs to be able to receive new
@@ -849,6 +872,9 @@ void vmbus_initiate_unload(bool crash)
{
struct vmbus_channel_message_header hdr;
+ if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
+ return;
+
/* Pre-Win2012R2 hosts don't support reconnect */
if (vmbus_proto_version < VERSION_WIN8_1)
return;
@@ -995,8 +1021,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
vmbus_device_unregister(channel->device_obj);
put_device(dev);
}
- }
- if (channel->primary_channel != NULL) {
+ } else if (channel->primary_channel != NULL) {
/*
* Sub-channel is being rescinded. Following is the channel
		 * close sequence when initiated from the driver (refer to
@@ -1246,6 +1271,8 @@ channel_message_table[CHANNELMSG_COUNT] = {
{ CHANNELMSG_19, 0, NULL },
{ CHANNELMSG_20, 0, NULL },
{ CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL },
+ { CHANNELMSG_22, 0, NULL },
+ { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL },
};
/*
@@ -1257,25 +1284,16 @@ void vmbus_onmessage(void *context)
{
struct hv_message *msg = context;
struct vmbus_channel_message_header *hdr;
- int size;
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
- size = msg->header.payload_size;
trace_vmbus_on_message(hdr);
- if (hdr->msgtype >= CHANNELMSG_COUNT) {
- pr_err("Received invalid channel message type %d size %d\n",
- hdr->msgtype, size);
- print_hex_dump_bytes("", DUMP_PREFIX_NONE,
- (unsigned char *)msg->u.payload, size);
- return;
- }
-
- if (channel_message_table[hdr->msgtype].message_handler)
- channel_message_table[hdr->msgtype].message_handler(hdr);
- else
- pr_err("Unhandled channel message type %d\n", hdr->msgtype);
+ /*
+	 * vmbus_on_msg_dpc() makes sure the hdr->msgtype here cannot go
+	 * out of bounds and the message_handler pointer cannot be NULL.
+ */
+ channel_message_table[hdr->msgtype].message_handler(hdr);
}
/*
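
The vmbus_wait_for_unload() rework replaces an unbounded while(1) with a counted loop: poll, sleep a fixed unit, print a progress notice every few iterations, and give up after a hard ceiling so the panic path can never hang forever. A user-space sketch of that structure (condition_met() stands in for the completion check; the timings are scaled down for illustration):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define DELAY_UNIT_MS	10
#define WAIT_MS		200			/* hard ceiling (100 s in the driver) */
#define WAIT_LOOPS	(WAIT_MS / DELAY_UNIT_MS)
#define MSG_LOOPS	(50 / DELAY_UNIT_MS)	/* progress notice interval */

static bool condition_met(unsigned i)
{
	return i == 7;				/* pretend it completes on loop 7 */
}

int main(void)
{
	unsigned i;

	for (i = 1; i <= WAIT_LOOPS; i++) {
		if (condition_met(i)) {
			printf("completed after %u loops\n", i);
			return 0;
		}
		if (!(i % MSG_LOOPS))
			printf("still waiting...\n");
		usleep(DELAY_UNIT_MS * 1000);
	}
	printf("timed out, continuing anyway\n");
	return 0;
}
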
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 9ca0706a9d40..e5fc719a34e7 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1275,7 +1275,7 @@ static void balloon_up(struct work_struct *dummy)
/* Refuse to balloon below the floor. */
if (avail_pages < num_pages || avail_pages - num_pages < floor) {
- pr_warn("Balloon request will be partially fulfilled. %s\n",
+ pr_info("Balloon request will be partially fulfilled. %s\n",
avail_pages < num_pages ? "Not enough memory." :
"Balloon floor reached.");
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 9aa18f387a34..0699c6018889 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -43,6 +43,7 @@
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
+#include <linux/kernel.h>
#include "hyperv_vmbus.h"
struct vmbus_dynid {
@@ -58,14 +59,35 @@ static int hyperv_cpuhp_online;
static void *hv_panic_page;
+/*
+ * Boolean to control whether to report panic messages over Hyper-V.
+ *
+ * It can be set via /proc/sys/kernel/hyperv/record_panic_msg
+ */
+static int sysctl_record_panic_msg = 1;
+
+static int hyperv_report_reg(void)
+{
+ return !sysctl_record_panic_msg || !hv_panic_page;
+}
+
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
void *args)
{
struct pt_regs *regs;
- regs = current_pt_regs();
+ vmbus_initiate_unload(true);
- hyperv_report_panic(regs, val);
+ /*
+ * Hyper-V should be notified only once about a panic. If we will be
+ * doing hyperv_report_panic_msg() later with kmsg data, don't do
+ * the notification here.
+ */
+ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
+ && hyperv_report_reg()) {
+ regs = current_pt_regs();
+ hyperv_report_panic(regs, val, false);
+ }
return NOTIFY_DONE;
}
@@ -75,7 +97,13 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
struct die_args *die = (struct die_args *)args;
struct pt_regs *regs = die->regs;
- hyperv_report_panic(regs, val);
+ /*
+ * Hyper-V should be notified only once about a panic. If we will be
+ * doing hyperv_report_panic_msg() later with kmsg data, don't do
+ * the notification here.
+ */
+ if (hyperv_report_reg())
+ hyperv_report_panic(regs, val, true);
return NOTIFY_DONE;
}
@@ -911,6 +939,10 @@ void vmbus_on_msg_dpc(unsigned long data)
}
entry = &channel_message_table[hdr->msgtype];
+
+ if (!entry->message_handler)
+ goto msg_handled;
+
if (entry->handler_type == VMHT_BLOCKING) {
ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
if (ctx == NULL)
@@ -1089,13 +1121,6 @@ static void vmbus_isr(void)
}
/*
- * Boolean to control whether to report panic messages over Hyper-V.
- *
- * It can be set via /proc/sys/kernel/hyperv/record_panic_msg
- */
-static int sysctl_record_panic_msg = 1;
-
-/*
* Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
* buffer and call into Hyper-V to transfer the data.
*/
@@ -1219,19 +1244,29 @@ static int vmbus_bus_init(void)
hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL);
if (hv_panic_page) {
ret = kmsg_dump_register(&hv_kmsg_dumper);
- if (ret)
+ if (ret) {
pr_err("Hyper-V: kmsg dump register "
"error 0x%x\n", ret);
+ free_page(
+ (unsigned long)hv_panic_page);
+ hv_panic_page = NULL;
+ }
} else
pr_err("Hyper-V: panic message page memory "
"allocation failed");
}
register_die_notifier(&hyperv_die_block);
- atomic_notifier_chain_register(&panic_notifier_list,
- &hyperv_panic_block);
}
+ /*
+ * Always register the panic notifier because we need to unload
+ * the VMbus channel connection to prevent any VMbus
+ * activity after the VM panics.
+ */
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &hyperv_panic_block);
+
vmbus_request_offers();
return 0;
@@ -1243,7 +1278,6 @@ err_alloc:
hv_remove_vmbus_irq();
bus_unregister(&hv_bus);
- free_page((unsigned long)hv_panic_page);
unregister_sysctl_table(hv_ctl_table_hdr);
hv_ctl_table_hdr = NULL;
return ret;
@@ -1875,7 +1909,6 @@ static void hv_kexec_handler(void)
{
hv_synic_clockevents_cleanup();
vmbus_initiate_unload(false);
- vmbus_connection.conn_state = DISCONNECTED;
/* Make sure conn_state is set as hv_synic_cleanup checks for it */
mb();
cpuhp_remove_state(hyperv_cpuhp_online);
@@ -1890,7 +1923,6 @@ static void hv_crash_handler(struct pt_regs *regs)
* doing the cleanup for current CPU only. This should be sufficient
* for kdump.
*/
- vmbus_connection.conn_state = DISCONNECTED;
hv_synic_cleanup(smp_processor_id());
hyperv_cleanup();
};
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 2f2fb1966958..2f40da04a944 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -896,7 +896,7 @@ static int acpi_power_meter_add(struct acpi_device *device)
res = setup_attrs(resource);
if (res)
- goto exit_free;
+ goto exit_free_capability;
resource->hwmon_dev = hwmon_device_register(&device->dev);
if (IS_ERR(resource->hwmon_dev)) {
@@ -909,6 +909,8 @@ static int acpi_power_meter_add(struct acpi_device *device)
exit_remove:
remove_attrs(resource);
+exit_free_capability:
+ free_capabilities(resource);
exit_free:
kfree(resource);
exit:
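
The acpi_power_meter fix adds a dedicated exit_free_capability label so a failure in setup_attrs() releases what had already been acquired. The general shape of ordered goto unwinding, sketched with stand-alone stand-ins (step1/step2 are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

static int step1_ok;

static int step1(void) { step1_ok = 1; return 0; }
static int step2(void) { return -1; }		/* pretend this one fails */
static void undo_step1(void) { step1_ok = 0; }

static int do_setup(void)
{
	int err;
	char *buf = malloc(32);

	if (!buf)
		return -1;
	err = step1();
	if (err)
		goto free_buf;
	err = step2();
	if (err)
		goto undo1;		/* unwind in reverse order of setup */

	free(buf);
	return 0;

undo1:
	undo_step1();
free_buf:
	free(buf);
	return err;
}

int main(void)
{
	printf("do_setup() = %d, step1_ok = %d\n", do_setup(), step1_ok);
	return 0;
}
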
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 5c677ba44014..b201129a9bea 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -760,15 +760,18 @@ static ssize_t applesmc_light_show(struct device *dev,
}
ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length);
+ if (ret)
+ goto out;
/* newer macbooks report a single 10-bit bigendian value */
if (data_length == 10) {
left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2;
goto out;
}
left = buffer[2];
+
+ ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
if (ret)
goto out;
- ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
right = buffer[2];
out:
@@ -817,12 +820,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev,
to_index(attr));
ret = applesmc_read_key(newkey, buffer, 2);
- speed = ((buffer[0] << 8 | buffer[1]) >> 2);
-
if (ret)
return ret;
- else
- return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
+
+ speed = ((buffer[0] << 8 | buffer[1]) >> 2);
+ return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
}
static ssize_t applesmc_store_fan_speed(struct device *dev,
@@ -858,12 +860,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev,
u8 buffer[2];
ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
- manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
-
if (ret)
return ret;
- else
- return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
+
+ manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
+ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
}
static ssize_t applesmc_store_fan_manual(struct device *dev,
@@ -879,10 +880,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev,
return -EINVAL;
ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
- val = (buffer[0] << 8 | buffer[1]);
if (ret)
goto out;
+ val = (buffer[0] << 8 | buffer[1]);
+
if (input)
val = val | (0x01 << to_index(attr));
else
@@ -958,13 +960,12 @@ static ssize_t applesmc_key_count_show(struct device *dev,
u32 count;
ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4);
- count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
- ((u32)buffer[2]<<8) + buffer[3];
-
if (ret)
return ret;
- else
- return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
+
+ count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
+ ((u32)buffer[2]<<8) + buffer[3];
+ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
}
static ssize_t applesmc_key_at_index_read_show(struct device *dev,
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 5e449eac788a..a43fa730a513 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -880,6 +880,8 @@ static int aspeed_create_fan(struct device *dev,
ret = of_property_read_u32(child, "reg", &pwm_port);
if (ret)
return ret;
+ if (pwm_port >= ARRAY_SIZE(pwm_port_params))
+ return -EINVAL;
aspeed_create_pwm_port(priv, (u8)pwm_port);
ret = of_property_count_u8_elems(child, "cooling-levels");
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index a973eb6a2890..9e44d2385e6f 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -250,9 +250,9 @@ static ssize_t da9052_read_tsi(struct device *dev,
int channel = to_sensor_dev_attr(devattr)->index;
int ret;
- mutex_lock(&hwmon->hwmon_lock);
+ mutex_lock(&hwmon->da9052->auxadc_lock);
ret = __da9052_read_tsi(dev, channel);
- mutex_unlock(&hwmon->hwmon_lock);
+ mutex_unlock(&hwmon->da9052->auxadc_lock);
if (ret < 0)
return ret;
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 1ed9a7aa953d..f4985622b179 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -454,7 +454,7 @@ static ssize_t pwm1_enable_store(struct device *dev,
}
result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
- if (result) {
+ if (result < 0) {
count = result;
goto err;
}
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index e5234f953a6d..b6e5aaa54963 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -527,7 +527,7 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
data->config = config;
- hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "jc42",
data, &jc42_chip_info,
NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 2cef0c37ff6f..bc6871c8dd4e 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pci_ids.h>
#include <asm/amd_nb.h>
#include <asm/processor.h>
@@ -41,14 +42,6 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3 0x15b3
#endif
-#ifndef PCI_DEVICE_ID_AMD_17H_DF_F3
-#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
-#endif
-
-#ifndef PCI_DEVICE_ID_AMD_17H_M10H_DF_F3
-#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
-#endif
-
/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK 0xf0000000
#define CPUID_PKGTYPE_F 0x00000000
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index f9b8e3e23a8e..dc2bd82b3202 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -630,7 +630,6 @@ static int lm80_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm80_data *data;
- int rv;
data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
if (!data)
@@ -643,14 +642,8 @@ static int lm80_probe(struct i2c_client *client,
lm80_init_client(client);
/* A few vars need to be filled upon startup */
- rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
- if (rv < 0)
- return rv;
- data->fan[f_min][0] = rv;
- rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
- if (rv < 0)
- return rv;
- data->fan[f_min][1] = rv;
+ data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
+ data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, lm80_groups);
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index c2f411c290bf..c187e557678e 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -187,6 +187,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */
#define LM90_HAVE_TEMP3 (1 << 6) /* 3rd temperature sensor */
#define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */
+#define LM90_PAUSE_FOR_CONFIG (1 << 8) /* Pause conversion for config */
/* LM90 status */
#define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */
@@ -380,6 +381,7 @@ static const struct lm90_params lm90_params[] = {
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6657] = {
+ .flags = LM90_PAUSE_FOR_CONFIG,
.alert_alarms = 0x7c,
.max_convrate = 8,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
@@ -580,6 +582,38 @@ static inline int lm90_select_remote_channel(struct i2c_client *client,
return 0;
}
+static int lm90_write_convrate(struct i2c_client *client,
+ struct lm90_data *data, int val)
+{
+ int err;
+ int config_orig, config_stop;
+
+ /* Save config and pause conversion */
+ if (data->flags & LM90_PAUSE_FOR_CONFIG) {
+ config_orig = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+ if (config_orig < 0)
+ return config_orig;
+ config_stop = config_orig | 0x40;
+ if (config_orig != config_stop) {
+ err = i2c_smbus_write_byte_data(client,
+ LM90_REG_W_CONFIG1,
+ config_stop);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ /* Set conv rate */
+ err = i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE, val);
+
+ /* Revert change to config */
+ if (data->flags & LM90_PAUSE_FOR_CONFIG && config_orig != config_stop)
+ i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
+ config_orig);
+
+ return err;
+}
+
/*
* Set conversion rate.
* client->update_lock must be held when calling this function (unless we are
@@ -600,7 +634,7 @@ static int lm90_set_convrate(struct i2c_client *client, struct lm90_data *data,
if (interval >= update_interval * 3 / 4)
break;
- err = i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE, i);
+ err = lm90_write_convrate(client, data, i);
data->update_interval = DIV_ROUND_CLOSEST(update_interval, 64);
return err;
}
@@ -1606,8 +1640,7 @@ static void lm90_restore_conf(void *_data)
struct i2c_client *client = data->client;
/* Restore initial configuration */
- i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE,
- data->convrate_orig);
+ lm90_write_convrate(client, data, data->convrate_orig);
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
data->config_orig);
}
@@ -1624,12 +1657,13 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
/*
* Start the conversions.
*/
- lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */
config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
if (config < 0)
return config;
data->config_orig = config;
+ lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */
+
/* Check Temperature Range Select */
if (data->kind == adt7461 || data->kind == tmp451) {
if (config & 0x04)
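
lm90_write_convrate() wraps the conversion-rate write in a save/stop/restore sequence for chips flagged LM90_PAUSE_FOR_CONFIG, so the register only changes while conversions are paused and the original config is put back untouched. A simulated register file makes the shape easy to see (regs[], REG_CONFIG and REG_CONVRATE are invented here):

#include <stdio.h>

#define REG_CONFIG	0x03
#define REG_CONVRATE	0x04
#define CONFIG_STOP	0x40	/* "pause conversions" bit */

static unsigned char regs[256];	/* pretend register file */

static int reg_read(unsigned char reg) { return regs[reg]; }
static void reg_write(unsigned char reg, unsigned char val) { regs[reg] = val; }

/* Change the conversion rate with conversions paused, then restore config. */
static void write_convrate_paused(unsigned char rate)
{
	unsigned char orig = reg_read(REG_CONFIG);
	unsigned char stop = orig | CONFIG_STOP;

	if (orig != stop)
		reg_write(REG_CONFIG, stop);	/* pause */

	reg_write(REG_CONVRATE, rate);		/* the actual change */

	if (orig != stop)
		reg_write(REG_CONFIG, orig);	/* restore exactly what was there */
}

int main(void)
{
	regs[REG_CONFIG] = 0x01;
	write_convrate_paused(0x07);
	printf("config=0x%02x convrate=0x%02x\n",
	       reg_read(REG_CONFIG), reg_read(REG_CONVRATE));	/* 0x01 0x07 */
	return 0;
}
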
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 221fd1492057..6df28fe0577d 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -47,8 +47,9 @@ static const u8 MAX6697_REG_CRIT[] = {
* Map device tree / platform data register bit map to chip bit map.
* Applies to alert register and over-temperature register.
*/
-#define MAX6697_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \
+#define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \
(((reg) & 0x01) << 6) | ((reg) & 0x80))
+#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7))
#define MAX6697_REG_STAT(n) (0x44 + (n))
@@ -587,12 +588,12 @@ static int max6697_init_chip(struct max6697_data *data,
return ret;
ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK,
- MAX6697_MAP_BITS(pdata->alert_mask));
+ MAX6697_ALERT_MAP_BITS(pdata->alert_mask));
if (ret < 0)
return ret;
ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK,
- MAX6697_MAP_BITS(pdata->over_temperature_mask));
+ MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask));
if (ret < 0)
return ret;
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 13600fa79e7f..a19cf9052fc6 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -364,6 +364,7 @@ MODULE_DEVICE_TABLE(i2c, adm1275_id);
static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ s32 (*config_read_fn)(const struct i2c_client *client, u8 reg);
u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
int config, device_config;
int ret;
@@ -408,11 +409,16 @@ static int adm1275_probe(struct i2c_client *client,
"Device mismatch: Configured %s, detected %s\n",
id->name, mid->name);
- config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
+ if (mid->driver_data == adm1272 || mid->driver_data == adm1278 ||
+ mid->driver_data == adm1293 || mid->driver_data == adm1294)
+ config_read_fn = i2c_smbus_read_word_data;
+ else
+ config_read_fn = i2c_smbus_read_byte_data;
+ config = config_read_fn(client, ADM1275_PMON_CONFIG);
if (config < 0)
return config;
- device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG);
+ device_config = config_read_fn(client, ADM1275_DEVICE_CONFIG);
if (device_config < 0)
return device_config;
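
The adm1275 change picks the register accessor (byte vs word SMBus read) once, based on the detected chip, and then uses the same call sites for both. A small sketch of selecting an accessor through a function pointer (read_byte/read_word and the chip enum are stand-ins, not the SMBus API):

#include <stdio.h>

enum chip { CHIP_OLD, CHIP_NEW };

static int read_byte(unsigned reg) { (void)reg; return 0x5a; }	/* pretend 8-bit register */
static int read_word(unsigned reg) { (void)reg; return 0x1234; }	/* pretend 16-bit register */

int main(void)
{
	enum chip chips[] = { CHIP_OLD, CHIP_NEW };

	for (unsigned i = 0; i < 2; i++) {
		/* choose the accessor once, per detected chip */
		int (*read_fn)(unsigned) =
			(chips[i] == CHIP_NEW) ? read_word : read_byte;

		printf("chip %u: config = 0x%x\n", i, read_fn(0x10));
	}
	return 0;
}
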
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 47576c460010..9af5ab52ca31 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -400,7 +400,6 @@ static struct pmbus_driver_info max34440_info[] = {
.func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
- .read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
@@ -431,7 +430,6 @@ static struct pmbus_driver_info max34440_info[] = {
.func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
- .read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
@@ -467,7 +465,6 @@ static struct pmbus_driver_info max34440_info[] = {
.func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
- .read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 91976b6ca300..91bfecdb3f5b 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -99,7 +99,7 @@ static enum hwmon_sensor_types scmi_types[] = {
[ENERGY] = hwmon_energy,
};
-static u32 hwmon_attributes[] = {
+static u32 hwmon_attributes[hwmon_max] = {
[hwmon_chip] = HWMON_C_REGISTER_TZ,
[hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL,
[hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL,
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index e90af39283b1..29dc2eac5b06 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -583,15 +583,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
spin_lock_irqsave(&drvdata->spinlock, flags);
- /* There is no point in reading a TMC in HW FIFO mode */
- mode = readl_relaxed(drvdata->base + TMC_MODE);
- if (mode != TMC_MODE_CIRCULAR_BUFFER) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- return -EINVAL;
- }
-
/* Re-enable the TMC if need be */
if (drvdata->mode == CS_MODE_SYSFS) {
+ /* There is no point in reading a TMC in HW FIFO mode */
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return -EINVAL;
+ }
/*
* The trace run will continue with the same allocated trace
* buffer. As such zero-out the buffer so that we don't end
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 3b684687b5a7..9a3cb07555e3 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -183,6 +183,8 @@ static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
} else {
page = alloc_pages_node(node,
GFP_KERNEL | __GFP_ZERO, 0);
+ if (!page)
+ goto err;
}
paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
if (dma_mapping_error(dev, paddr))
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index a8e750291643..6c723b57dfc0 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -944,15 +944,30 @@ int intel_th_set_output(struct intel_th_device *thdev,
{
struct intel_th_device *hub = to_intel_th_hub(thdev);
struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
+ int ret;
/* In host mode, this is up to the external debugger, do nothing. */
if (hub->host_mode)
return 0;
- if (!hubdrv->set_output)
- return -ENOTSUPP;
+ /*
+ * hub is instantiated together with the source device that
+	 * calls here, so it is guaranteed to be present.
+ */
+ hubdrv = to_intel_th_driver(hub->dev.driver);
+ if (!hubdrv || !try_module_get(hubdrv->driver.owner))
+ return -EINVAL;
+
+ if (!hubdrv->set_output) {
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ ret = hubdrv->set_output(hub, master);
- return hubdrv->set_output(hub, master);
+out:
+ module_put(hubdrv->driver.owner);
+ return ret;
}
EXPORT_SYMBOL_GPL(intel_th_set_output);
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index edc52d75e6bd..5041fe7fee9e 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -477,7 +477,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
output->active = false;
for_each_set_bit(master, gth->output[output->port].master,
- TH_CONFIGURABLE_MASTERS) {
+ TH_CONFIGURABLE_MASTERS + 1) {
gth_master_set(gth, master, -1);
}
spin_unlock(&gth->gth_lock);
@@ -616,7 +616,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
othdev->output.port = -1;
othdev->output.active = false;
gth->output[port].output = NULL;
- for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
+ for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
if (gth->master[master] == port)
gth->master[master] = -1;
spin_unlock(&gth->gth_lock);
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 8424c8c61355..83fab06ccfeb 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -206,11 +206,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Tiger Lake PCH-H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x43a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Jasper Lake PCH */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Jasper Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4e29),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Elkhart Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529),
.driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -220,6 +230,26 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Alder Lake-P */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Emmitsburg PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1bcc),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Alder Lake-M */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Rocket Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
index 4b7ae47789d2..61e631358e9d 100644
--- a/drivers/hwtracing/intel_th/sth.c
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -157,9 +157,7 @@ static int sth_stm_link(struct stm_data *stm_data, unsigned int master,
{
struct sth_device *sth = container_of(stm_data, struct sth_device, stm);
- intel_th_set_output(to_intel_th_device(sth->dev), master);
-
- return 0;
+ return intel_th_set_output(to_intel_th_device(sth->dev), master);
}
static int intel_th_sw_init(struct sth_device *sth)
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index 7db42395e131..825c59a77a0e 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -64,7 +64,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data)
static int stm_heartbeat_init(void)
{
- int i, ret = -ENOMEM;
+ int i, ret;
if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
return -EINVAL;
@@ -72,8 +72,10 @@ static int stm_heartbeat_init(void)
for (i = 0; i < nr_devs; i++) {
stm_heartbeat[i].data.name =
kasprintf(GFP_KERNEL, "heartbeat.%d", i);
- if (!stm_heartbeat[i].data.name)
+ if (!stm_heartbeat[i].data.name) {
+ ret = -ENOMEM;
goto fail_unregister;
+ }
stm_heartbeat[i].data.nr_chans = 1;
stm_heartbeat[i].data.link = stm_heartbeat_link;
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 883a290f6a4d..f328de980855 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -50,8 +50,22 @@ static void pca_reset(struct i2c_algo_pca_data *adap)
pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET);
pca_outw(adap, I2C_PCA_IND, 0xA5);
pca_outw(adap, I2C_PCA_IND, 0x5A);
+
+ /*
+ * After a reset we need to re-apply any configuration
+ * (calculated in pca_init) to get the bus in a working state.
+ */
+ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IMODE);
+ pca_outw(adap, I2C_PCA_IND, adap->bus_settings.mode);
+ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLL);
+ pca_outw(adap, I2C_PCA_IND, adap->bus_settings.tlow);
+ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLH);
+ pca_outw(adap, I2C_PCA_IND, adap->bus_settings.thi);
+
+ pca_set_con(adap, I2C_PCA_CON_ENSIO);
} else {
adap->reset_chip(adap->data);
+ pca_set_con(adap, I2C_PCA_CON_ENSIO | adap->bus_settings.clock_freq);
}
}
@@ -323,7 +337,8 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
DEB2("BUS ERROR - SDA Stuck low\n");
pca_reset(adap);
goto out;
- case 0x90: /* Bus error - SCL stuck low */
+ case 0x78: /* Bus error - SCL stuck low (PCA9665) */
+ case 0x90: /* Bus error - SCL stuck low (PCA9564) */
DEB2("BUS ERROR - SCL Stuck low\n");
pca_reset(adap);
goto out;
@@ -431,13 +446,14 @@ static int pca_init(struct i2c_adapter *adap)
" Use the nominal frequency.\n", adap->name);
}
- pca_reset(pca_data);
-
clock = pca_clock(pca_data);
printk(KERN_INFO "%s: Clock frequency is %dkHz\n",
adap->name, freqs[clock]);
- pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock);
+ /* Store settings as these will be needed when the PCA chip is reset */
+ pca_data->bus_settings.clock_freq = clock;
+
+ pca_reset(pca_data);
} else {
int clock;
int mode;
@@ -504,19 +520,15 @@ static int pca_init(struct i2c_adapter *adap)
thi = tlow * min_thi / min_tlow;
}
+ /* Store settings as these will be needed when the PCA chip is reset */
+ pca_data->bus_settings.mode = mode;
+ pca_data->bus_settings.tlow = tlow;
+ pca_data->bus_settings.thi = thi;
+
pca_reset(pca_data);
printk(KERN_INFO
"%s: Clock frequency is %dHz\n", adap->name, clock * 100);
-
- pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IMODE);
- pca_outw(pca_data, I2C_PCA_IND, mode);
- pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLL);
- pca_outw(pca_data, I2C_PCA_IND, tlow);
- pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLH);
- pca_outw(pca_data, I2C_PCA_IND, thi);
-
- pca_set_con(pca_data, I2C_PCA_CON_ENSIO);
}
udelay(500); /* 500 us for oscillator to stabilise */
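
The i2c-algo-pca rework stores the timing/mode values computed at init in a bus_settings structure and has pca_reset() replay them, so a mid-transfer chip reset no longer leaves the controller unconfigured. A toy version of that cache-and-replay pattern (bus_settings and the chip_* helpers here are illustrative):

#include <stdio.h>

struct bus_settings {
	int mode, tlow, thi;		/* computed once at init */
};

static struct bus_settings saved;
static struct bus_settings chip;	/* pretend chip registers */

static void chip_apply(const struct bus_settings *s) { chip = *s; }
static void chip_hard_reset(void) { chip = (struct bus_settings){ 0, 0, 0 }; }

/* Reset now re-applies the cached configuration instead of losing it. */
static void bus_reset(void)
{
	chip_hard_reset();
	chip_apply(&saved);
}

int main(void)
{
	saved = (struct bus_settings){ .mode = 1, .tlow = 47, .thi = 23 };
	chip_apply(&saved);		/* init */

	bus_reset();			/* e.g. after "SDA stuck low" */
	printf("mode=%d tlow=%d thi=%d\n", chip.mode, chip.tlow, chip.thi);
	return 0;
}
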
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ee6dd1b84fac..017aec34a238 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -1117,6 +1117,7 @@ config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
depends on ARCH_RENESAS || COMPILE_TEST
select I2C_SLAVE
+ select RESET_CONTROLLER if ARCH_RCAR_GEN3
help
If you say yes to this option, support will be included for the
R-Car I2C controller.
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index a1cdcfc74acf..1d59eede537b 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -81,6 +81,7 @@
* @isr_mask: cached copy of local ISR enables.
* @isr_status: cached copy of local ISR status.
* @lock: spinlock for IRQ synchronization.
+ * @isr_mutex: mutex for IRQ thread.
*/
struct altr_i2c_dev {
void __iomem *base;
@@ -97,6 +98,7 @@ struct altr_i2c_dev {
u32 isr_mask;
u32 isr_status;
spinlock_t lock; /* IRQ synchronization */
+ struct mutex isr_mutex;
};
static void
@@ -256,10 +258,11 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev)
struct altr_i2c_dev *idev = _dev;
u32 status = idev->isr_status;
+ mutex_lock(&idev->isr_mutex);
if (!idev->msg) {
dev_warn(idev->dev, "unexpected interrupt\n");
altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ);
- return IRQ_HANDLED;
+ goto out;
}
read = (idev->msg->flags & I2C_M_RD) != 0;
@@ -312,6 +315,8 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev)
complete(&idev->msg_complete);
dev_dbg(idev->dev, "Message Complete\n");
}
+out:
+ mutex_unlock(&idev->isr_mutex);
return IRQ_HANDLED;
}
@@ -323,6 +328,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg)
u32 value;
u8 addr = i2c_8bit_addr_from_msg(msg);
+ mutex_lock(&idev->isr_mutex);
idev->msg = msg;
idev->msg_len = msg->len;
idev->buf = msg->buf;
@@ -347,6 +353,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg)
altr_i2c_int_enable(idev, imask, true);
altr_i2c_fill_tx_fifo(idev);
}
+ mutex_unlock(&idev->isr_mutex);
time_left = wait_for_completion_timeout(&idev->msg_complete,
ALTR_I2C_XFER_TIMEOUT);
@@ -395,7 +402,6 @@ static int altr_i2c_probe(struct platform_device *pdev)
struct altr_i2c_dev *idev = NULL;
struct resource *res;
int irq, ret;
- u32 val;
idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
if (!idev)
@@ -421,18 +427,19 @@ static int altr_i2c_probe(struct platform_device *pdev)
idev->dev = &pdev->dev;
init_completion(&idev->msg_complete);
spin_lock_init(&idev->lock);
+ mutex_init(&idev->isr_mutex);
- val = device_property_read_u32(idev->dev, "fifo-size",
+ ret = device_property_read_u32(idev->dev, "fifo-size",
&idev->fifo_size);
- if (val) {
+ if (ret) {
dev_err(&pdev->dev, "FIFO size set to default of %d\n",
ALTR_I2C_DFLT_FIFO_SZ);
idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ;
}
- val = device_property_read_u32(idev->dev, "clock-frequency",
+ ret = device_property_read_u32(idev->dev, "clock-frequency",
&idev->bus_clk_rate);
- if (val) {
+ if (ret) {
dev_err(&pdev->dev, "Default to 100kHz\n");
idev->bus_clk_rate = 100000; /* default clock rate */
}
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 826d32049996..2086a96307bf 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -318,7 +318,7 @@ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev,
goto cmd_out;
}
- if ((CMD_RD || CMD_WR) &&
+ if ((cmd == CMD_RD || cmd == CMD_WR) &&
bsc_readl(dev, iic_enable) & BSC_IIC_EN_NOACK_MASK) {
rc = -EREMOTEIO;
dev_dbg(dev->device, "controller received NOACK intr for %s\n",
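
The brcmstb one-liner fixes a classic bug: `(CMD_RD || CMD_WR)` tests the constants themselves rather than the current command, and since at least one of them is non-zero the expression is always true, so the NOACK branch fired for every command type. Comparing the variable against each constant is what was intended. A compilable illustration (the enum values below are invented; only the shape of the bug matters):

#include <stdio.h>

enum { CMD_RD = 1, CMD_WR = 2, CMD_STOP = 3 };

int main(void)
{
	int cmd = CMD_STOP;

	/* Buggy form: evaluates the constants, not cmd, so it is always true. */
	if (CMD_RD || CMD_WR)
		printf("buggy check fires even for CMD_STOP\n");

	/* Fixed form: actually looks at cmd. */
	if (cmd == CMD_RD || cmd == CMD_WR)
		printf("fixed check fires\n");
	else
		printf("fixed check correctly skips CMD_STOP\n");
	return 0;
}
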
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index d917cefc5a19..c5475bb4fae6 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -382,10 +382,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
- if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
+ if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
ctrl_reg |= CDNS_I2C_CR_HOLD;
- else
- ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
@@ -442,11 +440,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
- if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
+ if (id->send_count > CDNS_I2C_FIFO_DEPTH)
ctrl_reg |= CDNS_I2C_CR_HOLD;
- else
- ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
-
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
/* Clear the interrupts in interrupt status register. */
@@ -911,7 +906,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
if (IS_ERR(id->membase))
return PTR_ERR(id->membase);
- id->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ id->irq = ret;
id->adap.owner = THIS_MODULE;
id->adap.dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 8a8ca945561b..7eba874a981d 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -74,6 +74,9 @@ struct i2c_ram {
char res1[4]; /* Reserved */
ushort rpbase; /* Relocation pointer */
char res2[2]; /* Reserved */
+ /* The following elements are only for CPM2 */
+ char res3[4]; /* Reserved */
+ uint sdmatmp; /* Internal */
};
#define I2COM_START 0x80
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 835d54ac2971..44025507b8f7 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -177,7 +177,6 @@ static wait_queue_head_t pch_event;
static DEFINE_MUTEX(pch_mutex);
/* Definition for ML7213 by LAPIS Semiconductor */
-#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_I2C 0x802D
#define PCI_DEVICE_ID_ML7223_I2C 0x8010
#define PCI_DEVICE_ID_ML7831_I2C 0x8817
@@ -189,6 +188,7 @@ static const struct pci_device_id pch_pcidev_id[] = {
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
{0,}
};
+MODULE_DEVICE_TABLE(pci, pch_pcidev_id);
static irqreturn_t pch_i2c_handler(int irq, void *pData);
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 959d4912ec0d..0230a13a6ab7 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -397,7 +397,10 @@ static int em_i2c_probe(struct platform_device *pdev)
em_i2c_reset(&priv->adap);
- priv->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_clk;
+ priv->irq = ret;
ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
"em_i2c", priv);
if (ret)
diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c
index 1e2be2219a60..46aef609fb70 100644
--- a/drivers/i2c/busses/i2c-fsi.c
+++ b/drivers/i2c/busses/i2c-fsi.c
@@ -98,7 +98,7 @@
#define I2C_STAT_DAT_REQ BIT(25)
#define I2C_STAT_CMD_COMP BIT(24)
#define I2C_STAT_STOP_ERR BIT(23)
-#define I2C_STAT_MAX_PORT GENMASK(19, 16)
+#define I2C_STAT_MAX_PORT GENMASK(22, 16)
#define I2C_STAT_ANY_INT BIT(15)
#define I2C_STAT_SCL_IN BIT(11)
#define I2C_STAT_SDA_IN BIT(10)
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 679c6c41f64b..efafd028c5d1 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -384,11 +384,9 @@ static int i801_check_post(struct i801_priv *priv, int status)
dev_err(&priv->pci_dev->dev, "Transaction timeout\n");
/* try to stop the current command */
dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n");
- outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
- SMBHSTCNT(priv));
+ outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv));
usleep_range(1000, 2000);
- outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
- SMBHSTCNT(priv));
+ outb_p(0, SMBHSTCNT(priv));
/* Check if it worked */
status = inb_p(SMBHSTSTS(priv));
@@ -1506,6 +1504,16 @@ static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
static inline void i801_acpi_remove(struct i801_priv *priv) { }
#endif
+static unsigned char i801_setup_hstcfg(struct i801_priv *priv)
+{
+ unsigned char hstcfg = priv->original_hstcfg;
+
+ hstcfg &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */
+ hstcfg |= SMBHSTCFG_HST_EN;
+ pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hstcfg);
+ return hstcfg;
+}
+
static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
unsigned char temp;
@@ -1611,14 +1619,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
return err;
}
- pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &temp);
- priv->original_hstcfg = temp;
- temp &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */
- if (!(temp & SMBHSTCFG_HST_EN)) {
+ pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &priv->original_hstcfg);
+ temp = i801_setup_hstcfg(priv);
+ if (!(priv->original_hstcfg & SMBHSTCFG_HST_EN))
dev_info(&dev->dev, "Enabling SMBus device\n");
- temp |= SMBHSTCFG_HST_EN;
- }
- pci_write_config_byte(priv->pci_dev, SMBHSTCFG, temp);
if (temp & SMBHSTCFG_SMB_SMI_EN) {
dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n");
@@ -1692,6 +1696,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
pci_set_drvdata(dev, priv);
+ dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_set_autosuspend_delay(&dev->dev, 1000);
pm_runtime_use_autosuspend(&dev->dev);
pm_runtime_put_autosuspend(&dev->dev);
@@ -1745,6 +1750,7 @@ static int i801_resume(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct i801_priv *priv = pci_get_drvdata(pci_dev);
+ i801_setup_hstcfg(priv);
i801_enable_host_notify(&priv->adapter);
return 0;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index d4b72e4ffd71..83c246e30399 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -404,6 +404,19 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx)
dma->chan_using = NULL;
}
+static void i2c_imx_clear_irq(struct imx_i2c_struct *i2c_imx, unsigned int bits)
+{
+ unsigned int temp;
+
+ /*
+ * i2sr_clr_opcode is the value to clear all interrupts. Here we want to
+ * clear only <bits>, so we write ~i2sr_clr_opcode with just <bits>
+ * toggled. This is required because i.MX needs W0C and Vybrid uses W1C.
+ */
+ temp = ~i2c_imx->hwdata->i2sr_clr_opcode ^ bits;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+}
+
static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
{
unsigned long orig_jiffies = jiffies;
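[Illustrative aside, not part of the patch] The i2c_imx_clear_irq() helper added above relies on the comment's point that writing ~i2sr_clr_opcode with only the requested bits toggled clears exactly those bits, whether the hardware is write-0-to-clear (i.MX) or write-1-to-clear (Vybrid). A minimal user-space sketch of that identity, assuming an 8-bit status register and the hypothetical opcodes 0x00 (W0C) and 0xff (W1C); the real driver takes the opcode from its per-SoC hwdata:

#include <stdio.h>

/* Hypothetical clear opcodes: 0x00 for W0C parts, 0xff for W1C parts. */
static unsigned int clear_value(unsigned int clr_opcode, unsigned int bits)
{
	/* Same expression as i2c_imx_clear_irq(): toggle only the wanted bits. */
	return (~clr_opcode ^ bits) & 0xffu;
}

int main(void)
{
	unsigned int bits = 0x10;	/* e.g. an "arbitration lost" flag */

	/* W0C: 0xef, i.e. zeros only at the bits to clear, ones elsewhere. */
	printf("W0C write: 0x%02x\n", clear_value(0x00, bits));
	/* W1C: 0x10, i.e. ones only at the bits to clear, zeros elsewhere. */
	printf("W1C write: 0x%02x\n", clear_value(0xff, bits));
	return 0;
}

On W0C hardware the bits written as 1 are left alone, on W1C hardware the bits written as 0 are left alone, so in both conventions only the requested bits are cleared.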
@@ -416,8 +429,7 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
/* check for arbitration lost */
if (temp & I2SR_IAL) {
- temp &= ~I2SR_IAL;
- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+ i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
return -EAGAIN;
}
@@ -448,6 +460,16 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx)
dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__);
return -ETIMEDOUT;
}
+
+ /* check for arbitration lost */
+ if (i2c_imx->i2csr & I2SR_IAL) {
+ dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__);
+ i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
+
+ i2c_imx->i2csr = 0;
+ return -EAGAIN;
+ }
+
dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__);
i2c_imx->i2csr = 0;
return 0;
@@ -557,6 +579,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
/* Stop I2C transaction */
dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ if (!(temp & I2CR_MSTA))
+ i2c_imx->stopped = 1;
temp &= ~(I2CR_MSTA | I2CR_MTX);
if (i2c_imx->dma)
temp &= ~I2CR_DMAEN;
@@ -587,9 +611,7 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
if (temp & I2SR_IIF) {
/* save status register */
i2c_imx->i2csr = temp;
- temp &= ~I2SR_IIF;
- temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF);
- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+ i2c_imx_clear_irq(i2c_imx, I2SR_IIF);
wake_up(&i2c_imx->queue);
return IRQ_HANDLED;
}
@@ -722,9 +744,12 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
*/
dev_dbg(dev, "<%s> clear MSTA\n", __func__);
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ if (!(temp & I2CR_MSTA))
+ i2c_imx->stopped = 1;
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- i2c_imx_bus_busy(i2c_imx, 0);
+ if (!i2c_imx->stopped)
+ i2c_imx_bus_busy(i2c_imx, 0);
} else {
/*
* For i2c master receiver repeat restart operation like:
@@ -847,9 +872,12 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
dev_dbg(&i2c_imx->adapter.dev,
"<%s> clear MSTA\n", __func__);
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ if (!(temp & I2CR_MSTA))
+ i2c_imx->stopped = 1;
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- i2c_imx_bus_busy(i2c_imx, 0);
+ if (!i2c_imx->stopped)
+ i2c_imx_bus_busy(i2c_imx, 0);
} else {
/*
* For i2c master receiver repeat restart operation like:
@@ -1101,14 +1129,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
return ret;
}
- /* Request IRQ */
- ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
- pdev->name, i2c_imx);
- if (ret) {
- dev_err(&pdev->dev, "can't claim irq %d\n", irq);
- goto clk_disable;
- }
-
/* Init queue */
init_waitqueue_head(&i2c_imx->queue);
@@ -1127,6 +1147,14 @@ static int i2c_imx_probe(struct platform_device *pdev)
if (ret < 0)
goto rpm_disable;
+ /* Request IRQ */
+ ret = request_threaded_irq(irq, i2c_imx_isr, NULL, IRQF_SHARED,
+ pdev->name, i2c_imx);
+ if (ret) {
+ dev_err(&pdev->dev, "can't claim irq %d\n", irq);
+ goto rpm_disable;
+ }
+
/* Set up clock divider */
i2c_imx->bitrate = IMX_I2C_BIT_RATE;
ret = of_property_read_u32(pdev->dev.of_node,
@@ -1169,13 +1197,12 @@ static int i2c_imx_probe(struct platform_device *pdev)
clk_notifier_unregister:
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
+ free_irq(irq, i2c_imx);
rpm_disable:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
-
-clk_disable:
clk_disable_unprepare(i2c_imx->clk);
return ret;
}
@@ -1183,7 +1210,7 @@ clk_disable:
static int i2c_imx_remove(struct platform_device *pdev)
{
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
- int ret;
+ int irq, ret;
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
@@ -1203,6 +1230,9 @@ static int i2c_imx_remove(struct platform_device *pdev)
imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0)
+ free_irq(irq, i2c_imx);
clk_disable_unprepare(i2c_imx->clk);
pm_runtime_put_noidle(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 41ca9ff7b5da..4dd800c0db14 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -760,7 +760,10 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
- i2c->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err;
+ i2c->irq = ret;
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
if (ret)
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 90f5d0407d73..f48e637f1a3e 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/i2c.h>
@@ -35,12 +36,17 @@
#define REG_CTRL_ACK_IGNORE BIT(1)
#define REG_CTRL_STATUS BIT(2)
#define REG_CTRL_ERROR BIT(3)
-#define REG_CTRL_CLKDIV_SHIFT 12
-#define REG_CTRL_CLKDIV_MASK GENMASK(21, 12)
-#define REG_CTRL_CLKDIVEXT_SHIFT 28
-#define REG_CTRL_CLKDIVEXT_MASK GENMASK(29, 28)
+#define REG_CTRL_CLKDIV GENMASK(21, 12)
+#define REG_CTRL_CLKDIVEXT GENMASK(29, 28)
+
+#define REG_SLV_ADDR GENMASK(7, 0)
+#define REG_SLV_SDA_FILTER GENMASK(10, 8)
+#define REG_SLV_SCL_FILTER GENMASK(13, 11)
+#define REG_SLV_SCL_LOW GENMASK(27, 16)
+#define REG_SLV_SCL_LOW_EN BIT(28)
#define I2C_TIMEOUT_MS 500
+#define FILTER_DELAY 15
enum {
TOKEN_END = 0,
@@ -135,19 +141,24 @@ static void meson_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq)
unsigned long clk_rate = clk_get_rate(i2c->clk);
unsigned int div;
- div = DIV_ROUND_UP(clk_rate, freq * i2c->data->div_factor);
+ div = DIV_ROUND_UP(clk_rate, freq);
+ div -= FILTER_DELAY;
+ div = DIV_ROUND_UP(div, i2c->data->div_factor);
/* clock divider has 12 bits */
- if (div >= (1 << 12)) {
+ if (div > GENMASK(11, 0)) {
dev_err(i2c->dev, "requested bus frequency too low\n");
- div = (1 << 12) - 1;
+ div = GENMASK(11, 0);
}
- meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV_MASK,
- (div & GENMASK(9, 0)) << REG_CTRL_CLKDIV_SHIFT);
+ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV,
+ FIELD_PREP(REG_CTRL_CLKDIV, div & GENMASK(9, 0)));
+
+ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT,
+ FIELD_PREP(REG_CTRL_CLKDIVEXT, div >> 10));
- meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT_MASK,
- (div >> 10) << REG_CTRL_CLKDIVEXT_SHIFT);
+ /* Disable HIGH/LOW mode */
+ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_EN, 0);
dev_dbg(i2c->dev, "%s: clk %lu, freq %u, div %u\n", __func__,
clk_rate, freq, div);
@@ -276,7 +287,10 @@ static void meson_i2c_do_start(struct meson_i2c *i2c, struct i2c_msg *msg)
token = (msg->flags & I2C_M_RD) ? TOKEN_SLAVE_ADDR_READ :
TOKEN_SLAVE_ADDR_WRITE;
- writel(msg->addr << 1, i2c->regs + REG_SLAVE_ADDR);
+
+ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_ADDR,
+ FIELD_PREP(REG_SLV_ADDR, msg->addr << 1));
+
meson_i2c_add_token(i2c, TOKEN_START);
meson_i2c_add_token(i2c, token);
}
@@ -435,6 +449,10 @@ static int meson_i2c_probe(struct platform_device *pdev)
return ret;
}
+ /* Disable filtering */
+ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR,
+ REG_SLV_SDA_FILTER | REG_SLV_SCL_FILTER, 0);
+
meson_i2c_set_clk_div(i2c, timings.bus_freq_hz);
return 0;
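[Illustrative aside, not part of the patch] The meson conversion above drops the separate *_SHIFT constants and lets FIELD_PREP() derive the shift from the GENMASK() value itself. A rough user-space sketch of the idea, using simplified stand-ins for the <linux/bitfield.h> macros (the real kernel versions add compile-time checking); the divider value is made up:

#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK()/FIELD_PREP() helpers. */
#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

#define REG_CTRL_CLKDIV		GENMASK(21, 12)	/* 10-bit divider field */
#define REG_CTRL_CLKDIVEXT	GENMASK(29, 28)	/* 2 extra divider bits */

int main(void)
{
	unsigned int div = 0x5a3;	/* hypothetical 12-bit divider value */
	unsigned int ctrl = 0;

	/* Low 10 bits go into CLKDIV, the top 2 bits into CLKDIVEXT. */
	ctrl |= FIELD_PREP(REG_CTRL_CLKDIV, div & GENMASK(9, 0));
	ctrl |= FIELD_PREP(REG_CTRL_CLKDIVEXT, div >> 10);

	printf("ctrl = 0x%08x\n", ctrl);
	return 0;
}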
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 2fd717d8dd30..71d7bae2cbca 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -337,9 +337,9 @@ static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv)
if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) {
mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG,
&datalen, 1);
- if (unlikely(datalen > (I2C_SMBUS_BLOCK_MAX + 1))) {
+ if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) {
dev_err(priv->dev, "Incorrect smbus block read message len\n");
- return -E2BIG;
+ return -EPROTO;
}
} else {
datalen = priv->xfer.data_len;
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index d9607905dc2f..845eda70b8ca 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -347,7 +347,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
if (result)
return result;
if (recv_len && i == 0) {
- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
+ if (data[i] > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
length += data[i];
}
diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c
index 96b4572e6d9c..cf3fcf35fe3d 100644
--- a/drivers/i2c/busses/i2c-owl.c
+++ b/drivers/i2c/busses/i2c-owl.c
@@ -179,6 +179,9 @@ static irqreturn_t owl_i2c_interrupt(int irq, void *_dev)
fifostat = readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT);
if (fifostat & OWL_I2C_FIFOSTAT_RNB) {
i2c_dev->err = -ENXIO;
+ /* Clear NACK error bit by writing "1" */
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOSTAT,
+ OWL_I2C_FIFOSTAT_RNB, true);
goto stop;
}
@@ -186,6 +189,9 @@ static irqreturn_t owl_i2c_interrupt(int irq, void *_dev)
stat = readl(i2c_dev->base + OWL_I2C_REG_STAT);
if (stat & OWL_I2C_STAT_BEB) {
i2c_dev->err = -EIO;
+ /* Clear BUS error bit by writing "1" */
+ owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_STAT,
+ OWL_I2C_STAT_BEB, true);
goto stop;
}
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 9ff3371ec385..f1c79f9b3919 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -958,7 +958,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
if (dev->vendor == PCI_VENDOR_ID_AMD &&
- dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) {
+ (dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS ||
+ dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) {
retval = piix4_setup_sb800(dev, id, 1);
}
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index fbf91d383b40..c10ae4778d35 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -315,11 +315,10 @@ static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why)
dev_err(dev, "IBMR: %08x IDBR: %08x ICR: %08x ISR: %08x\n",
readl(_IBMR(i2c)), readl(_IDBR(i2c)), readl(_ICR(i2c)),
readl(_ISR(i2c)));
- dev_dbg(dev, "log: ");
+ dev_err(dev, "log:");
for (i = 0; i < i2c->irqlogidx; i++)
- pr_debug("[%08x:%08x] ", i2c->isrlog[i], i2c->icrlog[i]);
-
- pr_debug("\n");
+ pr_cont(" [%03x:%05x]", i2c->isrlog[i], i2c->icrlog[i]);
+ pr_cont("\n");
}
#else /* ifdef DEBUG */
@@ -709,11 +708,9 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c)
{
u32 icr;
- /*
- * Clear the STOP and ACK flags
- */
+ /* Clear the START, STOP, ACK, TB and MA flags */
icr = readl(_ICR(i2c));
- icr &= ~(ICR_STOP | ICR_ACKNAK);
+ icr &= ~(ICR_START | ICR_STOP | ICR_ACKNAK | ICR_TB | ICR_MA);
writel(icr, _ICR(i2c));
}
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index e09cd0775ae9..3417f7dffa94 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -806,7 +806,8 @@ static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup)
if (ret || qup->bus_err || qup->qup_err) {
reinit_completion(&qup->xfer);
- if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret) {
dev_err(qup->dev, "change to run state timed out");
goto desc_err;
}
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 2c29f901d309..3ea2ceec676c 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -117,6 +117,7 @@ enum rcar_i2c_type {
};
struct rcar_i2c_priv {
+ u32 flags;
void __iomem *io;
struct i2c_adapter adap;
struct i2c_msg *msg;
@@ -127,7 +128,6 @@ struct rcar_i2c_priv {
int pos;
u32 icccr;
- u32 flags;
u8 recovery_icmcr; /* protected by adapter lock */
enum rcar_i2c_type devtype;
struct i2c_client *slave;
@@ -587,13 +587,15 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
}
- rcar_i2c_write(priv, ICSSR, ~SAR & 0xff);
+ /* Clear SSR, too, because of old STOPs to other clients than us */
+ rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff);
}
/* master sent stop */
if (ssr_filtered & SSR) {
i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
- rcar_i2c_write(priv, ICSIER, SAR | SSR);
+ rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */
+ rcar_i2c_write(priv, ICSIER, SAR);
rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
}
@@ -848,7 +850,7 @@ static int rcar_reg_slave(struct i2c_client *slave)
priv->slave = slave;
rcar_i2c_write(priv, ICSAR, slave->addr);
rcar_i2c_write(priv, ICSSR, 0);
- rcar_i2c_write(priv, ICSIER, SAR | SSR);
+ rcar_i2c_write(priv, ICSIER, SAR);
rcar_i2c_write(priv, ICSCR, SIE | SDBS);
return 0;
@@ -860,11 +862,14 @@ static int rcar_unreg_slave(struct i2c_client *slave)
WARN_ON(!priv->slave);
- /* disable irqs and ensure none is running before clearing ptr */
+ /* ensure no irq is running before clearing ptr */
+ disable_irq(priv->irq);
rcar_i2c_write(priv, ICSIER, 0);
- rcar_i2c_write(priv, ICSCR, 0);
+ rcar_i2c_write(priv, ICSSR, 0);
+ enable_irq(priv->irq);
+ rcar_i2c_write(priv, ICSCR, SDBS);
+ rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
- synchronize_irq(priv->irq);
priv->slave = NULL;
pm_runtime_put(rcar_i2c_priv_to_dev(priv));
@@ -966,6 +971,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;
+ rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+
if (priv->devtype == I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(priv->rstc)) {
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 2f2e28d60ef5..d3603e261a84 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -493,7 +493,10 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
* forces us to send a new START
* when we change direction
*/
+ dev_dbg(i2c->dev,
+ "missing START before write->read\n");
s3c24xx_i2c_stop(i2c, -EINVAL);
+ break;
}
goto retry_write;
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index c2005c789d2b..319d1fa617c8 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -471,7 +471,10 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
goto out2;
}
- id->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto out3;
+ id->irq = ret;
id->adap.nr = pdev->id;
id->adap.algo = &sh7760_i2c_algo;
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index a94e724f51dc..bb1478e781c4 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -71,6 +71,8 @@
/* timeout (ms) for pm runtime autosuspend */
#define SPRD_I2C_PM_TIMEOUT 1000
+/* timeout (ms) for transferring a message */
+#define I2C_XFER_TIMEOUT 1000
/* SPRD i2c data structure */
struct sprd_i2c {
@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, bool is_last_msg)
{
struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
+ unsigned long time_left;
i2c_dev->msg = msg;
i2c_dev->buf = msg->buf;
@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
sprd_i2c_opt_start(i2c_dev);
- wait_for_completion(&i2c_dev->complete);
+ time_left = wait_for_completion_timeout(&i2c_dev->complete,
+ msecs_to_jiffies(I2C_XFER_TIMEOUT));
+ if (!time_left)
+ return -ETIMEDOUT;
return i2c_dev->err;
}
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 9e62f893958a..81158ae8bfe3 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -437,6 +437,7 @@ static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev)
/**
* st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode
* @i2c_dev: Controller's private data
+ * @max: Maximum amount of data to fill into the Tx FIFO
*
* This function fills the Tx FIFO with a fixed pattern when
* in read mode to trigger clock.
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index eb7e533b0dd4..6feafebf85fe 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -49,6 +49,8 @@
#define STM32F7_I2C_CR1_RXDMAEN BIT(15)
#define STM32F7_I2C_CR1_TXDMAEN BIT(14)
#define STM32F7_I2C_CR1_ANFOFF BIT(12)
+#define STM32F7_I2C_CR1_DNF_MASK GENMASK(11, 8)
+#define STM32F7_I2C_CR1_DNF(n) (((n) & 0xf) << 8)
#define STM32F7_I2C_CR1_ERRIE BIT(7)
#define STM32F7_I2C_CR1_TCIE BIT(6)
#define STM32F7_I2C_CR1_STOPIE BIT(5)
@@ -147,7 +149,7 @@
#define STM32F7_I2C_MAX_SLAVE 0x2
#define STM32F7_I2C_DNF_DEFAULT 0
-#define STM32F7_I2C_DNF_MAX 16
+#define STM32F7_I2C_DNF_MAX 15
#define STM32F7_I2C_ANALOG_FILTER_ENABLE 1
#define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */
@@ -645,6 +647,13 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
else
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_ANFOFF);
+
+ /* Program the Digital Filter */
+ stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_DNF_MASK);
+ stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf));
+
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_PE);
}
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index f6cd35d0a2ac..240bd1e90892 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -91,7 +91,7 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
flags &= ~I2C_M_RECV_LEN;
}
- return (flags != 0) ? -EINVAL : 0;
+ return 0;
}
/**
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 47d196c026ba..af06198851f1 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -145,8 +145,8 @@ enum msg_end_type {
* @has_continue_xfer_support: Continue transfer supports.
* @has_per_pkt_xfer_complete_irq: Has enable/disable capability for transfer
* complete interrupt per packet basis.
- * @has_single_clk_source: The i2c controller has single clock source. Tegra30
- * and earlier Socs has two clock sources i.e. div-clk and
+ * @has_single_clk_source: The I2C controller has single clock source. Tegra30
+ * and earlier SoCs have two clock sources i.e. div-clk and
* fast-clk.
* @has_config_load_reg: Has the config load register to load the new
* configuration.
@@ -154,8 +154,19 @@ enum msg_end_type {
* @clk_divisor_std_fast_mode: Clock divisor in standard/fast mode. It is
* applicable if there is no fast clock source i.e. single clock
* source.
+ * @clk_divisor_fast_plus_mode: Clock divisor in fast mode plus. It is
+ * applicable if there is no fast clock source (i.e. single
+ * clock source).
+ * @has_multi_master_mode: The I2C controller supports running in single-master
+ * or multi-master mode.
+ * @has_slcg_override_reg: The I2C controller supports a register that
+ * overrides the second level clock gating.
+ * @has_mst_fifo: The I2C controller contains the new MST FIFO interface that
+ * provides additional features and allows for longer messages to
+ * be transferred in one go.
+ * @quirks: i2c adapter quirks for limiting write/read transfer size and not
+ * allowing 0 length transfers.
*/
-
struct tegra_i2c_hw_feature {
bool has_continue_xfer_support;
bool has_per_pkt_xfer_complete_irq;
@@ -167,25 +178,31 @@ struct tegra_i2c_hw_feature {
bool has_multi_master_mode;
bool has_slcg_override_reg;
bool has_mst_fifo;
+ const struct i2c_adapter_quirks *quirks;
};
/**
- * struct tegra_i2c_dev - per device i2c context
+ * struct tegra_i2c_dev - per device I2C context
* @dev: device reference for power management
- * @hw: Tegra i2c hw feature.
- * @adapter: core i2c layer adapter information
- * @div_clk: clock reference for div clock of i2c controller.
- * @fast_clk: clock reference for fast clock of i2c controller.
+ * @hw: Tegra I2C HW feature
+ * @adapter: core I2C layer adapter information
+ * @div_clk: clock reference for div clock of I2C controller
+ * @fast_clk: clock reference for fast clock of I2C controller
+ * @rst: reset control for the I2C controller
* @base: ioremapped registers cookie
- * @cont_id: i2c controller id, used for for packet header
- * @irq: irq number of transfer complete interrupt
- * @is_dvc: identifies the DVC i2c controller, has a different register layout
+ * @cont_id: I2C controller ID, used for packet header
+ * @irq: IRQ number of transfer complete interrupt
+ * @irq_disabled: used to track whether or not the interrupt is enabled
+ * @is_dvc: identifies the DVC I2C controller, has a different register layout
* @msg_complete: transfer completion notifier
* @msg_err: error code for completed message
* @msg_buf: pointer to current message data
* @msg_buf_remaining: size of unsent data in the message buffer
* @msg_read: identifies read transfers
- * @bus_clk_rate: current i2c bus clock rate
+ * @bus_clk_rate: current I2C bus clock rate
+ * @clk_divisor_non_hs_mode: clock divider for non-high-speed modes
+ * @is_multimaster_mode: track if I2C controller is in multi-master mode
+ * @xfer_lock: lock to serialize transfer submission and processing
*/
struct tegra_i2c_dev {
struct device *dev;
@@ -833,6 +850,10 @@ static const struct i2c_adapter_quirks tegra_i2c_quirks = {
.max_write_len = 4096 - 12,
};
+static const struct i2c_adapter_quirks tegra194_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_continue_xfer_support = false,
.has_per_pkt_xfer_complete_irq = false,
@@ -844,6 +865,7 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
@@ -857,6 +879,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
@@ -870,6 +893,7 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
@@ -883,6 +907,7 @@ static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
@@ -896,6 +921,7 @@ static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
.has_multi_master_mode = true,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
@@ -909,6 +935,7 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
.has_multi_master_mode = true,
.has_slcg_override_reg = true,
.has_mst_fifo = true,
+ .quirks = &tegra194_i2c_quirks,
};
/* Match table for of_platform binding */
@@ -960,7 +987,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->base = base;
i2c_dev->div_clk = div_clk;
i2c_dev->adapter.algo = &tegra_i2c_algo;
- i2c_dev->adapter.quirks = &tegra_i2c_quirks;
i2c_dev->irq = irq;
i2c_dev->cont_id = pdev->id;
i2c_dev->dev = &pdev->dev;
@@ -976,6 +1002,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->hw = of_device_get_match_data(&pdev->dev);
i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
"nvidia,tegra20-i2c-dvc");
+ i2c_dev->adapter.quirks = i2c_dev->hw->quirks;
init_completion(&i2c_dev->msg_complete);
spin_lock_init(&i2c_dev->xfer_lock);
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index eb0569359387..8ba4122fb340 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -219,6 +219,7 @@ static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
void i2c_acpi_register_devices(struct i2c_adapter *adap)
{
acpi_status status;
+ acpi_handle handle;
if (!has_acpi_companion(&adap->dev))
return;
@@ -229,6 +230,15 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
adap, NULL);
if (ACPI_FAILURE(status))
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
+
+ if (!adap->dev.parent)
+ return;
+
+ handle = ACPI_HANDLE(adap->dev.parent);
+ if (!handle)
+ return;
+
+ acpi_walk_dep_device_list(handle);
}
const struct acpi_device_id *
@@ -693,7 +703,6 @@ int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
return -ENOMEM;
}
- acpi_walk_dep_device_list(handle);
return 0;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index f225bef1e043..39be53b6f983 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -262,13 +262,14 @@ EXPORT_SYMBOL_GPL(i2c_recover_bus);
static void i2c_init_recovery(struct i2c_adapter *adap)
{
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
- char *err_str;
+ char *err_str, *err_level = KERN_ERR;
if (!bri)
return;
if (!bri->recover_bus) {
- err_str = "no recover_bus() found";
+ err_str = "no suitable method provided";
+ err_level = KERN_DEBUG;
goto err;
}
@@ -298,7 +299,7 @@ static void i2c_init_recovery(struct i2c_adapter *adap)
return;
err:
- dev_err(&adap->dev, "Not using recovery: %s\n", err_str);
+ dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str);
adap->bus_recovery_info = NULL;
}
@@ -1292,8 +1293,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
/* create pre-declared device nodes */
of_i2c_register_devices(adap);
- i2c_acpi_register_devices(adap);
i2c_acpi_install_space_handler(adap);
+ i2c_acpi_register_devices(adap);
if (adap->nr < __i2c_first_dynamic_bus_num)
i2c_scan_static_board_info(adap);
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index 47a9f70a24a9..f2e7e373ee47 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -22,10 +22,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
{
int ret;
- if (!client || !slave_cb) {
- WARN(1, "insufficient data\n");
+ if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n"))
return -EINVAL;
- }
if (!(client->flags & I2C_CLIENT_SLAVE))
dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n",
@@ -64,6 +62,9 @@ int i2c_slave_unregister(struct i2c_client *client)
{
int ret;
+ if (IS_ERR_OR_NULL(client))
+ return -EINVAL;
+
if (!client->adapter->algo->unreg_slave) {
dev_err(&client->dev, "%s: not supported by adapter\n", __func__);
return -EOPNOTSUPP;
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 9cd66cabb84f..8d6fad05b0c7 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -497,6 +497,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_BLOCK_PROC_CALL:
+ if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) {
+ dev_err(&adapter->dev,
+ "Invalid block size returned: %d\n",
+ msg[1].buf[0]);
+ status = -EPROTO;
+ goto cleanup;
+ }
for (i = 0; i < msg[1].buf[0] + 1; i++)
data->block[i] = msg[1].buf[i];
break;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index cb07651f4b46..1d10ee86299d 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -48,7 +48,7 @@
struct i2c_dev {
struct list_head list;
struct i2c_adapter *adap;
- struct device *dev;
+ struct device dev;
struct cdev cdev;
};
@@ -92,12 +92,14 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap)
return i2c_dev;
}
-static void put_i2c_dev(struct i2c_dev *i2c_dev)
+static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev)
{
spin_lock(&i2c_dev_list_lock);
list_del(&i2c_dev->list);
spin_unlock(&i2c_dev_list_lock);
- kfree(i2c_dev);
+ if (del_cdev)
+ cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev);
+ put_device(&i2c_dev->dev);
}
static ssize_t name_show(struct device *dev,
@@ -446,8 +448,13 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
sizeof(rdwr_arg)))
return -EFAULT;
- /* Put an arbitrary limit on the number of messages that can
- * be sent at once */
+ if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
+ return -EINVAL;
+
+ /*
+ * Put an arbitrary limit on the number of messages that can
+ * be sent at once
+ */
if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
return -EINVAL;
@@ -636,6 +643,14 @@ static const struct file_operations i2cdev_fops = {
static struct class *i2c_dev_class;
+static void i2cdev_dev_release(struct device *dev)
+{
+ struct i2c_dev *i2c_dev;
+
+ i2c_dev = container_of(dev, struct i2c_dev, dev);
+ kfree(i2c_dev);
+}
+
static int i2cdev_attach_adapter(struct device *dev, void *dummy)
{
struct i2c_adapter *adap;
@@ -652,27 +667,23 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
cdev_init(&i2c_dev->cdev, &i2cdev_fops);
i2c_dev->cdev.owner = THIS_MODULE;
- res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1);
- if (res)
- goto error_cdev;
-
- /* register this i2c device with the driver core */
- i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
- MKDEV(I2C_MAJOR, adap->nr), NULL,
- "i2c-%d", adap->nr);
- if (IS_ERR(i2c_dev->dev)) {
- res = PTR_ERR(i2c_dev->dev);
- goto error;
+
+ device_initialize(&i2c_dev->dev);
+ i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr);
+ i2c_dev->dev.class = i2c_dev_class;
+ i2c_dev->dev.parent = &adap->dev;
+ i2c_dev->dev.release = i2cdev_dev_release;
+ dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+
+ res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
+ if (res) {
+ put_i2c_dev(i2c_dev, false);
+ return res;
}
pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
adap->name, adap->nr);
return 0;
-error:
- cdev_del(&i2c_dev->cdev);
-error_cdev:
- put_i2c_dev(i2c_dev);
- return res;
}
static int i2cdev_detach_adapter(struct device *dev, void *dummy)
@@ -688,9 +699,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
if (!i2c_dev) /* attach_adapter must have failed */
return 0;
- cdev_del(&i2c_dev->cdev);
- put_i2c_dev(i2c_dev);
- device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
+ put_i2c_dev(i2c_dev, true);
pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
return 0;
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 035032e20327..9ba9ce5696e1 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -273,6 +273,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
err_rollback_available:
device_remove_file(&pdev->dev, &dev_attr_available_masters);
err_rollback:
+ i2c_demux_deactivate_master(priv);
for (j = 0; j < i; j++) {
of_node_put(priv->chan[j].parent_np);
of_changeset_destroy(&priv->chan[j].chgset);
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 8b2b72b93885..4224c4dd8963 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -213,7 +213,6 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
sense_rq->rq_disk = rq->rq_disk;
sense_rq->cmd_flags = REQ_OP_DRV_IN;
ide_req(sense_rq)->type = ATA_PRIV_SENSE;
- sense_rq->rq_flags |= RQF_PREEMPT;
req->cmd[0] = GPCMD_REQUEST_SENSE;
req->cmd[4] = cmd_len;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 44a7a255ef74..f9b59d41813f 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1784,7 +1784,7 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- device_add_disk(&drive->gendev, g);
+ device_add_disk(&drive->gendev, g, NULL);
return 0;
out_free_disk:
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index e823394ed543..04e008e8f6f9 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -416,7 +416,7 @@ static int ide_gd_probe(ide_drive_t *drive)
if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
g->flags = GENHD_FL_REMOVABLE;
g->fops = &ide_gd_ops;
- device_add_disk(&drive->gendev, g);
+ device_add_disk(&drive->gendev, g, NULL);
return 0;
out_free_disk:
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 0d93e0cfbeaf..438176084610 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -527,11 +527,6 @@ repeat:
* above to return us whatever is in the queue. Since we call
* ide_do_request() ourselves, we end up taking requests while
* the queue is blocked...
- *
- * We let requests forced at head of queue with ide-preempt
- * though. I hope that doesn't happen too much, hopefully not
- * unless the subdriver triggers such a thing in its own PM
- * state machine.
*/
if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
ata_pm_request(rq) == 0 &&
diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
index 4c1d482ea73a..94a80f8fdd10 100644
--- a/drivers/iio/accel/adis16201.c
+++ b/drivers/iio/accel/adis16201.c
@@ -216,7 +216,7 @@ static const struct iio_chan_spec adis16201_channels[] = {
ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_INCLI_CHAN(X, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
+ ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
IIO_CHAN_SOFT_TIMESTAMP(7)
};
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index cb8c98a44010..e029d4b0f7af 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -197,6 +197,14 @@ struct bmc150_accel_data {
struct mutex mutex;
u8 fifo_mode, watermark;
s16 buffer[8];
+ /*
+ * Ensure there is sufficient space and correct alignment for
+ * the timestamp if enabled
+ */
+ struct {
+ __le16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
u8 bw_bits;
u32 slope_dur;
u32 slope_thres;
@@ -915,15 +923,16 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
* now.
*/
for (i = 0; i < count; i++) {
- u16 sample[8];
int j, bit;
j = 0;
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength)
- memcpy(&sample[j++], &buffer[i * 3 + bit], 2);
+ memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
+ sizeof(data->scan.channels[0]));
- iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ tstamp);
tstamp += sample_period;
}
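[Illustrative aside, not part of the patch] This bmc150 change, like several IIO fixes below, replaces an on-stack array with a driver-private struct whose trailing s64 timestamp is marked __aligned(8), so that iio_push_to_buffers_with_timestamp() always finds an 8-byte aligned timestamp slot after the channel data. A small user-space sketch of the layout property, using the compiler's aligned attribute in place of the kernel's __aligned(); the field sizes here are arbitrary:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Mirrors the scan-buffer pattern: channel data followed by a timestamp
 * that is forced onto an 8-byte boundary.
 */
struct scan {
	int16_t channels[3];
	int64_t ts __attribute__((aligned(8)));
};

int main(void)
{
	/* Padding is inserted after channels[] so ts lands at offset 8. */
	printf("sizeof(struct scan) = %zu\n", sizeof(struct scan));
	printf("offsetof(scan, ts)  = %zu\n", offsetof(struct scan, ts));
	return 0;
}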
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index e5fdca74a630..c22afc979206 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -134,6 +134,12 @@ enum kx_chipset {
KX_MAX_CHIPS /* this must be last */
};
+enum kx_acpi_type {
+ ACPI_GENERIC,
+ ACPI_SMO8500,
+ ACPI_KIOX010A,
+};
+
struct kxcjk1013_data {
struct i2c_client *client;
struct iio_trigger *dready_trig;
@@ -150,7 +156,7 @@ struct kxcjk1013_data {
bool motion_trigger_on;
int64_t timestamp;
enum kx_chipset chipset;
- bool is_smo8500_device;
+ enum kx_acpi_type acpi_type;
};
enum kxcjk1013_axis {
@@ -277,6 +283,32 @@ static const struct {
{19163, 1, 0},
{38326, 0, 1} };
+#ifdef CONFIG_ACPI
+enum kiox010a_fn_index {
+ KIOX010A_SET_LAPTOP_MODE = 1,
+ KIOX010A_SET_TABLET_MODE = 2,
+};
+
+static int kiox010a_dsm(struct device *dev, int fn_index)
+{
+ acpi_handle handle = ACPI_HANDLE(dev);
+ guid_t kiox010a_dsm_guid;
+ union acpi_object *obj;
+
+ if (!handle)
+ return -ENODEV;
+
+ guid_parse("1f339696-d475-4e26-8cad-2e9f8e6d7a91", &kiox010a_dsm_guid);
+
+ obj = acpi_evaluate_dsm(handle, &kiox010a_dsm_guid, 1, fn_index, NULL);
+ if (!obj)
+ return -EIO;
+
+ ACPI_FREE(obj);
+ return 0;
+}
+#endif
+
static int kxcjk1013_set_mode(struct kxcjk1013_data *data,
enum kxcjk1013_mode mode)
{
@@ -354,6 +386,13 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
{
int ret;
+#ifdef CONFIG_ACPI
+ if (data->acpi_type == ACPI_KIOX010A) {
+ /* Make sure the kbd and touchpad on 2-in-1s using 2 KXCJ91008-s work */
+ kiox010a_dsm(&data->client->dev, KIOX010A_SET_LAPTOP_MODE);
+ }
+#endif
+
ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_WHO_AM_I);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading who_am_i\n");
@@ -1241,7 +1280,7 @@ static irqreturn_t kxcjk1013_data_rdy_trig_poll(int irq, void *private)
static const char *kxcjk1013_match_acpi_device(struct device *dev,
enum kx_chipset *chipset,
- bool *is_smo8500_device)
+ enum kx_acpi_type *acpi_type)
{
const struct acpi_device_id *id;
@@ -1250,7 +1289,9 @@ static const char *kxcjk1013_match_acpi_device(struct device *dev,
return NULL;
if (strcmp(id->id, "SMO8500") == 0)
- *is_smo8500_device = true;
+ *acpi_type = ACPI_SMO8500;
+ else if (strcmp(id->id, "KIOX010A") == 0)
+ *acpi_type = ACPI_KIOX010A;
*chipset = (enum kx_chipset)id->driver_data;
@@ -1286,7 +1327,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
} else if (ACPI_HANDLE(&client->dev)) {
name = kxcjk1013_match_acpi_device(&client->dev,
&data->chipset,
- &data->is_smo8500_device);
+ &data->acpi_type);
} else
return -ENODEV;
@@ -1304,7 +1345,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &kxcjk1013_info;
- if (client->irq > 0 && !data->is_smo8500_device) {
+ if (client->irq > 0 && data->acpi_type != ACPI_SMO8500) {
ret = devm_request_threaded_irq(&client->dev, client->irq,
kxcjk1013_data_rdy_trig_poll,
kxcjk1013_event_handler,
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 0c0df4fce420..f74cb2e082a6 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -212,14 +212,20 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
const struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct kxsd9_state *st = iio_priv(indio_dev);
+ /*
+ * Ensure correct positioning and alignment of timestamp.
+ * No need to zero initialize as all elements are written.
+ */
+ struct {
+ __be16 chan[4];
+ s64 ts __aligned(8);
+ } hw_values;
int ret;
- /* 4 * 16bit values AND timestamp */
- __be16 hw_values[8];
ret = regmap_bulk_read(st->map,
KXSD9_REG_X,
- &hw_values,
- 8);
+ hw_values.chan,
+ sizeof(hw_values.chan));
if (ret) {
dev_err(st->dev,
"error reading data\n");
@@ -227,7 +233,7 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
}
iio_push_to_buffers_with_timestamp(indio_dev,
- hw_values,
+ &hw_values,
iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index da0ceaac46b5..a3b5d5780bc8 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -55,6 +55,14 @@
struct mma7455_data {
struct regmap *regmap;
+ /*
+ * Used to reorganize data. Will ensure correct alignment of
+ * the timestamp if present
+ */
+ struct {
+ __le16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
};
static int mma7455_drdy(struct mma7455_data *mma7455)
@@ -85,19 +93,19 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mma7455_data *mma7455 = iio_priv(indio_dev);
- u8 buf[16]; /* 3 x 16-bit channels + padding + ts */
int ret;
ret = mma7455_drdy(mma7455);
if (ret)
goto done;
- ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, buf,
- sizeof(__le16) * 3);
+ ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL,
+ mma7455->scan.channels,
+ sizeof(mma7455->scan.channels));
if (ret)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &mma7455->scan,
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 421a0a8a1379..15c254b4745c 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -107,6 +107,12 @@ struct mma8452_data {
u8 data_cfg;
const struct mma_chip_info *chip_info;
int sleep_val;
+
+ /* Ensure correct alignment of time stamp when present */
+ struct {
+ __be16 channels[3];
+ s64 ts __aligned(8);
+ } buffer;
};
/**
@@ -1088,14 +1094,13 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mma8452_data *data = iio_priv(indio_dev);
- u8 buffer[16]; /* 3 16-bit channels + padding + ts */
int ret;
- ret = mma8452_read(data, (__be16 *)buffer);
+ ret = mma8452_read(data, data->buffer.channels);
if (ret < 0)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
iio_get_time_ns(indio_dev));
done:
@@ -1651,10 +1656,13 @@ static int mma8452_probe(struct i2c_client *client,
ret = mma8452_set_freefall_mode(data, false);
if (ret < 0)
- goto buffer_cleanup;
+ goto unregister_device;
return 0;
+unregister_device:
+ iio_device_unregister(indio_dev);
+
buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index 4964561595f5..7218acf1a907 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -982,7 +982,7 @@ static int sca3000_read_data(struct sca3000_state *st,
st->tx[0] = SCA3000_READ_REG(reg_address_high);
ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
if (ret) {
- dev_err(get_device(&st->us->dev), "problem reading register");
+ dev_err(&st->us->dev, "problem reading register\n");
return ret;
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 9421c1ec86f7..1dabd366ec0b 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -658,6 +658,7 @@ config STM32_ADC_CORE
depends on ARCH_STM32 || COMPILE_TEST
depends on OF
depends on REGULATOR
+ depends on HAS_IOMEM
select IIO_BUFFER
select MFD_STM32_TIMERS
select IIO_STM32_TIMER_TRIGGER
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index d4bbe5b53318..b5952ee3031c 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -279,6 +279,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
id &= AD7793_ID_MASK;
if (id != st->chip_info->id) {
+ ret = -ENODEV;
dev_err(&st->sd.spi->dev, "device ID query failed\n");
goto out;
}
@@ -542,7 +543,7 @@ static const struct iio_info ad7797_info = {
.read_raw = &ad7793_read_raw,
.write_raw = &ad7793_write_raw,
.write_raw_get_fmt = &ad7793_write_raw_get_fmt,
- .attrs = &ad7793_attribute_group,
+ .attrs = &ad7797_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
};
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index d1239624187d..1ab106b3d3a6 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -146,6 +146,11 @@ struct ina2xx_chip_info {
int range_vbus; /* Bus voltage maximum in V */
int pga_gain_vshunt; /* Shunt voltage PGA gain */
bool allow_async_readout;
+ /* data buffer needs space for channel data and timestamp */
+ struct {
+ u16 chan[4];
+ u64 ts __aligned(8);
+ } scan;
};
static const struct ina2xx_config ina2xx_config[] = {
@@ -736,8 +741,6 @@ static int ina2xx_conversion_ready(struct iio_dev *indio_dev)
static int ina2xx_work_buffer(struct iio_dev *indio_dev)
{
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
- /* data buffer needs space for channel data and timestap */
- unsigned short data[4 + sizeof(s64)/sizeof(short)];
int bit, ret, i = 0;
s64 time;
@@ -756,10 +759,10 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
if (ret < 0)
return ret;
- data[i++] = val;
+ chip->scan.chan[i++] = val;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data, time);
+ iio_push_to_buffers_with_timestamp(indio_dev, &chip->scan, time);
return 0;
};
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index 49db9e9ae625..b372b226ac20 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -38,6 +38,11 @@ struct max1118 {
struct spi_device *spi;
struct mutex lock;
struct regulator *reg;
+ /* Ensure natural alignment of buffer elements */
+ struct {
+ u8 channels[2];
+ s64 ts __aligned(8);
+ } scan;
u8 data ____cacheline_aligned;
};
@@ -162,7 +167,6 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct max1118 *adc = iio_priv(indio_dev);
- u8 data[16] = { }; /* 2x 8-bit ADC data + padding + 8 bytes timestamp */
int scan_index;
int i = 0;
@@ -180,10 +184,10 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p)
goto out;
}
- data[i] = ret;
+ adc->scan.channels[i] = ret;
i++;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
iio_get_time_ns(indio_dev));
out:
mutex_unlock(&adc->lock);
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 819f26011500..4ee4ca35c255 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -99,16 +99,12 @@ static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig)
{
int ret;
- mutex_lock(&adc->lock);
-
ret = i2c_master_send(adc->i2c, &newconfig, 1);
if (ret > 0) {
adc->config = newconfig;
ret = 0;
}
- mutex_unlock(&adc->lock);
-
return ret;
}
@@ -141,6 +137,8 @@ static int mcp3422_read_channel(struct mcp3422 *adc,
u8 config;
u8 req_channel = channel->channel;
+ mutex_lock(&adc->lock);
+
if (req_channel != MCP3422_CHANNEL(adc->config)) {
config = adc->config;
config &= ~MCP3422_CHANNEL_MASK;
@@ -148,12 +146,18 @@ static int mcp3422_read_channel(struct mcp3422 *adc,
config &= ~MCP3422_PGA_MASK;
config |= MCP3422_PGA_VALUE(adc->pga[req_channel]);
ret = mcp3422_update_config(adc, config);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&adc->lock);
return ret;
+ }
msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]);
}
- return mcp3422_read(adc, value, &config);
+ ret = mcp3422_read(adc, value, &config);
+
+ mutex_unlock(&adc->lock);
+
+ return ret;
}
static int mcp3422_read_raw(struct iio_dev *iio,
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index 3680e0d47412..2640155b2644 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -606,7 +606,7 @@ static const struct vadc_channels vadc_chans[] = {
VADC_CHAN_NO_SCALE(P_MUX16_1_3, 1)
VADC_CHAN_NO_SCALE(LR_MUX1_BAT_THERM, 0)
- VADC_CHAN_NO_SCALE(LR_MUX2_BAT_ID, 0)
+ VADC_CHAN_VOLT(LR_MUX2_BAT_ID, 0, SCALE_DEFAULT)
VADC_CHAN_NO_SCALE(LR_MUX3_XO_THERM, 0)
VADC_CHAN_NO_SCALE(LR_MUX4_AMUX_THM1, 0)
VADC_CHAN_NO_SCALE(LR_MUX5_AMUX_THM2, 0)
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 1f98566d5b3c..5ae3ce60a33f 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -383,7 +383,7 @@ static int rockchip_saradc_resume(struct device *dev)
ret = clk_prepare_enable(info->clk);
if (ret)
- return ret;
+ clk_disable_unprepare(info->pclk);
return ret;
}
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 0409dcf5b047..59fd8b620c50 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1308,8 +1308,30 @@ static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc)
static void stm32_adc_dma_buffer_done(void *data)
{
struct iio_dev *indio_dev = data;
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ int residue = stm32_adc_dma_residue(adc);
+
+ /*
+ * In DMA mode the trigger services of IIO are not used
+ * (e.g. no call to iio_trigger_poll).
+ * Calling the irq handler associated with the hardware trigger is not
+ * relevant as the conversions have already been done. Data
+ * transfers are performed directly in the DMA callback instead.
+ * This implementation avoids calling the trigger irq handler, which
+ * may sleep, in an atomic context (DMA irq handler context).
+ */
+ dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi);
+
+ while (residue >= indio_dev->scan_bytes) {
+ u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi];
- iio_trigger_poll_chained(indio_dev->trig);
+ iio_push_to_buffers(indio_dev, buffer);
+
+ residue -= indio_dev->scan_bytes;
+ adc->bufi += indio_dev->scan_bytes;
+ if (adc->bufi >= adc->rx_buf_sz)
+ adc->bufi = 0;
+ }
}
static int stm32_adc_dma_start(struct iio_dev *indio_dev)
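[Illustrative aside, not part of the patch] The DMA completion callback above drains the circular rx buffer one scan at a time, advancing bufi and wrapping at the buffer size, rather than kicking the IIO trigger from atomic context. A rough user-space sketch of that consumption loop, with push_scan() standing in for iio_push_to_buffers() and made-up sizes:

#include <stdio.h>
#include <string.h>

#define RX_BUF_SZ	64	/* hypothetical circular buffer size */
#define SCAN_BYTES	8	/* hypothetical size of one scan */

static unsigned char rx_buf[RX_BUF_SZ];
static unsigned int bufi;	/* next unread position, as in the driver */

/* Stand-in for iio_push_to_buffers(): just report which scan was consumed. */
static void push_scan(const unsigned char *scan)
{
	printf("pushed scan starting with 0x%02x\n", scan[0]);
}

/* Consume 'residue' bytes of ready data, one scan at a time, wrapping bufi. */
static void drain(int residue)
{
	while (residue >= SCAN_BYTES) {
		push_scan(&rx_buf[bufi]);
		residue -= SCAN_BYTES;
		bufi += SCAN_BYTES;
		if (bufi >= RX_BUF_SZ)
			bufi = 0;
	}
}

int main(void)
{
	memset(rx_buf, 0xab, sizeof(rx_buf));
	drain(3 * SCAN_BYTES + 4);	/* any partial scan is left for next time */
	return 0;
}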
@@ -1660,15 +1682,27 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
return 0;
}
-static int stm32_adc_dma_request(struct iio_dev *indio_dev)
+static int stm32_adc_dma_request(struct device *dev, struct iio_dev *indio_dev)
{
struct stm32_adc *adc = iio_priv(indio_dev);
struct dma_slave_config config;
int ret;
- adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
- if (!adc->dma_chan)
+ adc->dma_chan = dma_request_chan(dev, "rx");
+ if (IS_ERR(adc->dma_chan)) {
+ ret = PTR_ERR(adc->dma_chan);
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev,
+ "DMA channel request failed with %d\n",
+ ret);
+ return ret;
+ }
+
+ /* DMA is optional: fall back to IRQ mode */
+ adc->dma_chan = NULL;
return 0;
+ }
adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
STM32_DMA_BUFFER_SIZE,
@@ -1703,6 +1737,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
struct device *dev = &pdev->dev;
+ irqreturn_t (*handler)(int irq, void *p) = NULL;
struct stm32_adc *adc;
int ret;
@@ -1781,13 +1816,15 @@ static int stm32_adc_probe(struct platform_device *pdev)
if (ret < 0)
goto err_clk_disable;
- ret = stm32_adc_dma_request(indio_dev);
+ ret = stm32_adc_dma_request(dev, indio_dev);
if (ret < 0)
goto err_clk_disable;
+ if (!adc->dma_chan)
+ handler = &stm32_adc_trigger_handler;
+
ret = iio_triggered_buffer_setup(indio_dev,
- &iio_pollfunc_store_time,
- &stm32_adc_trigger_handler,
+ &iio_pollfunc_store_time, handler,
&stm32_adc_buffer_setup_ops);
if (ret) {
dev_err(&pdev->dev, "buffer setup failed\n");
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index f5586dd6414d..1c492a7f4587 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -45,7 +45,7 @@ enum sd_converter_type {
struct stm32_dfsdm_dev_data {
int type;
- int (*init)(struct iio_dev *indio_dev);
+ int (*init)(struct device *dev, struct iio_dev *indio_dev);
unsigned int num_channels;
const struct regmap_config *regmap_cfg;
};
@@ -923,7 +923,8 @@ static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
}
}
-static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
+static int stm32_dfsdm_dma_request(struct device *dev,
+ struct iio_dev *indio_dev)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
struct dma_slave_config config = {
@@ -933,9 +934,13 @@ static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
};
int ret;
- adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
- if (!adc->dma_chan)
- return -EINVAL;
+ adc->dma_chan = dma_request_chan(dev, "rx");
+ if (IS_ERR(adc->dma_chan)) {
+ int ret = PTR_ERR(adc->dma_chan);
+
+ adc->dma_chan = NULL;
+ return ret;
+ }
adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
DFSDM_DMA_BUFFER_SIZE,
@@ -993,7 +998,7 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
&adc->dfsdm->ch_list[ch->channel]);
}
-static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
+static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev)
{
struct iio_chan_spec *ch;
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
@@ -1023,10 +1028,10 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
indio_dev->num_channels = 1;
indio_dev->channels = ch;
- return stm32_dfsdm_dma_request(indio_dev);
+ return stm32_dfsdm_dma_request(dev, indio_dev);
}
-static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
+static int stm32_dfsdm_adc_init(struct device *dev, struct iio_dev *indio_dev)
{
struct iio_chan_spec *ch;
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
@@ -1170,7 +1175,7 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
adc->dfsdm->fl_list[adc->fl_id].sync_mode = val;
adc->dev_data = dev_data;
- ret = dev_data->init(iio);
+ ret = dev_data->init(dev, iio);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 405e3779c0c5..ef95363ebac2 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -36,6 +36,12 @@ struct adc081c {
/* 8, 10 or 12 */
int bits;
+
+ /* Ensure natural alignment of buffer elements */
+ struct {
+ u16 channel;
+ s64 ts __aligned(8);
+ } scan;
};
#define REG_CONV_RES 0x00
@@ -131,14 +137,13 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adc081c *data = iio_priv(indio_dev);
- u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */
int ret;
ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES);
if (ret < 0)
goto out;
- buf[0] = ret;
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ data->scan.channel = ret;
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index 188dae705bf7..a408d97e2d2e 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -31,6 +31,12 @@ struct adc0832 {
struct regulator *reg;
struct mutex lock;
u8 mux_bits;
+ /*
+ * Max size needed: 16x 1 byte ADC data + 8 bytes timestamp
+ * The buffer may be shorter if not all channels are enabled,
+ * provided the timestamp remains 8-byte aligned.
+ */
+ u8 data[24] __aligned(8);
u8 tx_buf[2] ____cacheline_aligned;
u8 rx_buf[2];
@@ -202,7 +208,6 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adc0832 *adc = iio_priv(indio_dev);
- u8 data[24] = { }; /* 16x 1 byte ADC data + 8 bytes timestamp */
int scan_index;
int i = 0;
@@ -220,10 +225,10 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p)
goto out;
}
- data[i] = ret;
+ adc->data[i] = ret;
i++;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, adc->data,
iio_get_time_ns(indio_dev));
out:
mutex_unlock(&adc->lock);
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index 25504640e126..ec490e7a5b73 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -28,6 +28,11 @@ struct adc084s021 {
struct spi_transfer spi_trans;
struct regulator *reg;
struct mutex lock;
+ /* Buffer used to align data */
+ struct {
+ __be16 channels[4];
+ s64 ts __aligned(8);
+ } scan;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache line.
@@ -143,14 +148,13 @@ static irqreturn_t adc084s021_buffer_trigger_handler(int irq, void *pollfunc)
struct iio_poll_func *pf = pollfunc;
struct iio_dev *indio_dev = pf->indio_dev;
struct adc084s021 *adc = iio_priv(indio_dev);
- __be16 data[8] = {0}; /* 4 * 16-bit words of data + 8 bytes timestamp */
mutex_lock(&adc->lock);
- if (adc084s021_adc_conversion(adc, &data) < 0)
+ if (adc084s021_adc_conversion(adc, adc->scan.channels) < 0)
dev_err(&adc->spi->dev, "Failed to read data\n");
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
iio_get_time_ns(indio_dev));
mutex_unlock(&adc->lock);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index 703d68ae96b7..4517d7742bc3 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -50,6 +50,12 @@ struct adc12138 {
struct completion complete;
/* The number of cclk periods for the S/H's acquisition time */
unsigned int acquisition_time;
+ /*
+ * Maximum size needed: 16x 2 bytes ADC data + 8 bytes timestamp.
+ * Less may be needed if not all channels are enabled, as long as
+ * the 8 byte alignment of the timestamp is maintained.
+ */
+ __be16 data[20] __aligned(8);
u8 tx_buf[2] ____cacheline_aligned;
u8 rx_buf[2];
@@ -332,7 +338,6 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adc12138 *adc = iio_priv(indio_dev);
- __be16 data[20] = { }; /* 16x 2 bytes ADC data + 8 bytes timestamp */
__be16 trash;
int ret;
int scan_index;
@@ -348,7 +353,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
reinit_completion(&adc->complete);
ret = adc12138_start_and_read_conv(adc, scan_chan,
- i ? &data[i - 1] : &trash);
+ i ? &adc->data[i - 1] : &trash);
if (ret) {
dev_warn(&adc->spi->dev,
"failed to start conversion\n");
@@ -365,7 +370,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
}
if (i) {
- ret = adc12138_read_conv_data(adc, &data[i - 1]);
+ ret = adc12138_read_conv_data(adc, &adc->data[i - 1]);
if (ret) {
dev_warn(&adc->spi->dev,
"failed to get conversion data\n");
@@ -373,7 +378,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
}
}
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, adc->data,
iio_get_time_ns(indio_dev));
out:
mutex_unlock(&adc->lock);
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 6a114dcb4a3a..dc8d859e4b92 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -312,6 +312,7 @@ static const struct iio_chan_spec ads1115_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
};
+#ifdef CONFIG_PM
static int ads1015_set_power_state(struct ads1015_data *data, bool on)
{
int ret;
@@ -329,6 +330,15 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
return ret < 0 ? ret : 0;
}
+#else /* !CONFIG_PM */
+
+static int ads1015_set_power_state(struct ads1015_data *data, bool on)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_PM */
+
static
int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
{
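
Reviewer note: the ads1015 hunk keeps the driver building when CONFIG_PM is disabled by pairing the real helper with a no-op stub behind the preprocessor switch. Here is a compile-time sketch of that pattern, using a hypothetical HAVE_PM define rather than the kernel's CONFIG_PM.

#include <stdio.h>

struct dev { int powered; };

#ifdef HAVE_PM
static int set_power_state(struct dev *d, int on)
{
	d->powered = on;	/* the real runtime-PM work would go here */
	return 0;
}
#else
/* With PM compiled out the helper collapses to a successful no-op. */
static int set_power_state(struct dev *d, int on)
{
	(void)d;
	(void)on;
	return 0;
}
#endif

int main(void)
{
	struct dev d = { 0 };

	printf("set_power_state() -> %d, powered=%d\n",
	       set_power_state(&d, 1), d.powered);
	return 0;
}

Building with -DHAVE_PM exercises the real path, building without it exercises the stub; callers see the same return convention either way, which is the point of the fix.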
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 1ae86e7359f7..1b0046cc717b 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -103,6 +103,16 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
#define XADC_FLAGS_BUFFERED BIT(0)
+/*
+ * The XADC hardware supports a sample rate of up to 1 MSPS. Unfortunately it
+ * does not have a hardware FIFO, which means an interrupt is generated for
+ * each conversion sequence. At a 1 MSPS sample rate the CPU in the Zynq-7000
+ * is so overloaded by the interrupts that soft lockups occur. For this reason
+ * the driver limits the maximum sample rate to 150 kSPS. At this rate the CPU
+ * is fairly busy, but still responsive.
+ */
+#define XADC_MAX_SAMPLERATE 150000
+
static void xadc_write_reg(struct xadc *xadc, unsigned int reg,
uint32_t val)
{
@@ -675,7 +685,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
spin_lock_irqsave(&xadc->lock, flags);
xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
- xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
+ xadc_write_reg(xadc, XADC_AXI_REG_IPISR, XADC_AXI_INT_EOS);
if (state)
val |= XADC_AXI_INT_EOS;
else
@@ -723,13 +733,14 @@ static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
{
uint16_t val;
+ /* Power down the ADC-B when it is not needed. */
switch (seq_mode) {
case XADC_CONF1_SEQ_SIMULTANEOUS:
case XADC_CONF1_SEQ_INDEPENDENT:
- val = XADC_CONF2_PD_ADC_B;
+ val = 0;
break;
default:
- val = 0;
+ val = XADC_CONF2_PD_ADC_B;
break;
}
@@ -798,6 +809,16 @@ static int xadc_preenable(struct iio_dev *indio_dev)
if (ret)
goto err;
+ /*
+ * In simultaneous mode the upper and lower aux channels are sampled at
+ * the same time. In this mode the upper 8 bits in the sequencer
+ * register are don't care and the lower 8 bits control two channels
+ * each. As such we must set the bit if either the channel in the lower
+ * group or the upper group is enabled.
+ */
+ if (seq_mode == XADC_CONF1_SEQ_SIMULTANEOUS)
+ scan_mask = ((scan_mask >> 8) | scan_mask) & 0xff0000;
+
ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
if (ret)
goto err;
@@ -824,11 +845,27 @@ static const struct iio_buffer_setup_ops xadc_buffer_ops = {
.postdisable = &xadc_postdisable,
};
+static int xadc_read_samplerate(struct xadc *xadc)
+{
+ unsigned int div;
+ uint16_t val16;
+ int ret;
+
+ ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
+ if (ret)
+ return ret;
+
+ div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
+ if (div < 2)
+ div = 2;
+
+ return xadc_get_dclk_rate(xadc) / div / 26;
+}
+
static int xadc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long info)
{
struct xadc *xadc = iio_priv(indio_dev);
- unsigned int div;
uint16_t val16;
int ret;
@@ -881,41 +918,31 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
*val = -((273150 << 12) / 503975);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
- if (ret)
+ ret = xadc_read_samplerate(xadc);
+ if (ret < 0)
return ret;
- div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
- if (div < 2)
- div = 2;
-
- *val = xadc_get_dclk_rate(xadc) / div / 26;
-
+ *val = ret;
return IIO_VAL_INT;
default:
return -EINVAL;
}
}
-static int xadc_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int val, int val2, long info)
+static int xadc_write_samplerate(struct xadc *xadc, int val)
{
- struct xadc *xadc = iio_priv(indio_dev);
unsigned long clk_rate = xadc_get_dclk_rate(xadc);
unsigned int div;
if (!clk_rate)
return -EINVAL;
- if (info != IIO_CHAN_INFO_SAMP_FREQ)
- return -EINVAL;
-
if (val <= 0)
return -EINVAL;
/* Max. 150 kSPS */
- if (val > 150000)
- val = 150000;
+ if (val > XADC_MAX_SAMPLERATE)
+ val = XADC_MAX_SAMPLERATE;
val *= 26;
@@ -928,7 +955,7 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
* limit.
*/
div = clk_rate / val;
- if (clk_rate / div / 26 > 150000)
+ if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE)
div++;
if (div < 2)
div = 2;
@@ -939,6 +966,17 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
div << XADC_CONF2_DIV_OFFSET);
}
+static int xadc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+
+ if (info != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ return xadc_write_samplerate(xadc, val);
+}
+
static const struct iio_event_spec xadc_temp_events[] = {
{
.type = IIO_EV_TYPE_THRESH,
@@ -1226,6 +1264,21 @@ static int xadc_probe(struct platform_device *pdev)
if (ret)
goto err_free_samplerate_trigger;
+ /*
+ * Make sure not to exceed the maximum samplerate since otherwise the
+ * resulting interrupt storm will soft-lock the system.
+ */
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
+ ret = xadc_read_samplerate(xadc);
+ if (ret < 0)
+ goto err_free_samplerate_trigger;
+ if (ret > XADC_MAX_SAMPLERATE) {
+ ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE);
+ if (ret < 0)
+ goto err_free_samplerate_trigger;
+ }
+ }
+
ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
dev_name(&pdev->dev), indio_dev);
if (ret)
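
Reviewer note: xadc_write_samplerate() turns the requested rate into a clock divider. Each conversion takes 26 ADCCLK cycles, so div = clk_rate / (26 * rate), incremented once if rounding down would still exceed XADC_MAX_SAMPLERATE and clamped to a minimum of 2. A worked userspace sketch of that arithmetic follows; the 104 MHz DCLK value is assumed purely for illustration.

#include <stdio.h>

#define XADC_MAX_SAMPLERATE 150000

/* Returns the divider the driver would program for a requested rate (rate > 0). */
static unsigned int xadc_pick_div(unsigned long clk_rate, int rate)
{
	unsigned int div;

	if (rate > XADC_MAX_SAMPLERATE)
		rate = XADC_MAX_SAMPLERATE;	/* cap to avoid an interrupt storm */

	rate *= 26;				/* 26 ADCCLK cycles per conversion */
	div = clk_rate / rate;
	if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE)
		div++;
	if (div < 2)
		div = 2;
	return div;
}

int main(void)
{
	unsigned long clk = 104000000UL;	/* assumed 104 MHz DCLK, illustration only */
	unsigned int div = xadc_pick_div(clk, 1000000);

	printf("div=%u -> actual rate %lu SPS\n", div, clk / div / 26);
	return 0;
}

Asking for 1 MSPS with this clock yields div=27 and an actual rate of about 148 kSPS, i.e. the request is silently clamped below the 150 kSPS ceiling the comment describes.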
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index b4a46eb45789..46d5d48b58b6 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -78,6 +78,11 @@ struct ccs811_data {
struct ccs811_reading buffer;
struct iio_trigger *drdy_trig;
bool drdy_trig_on;
+ /* Ensures correct alignment of timestamp if present */
+ struct {
+ s16 channels[2];
+ s64 ts __aligned(8);
+ } scan;
};
static const struct iio_chan_spec ccs811_channels[] = {
@@ -309,17 +314,17 @@ static irqreturn_t ccs811_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct ccs811_data *data = iio_priv(indio_dev);
struct i2c_client *client = data->client;
- s16 buf[8]; /* s16 eCO2 + s16 TVOC + padding + 8 byte timestamp */
int ret;
- ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, 4,
- (u8 *)&buf);
+ ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA,
+ sizeof(data->scan.channels),
+ (u8 *)data->scan.channels);
if (ret != 4) {
dev_err(&client->dev, "cannot read sensor data\n");
goto err;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
err:
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 26fbd1bd9413..09279e40c55c 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -93,7 +93,7 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
- if (!sdata->sensor_settings->odr.addr)
+ if (!sdata->sensor_settings->odr.mask)
return 0;
err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index d9037ea59168..97b9ce305a68 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -189,9 +189,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
return ret;
if (pwr_down)
- st->pwr_down_mask |= (1 << chan->channel);
- else
st->pwr_down_mask &= ~(1 << chan->channel);
+ else
+ st->pwr_down_mask |= (1 << chan->channel);
ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 095530c233e4..7549abd544c0 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -417,7 +417,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
s64 tmp = *val * (3767897513LL / 25LL);
*val = div_s64_rem(tmp, 1000000000LL, val2);
- ret = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_VAL_INT_PLUS_MICRO;
} else {
int mult;
@@ -448,7 +448,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
ret = IIO_VAL_INT;
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
unlock:
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
index 5dccdd16cab3..9cf2e2933b66 100644
--- a/drivers/iio/dac/vf610_dac.c
+++ b/drivers/iio/dac/vf610_dac.c
@@ -234,6 +234,7 @@ static int vf610_dac_probe(struct platform_device *pdev)
return 0;
error_iio_device_register:
+ vf610_dac_exit(info);
clk_disable_unprepare(info->clk);
return ret;
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
index 59770e5b6660..b080362a8766 100644
--- a/drivers/iio/gyro/itg3200_buffer.c
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -49,13 +49,20 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct itg3200 *st = iio_priv(indio_dev);
- __be16 buf[ITG3200_SCAN_ELEMENTS + sizeof(s64)/sizeof(u16)];
-
- int ret = itg3200_read_all_channels(st->i2c, buf);
+ /*
+ * Ensure correct alignment and padding, including for the
+ * timestamp that may be inserted.
+ */
+ struct {
+ __be16 buf[ITG3200_SCAN_ELEMENTS];
+ s64 ts __aligned(8);
+ } scan;
+
+ int ret = itg3200_read_all_channels(st->i2c, scan.buf);
if (ret < 0)
goto error_ret;
- iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 5ddebede31a6..e6fdd0f4b4bc 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -270,7 +270,16 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_OFFSET:
switch (chan->type) {
case IIO_TEMP:
- /* The temperature scaling is (x+23000)/280 Celsius */
+ /*
+ * The temperature scaling is (x+23000)/280 Celsius
+ * for the "best fit straight line" temperature range
+ * of -30C..85C. The 23000 includes the room temperature
+ * offset of +35C, 280 is the precision scale and x is
+ * the 16-bit signed integer reported by the hardware.
+ *
+ * The temperature value itself represents the temperature
+ * of the sensor die.
+ */
*val = 23000;
return IIO_VAL_INT;
default:
@@ -327,7 +336,7 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
goto out_read_raw_unlock;
}
- *val = be16_to_cpu(raw_val);
+ *val = (s16)be16_to_cpu(raw_val);
ret = IIO_VAL_INT;
goto out_read_raw_unlock;
@@ -549,6 +558,8 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
MPU3050_FIFO_R,
&fifo_values[offset],
toread);
+ if (ret)
+ goto out_trigger_unlock;
dev_dbg(mpu3050->dev,
"%04x %04x %04x %04x %04x\n",
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index a739fff01c6b..63210a3b1b87 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -71,6 +71,7 @@ static const struct reg_field afe4403_reg_fields[] = {
* @regulator: Pointer to the regulator for the IC
* @trig: IIO trigger for this device
* @irq: ADC_RDY line interrupt number
+ * @buffer: Used to construct the data layout pushed into the IIO buffer.
*/
struct afe4403_data {
struct device *dev;
@@ -80,6 +81,8 @@ struct afe4403_data {
struct regulator *regulator;
struct iio_trigger *trig;
int irq;
+ /* Ensure suitable alignment for timestamp */
+ s32 buffer[8] __aligned(8);
};
enum afe4403_chan_id {
@@ -317,7 +320,6 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
struct iio_dev *indio_dev = pf->indio_dev;
struct afe4403_data *afe = iio_priv(indio_dev);
int ret, bit, i = 0;
- s32 buffer[8];
u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ};
u8 rx[3];
@@ -334,9 +336,9 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
if (ret)
goto err;
- buffer[i++] = (rx[0] << 16) |
- (rx[1] << 8) |
- (rx[2]);
+ afe->buffer[i++] = (rx[0] << 16) |
+ (rx[1] << 8) |
+ (rx[2]);
}
/* Disable reading from the device */
@@ -345,7 +347,8 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
if (ret)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer,
+ pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 11910922e655..23e1ac6501a1 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -91,6 +91,7 @@ static const struct reg_field afe4404_reg_fields[] = {
* @regulator: Pointer to the regulator for the IC
* @trig: IIO trigger for this device
* @irq: ADC_RDY line interrupt number
+ * @buffer: Used to construct a scan to push to the iio buffer.
*/
struct afe4404_data {
struct device *dev;
@@ -99,6 +100,7 @@ struct afe4404_data {
struct regulator *regulator;
struct iio_trigger *trig;
int irq;
+ s32 buffer[10] __aligned(8);
};
enum afe4404_chan_id {
@@ -336,17 +338,17 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private)
struct iio_dev *indio_dev = pf->indio_dev;
struct afe4404_data *afe = iio_priv(indio_dev);
int ret, bit, i = 0;
- s32 buffer[10];
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = regmap_read(afe->regmap, afe4404_channel_values[bit],
- &buffer[i++]);
+ &afe->buffer[i++]);
if (ret)
goto err;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer,
+ pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index ff6666ac5d68..0fcaa2c0b2f4 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -38,6 +38,11 @@ struct hdc100x_data {
/* integration time of the sensor */
int adc_int_us[2];
+ /* Ensure natural alignment of timestamp */
+ struct {
+ __be16 channels[2];
+ s64 ts __aligned(8);
+ } scan;
};
/* integration time in us */
@@ -319,7 +324,6 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
struct i2c_client *client = data->client;
int delay = data->adc_int_us[0] + data->adc_int_us[1];
int ret;
- s16 buf[8]; /* 2x s16 + padding + 8 byte timestamp */
/* dual read starts at temp register */
mutex_lock(&data->lock);
@@ -330,13 +334,13 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
}
usleep_range(delay, delay + 1000);
- ret = i2c_master_recv(client, (u8 *)buf, 4);
+ ret = i2c_master_recv(client, (u8 *)data->scan.channels, 4);
if (ret < 0) {
dev_err(&client->dev, "cannot read sensor data\n");
goto err;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
err:
mutex_unlock(&data->lock);
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
index 4bc95f31c730..d8c30d94d742 100644
--- a/drivers/iio/humidity/hid-sensor-humidity.c
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
@@ -28,7 +28,10 @@
struct hid_humidity_state {
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info humidity_attr;
- s32 humidity_data;
+ struct {
+ s32 humidity_data;
+ u64 timestamp __aligned(8);
+ } scan;
int scale_pre_decml;
int scale_post_decml;
int scale_precision;
@@ -138,9 +141,8 @@ static int humidity_proc_event(struct hid_sensor_hub_device *hsdev,
struct hid_humidity_state *humid_st = iio_priv(indio_dev);
if (atomic_read(&humid_st->common_attributes.data_ready))
- iio_push_to_buffers_with_timestamp(indio_dev,
- &humid_st->humidity_data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_timestamp(indio_dev, &humid_st->scan,
+ iio_get_time_ns(indio_dev));
return 0;
}
@@ -155,7 +157,7 @@ static int humidity_capture_sample(struct hid_sensor_hub_device *hsdev,
switch (usage_id) {
case HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY:
- humid_st->humidity_data = *(s32 *)raw_data;
+ humid_st->scan.humidity_data = *(s32 *)raw_data;
return 0;
default:
diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h
index e41a3d83e95d..838c05056075 100644
--- a/drivers/iio/humidity/hts221.h
+++ b/drivers/iio/humidity/hts221.h
@@ -15,8 +15,6 @@
#include <linux/iio/iio.h>
-#define HTS221_DATA_SIZE 2
-
enum hts221_sensor_type {
HTS221_SENSOR_H,
HTS221_SENSOR_T,
@@ -40,6 +38,11 @@ struct hts221_hw {
bool enabled;
u8 odr;
+ /* Ensure natural alignment of timestamp */
+ struct {
+ __le16 channels[2];
+ s64 ts __aligned(8);
+ } scan;
};
extern const struct dev_pm_ops hts221_pm_ops;
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
index 1a94b0b91721..910a5d4eae23 100644
--- a/drivers/iio/humidity/hts221_buffer.c
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -163,7 +163,6 @@ static const struct iio_buffer_setup_ops hts221_buffer_ops = {
static irqreturn_t hts221_buffer_handler_thread(int irq, void *p)
{
- u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)];
struct iio_poll_func *pf = p;
struct iio_dev *iio_dev = pf->indio_dev;
struct hts221_hw *hw = iio_priv(iio_dev);
@@ -173,18 +172,20 @@ static irqreturn_t hts221_buffer_handler_thread(int irq, void *p)
/* humidity data */
ch = &iio_dev->channels[HTS221_SENSOR_H];
err = regmap_bulk_read(hw->regmap, ch->address,
- buffer, HTS221_DATA_SIZE);
+ &hw->scan.channels[0],
+ sizeof(hw->scan.channels[0]));
if (err < 0)
goto out;
/* temperature data */
ch = &iio_dev->channels[HTS221_SENSOR_T];
err = regmap_bulk_read(hw->regmap, ch->address,
- buffer + HTS221_DATA_SIZE, HTS221_DATA_SIZE);
+ &hw->scan.channels[1],
+ sizeof(hw->scan.channels[1]));
if (err < 0)
goto out;
- iio_push_to_buffers_with_timestamp(iio_dev, buffer,
+ iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan,
iio_get_time_ns(iio_dev));
out:
diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c
index e70a5339acb1..3fc11aec98b9 100644
--- a/drivers/iio/imu/adis16400_buffer.c
+++ b/drivers/iio/imu/adis16400_buffer.c
@@ -38,8 +38,11 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
return -ENOMEM;
adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
- if (!adis->buffer)
+ if (!adis->buffer) {
+ kfree(adis->xfer);
+ adis->xfer = NULL;
return -ENOMEM;
+ }
tx = adis->buffer + burst_length;
tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
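
Reviewer note: the adis16400 hunk frees the freshly allocated xfer array and clears the pointer when the buffer allocation fails, so nothing dangles if the update is retried. A generic userspace sketch of that paired-allocation error path, with invented sizes, is below.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct adis_like {
	void *xfer;
	void *buffer;
};

/* Allocate two buffers; on failure of the second, undo the first. */
static int setup_buffers(struct adis_like *st, size_t xfer_len, size_t buf_len)
{
	st->xfer = calloc(1, xfer_len);
	if (!st->xfer)
		return -ENOMEM;

	st->buffer = calloc(1, buf_len);
	if (!st->buffer) {
		free(st->xfer);
		st->xfer = NULL;	/* avoid a dangling pointer on retry */
		return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	struct adis_like st = { 0 };

	printf("setup_buffers() -> %d\n", setup_buffers(&st, 64, 128));
	free(st.buffer);
	free(st.xfer);
	return 0;
}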
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 46a569005a13..ec0f50ec813e 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -288,8 +288,7 @@ static int adis16400_initial_setup(struct iio_dev *indio_dev)
if (ret)
goto err_ret;
- ret = sscanf(indio_dev->name, "adis%u\n", &device_id);
- if (ret != 1) {
+ if (sscanf(indio_dev->name, "adis%u\n", &device_id) != 1) {
ret = -EINVAL;
goto err_ret;
}
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index c85659ca9507..1e413bb233ae 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -110,6 +110,13 @@ enum bmi160_sensor_type {
struct bmi160_data {
struct regmap *regmap;
+ /*
+ * Ensure natural alignment for timestamp if present.
+ * Max length needed: 2 * 3 channels of 2 bytes each + 4 bytes padding + 8 byte ts.
+ * If fewer channels are enabled, less space may be needed, as
+ * long as the timestamp is still aligned to 8 bytes.
+ */
+ __le16 buf[12] __aligned(8);
};
const struct regmap_config bmi160_regmap_config = {
@@ -385,8 +392,6 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmi160_data *data = iio_priv(indio_dev);
- __le16 buf[16];
- /* 3 sens x 3 axis x __le16 + 3 x __le16 pad + 4 x __le16 tstamp */
int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
__le16 sample;
@@ -396,10 +401,10 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
&sample, sizeof(sample));
if (ret < 0)
goto done;
- buf[j++] = sample;
+ data->buf[j++] = sample;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buf,
iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 631360b14ca7..4d89de0be58b 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -475,13 +475,29 @@ static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private)
static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
{
struct st_lsm6dsx_hw *hw = private;
- int count;
+ int fifo_len = 0, len;
- mutex_lock(&hw->fifo_lock);
- count = st_lsm6dsx_read_fifo(hw);
- mutex_unlock(&hw->fifo_lock);
+ /*
+ * If we are using edge IRQs, new samples can arrive while
+ * processing the current interrupt since there is no hw
+ * guarantee that the irq line stays "low" long enough to properly
+ * detect the new interrupt. In this case the new sample will
+ * be missed.
+ * Polling the FIFO status register allows us to read new
+ * samples even if the interrupt arrives while processing
+ * previous data and the timeslot where the line is "low" is
+ * too short to be properly detected.
+ */
+ do {
+ mutex_lock(&hw->fifo_lock);
+ len = st_lsm6dsx_read_fifo(hw);
+ mutex_unlock(&hw->fifo_lock);
+
+ if (len > 0)
+ fifo_len += len;
+ } while (len > 0);
- return !count ? IRQ_NONE : IRQ_HANDLED;
+ return fifo_len ? IRQ_HANDLED : IRQ_NONE;
}
static int st_lsm6dsx_buffer_preenable(struct iio_dev *iio_dev)
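
Reviewer note: because an edge-triggered line can pulse again while the handler is still running, the st_lsm6dsx patch keeps re-reading the FIFO until a pass returns nothing and reports IRQ_HANDLED only if any pass produced data. A minimal sketch of that drain loop against a fake FIFO; read_fifo() is a stand-in for st_lsm6dsx_read_fifo(), not the real function.

#include <stdio.h>

static int backlog = 5;	/* pretend five sample sets are queued */

/* Stand-in for st_lsm6dsx_read_fifo(): returns bytes read, 0 when empty. */
static int read_fifo(void)
{
	if (backlog <= 0)
		return 0;
	backlog--;
	return 16;
}

int main(void)
{
	int total = 0, len;

	/* Keep draining so samples that arrived mid-handler are not lost. */
	do {
		len = read_fifo();
		if (len > 0)
			total += len;
	} while (len > 0);

	printf("%s: drained %d bytes\n", total ? "IRQ_HANDLED" : "IRQ_NONE", total);
	return 0;
}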
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index a0d089afa1a2..61b9e1bbcc31 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -850,12 +850,12 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
indio_dev->masklength,
in_ind + 1);
while (in_ind != out_ind) {
- in_ind = find_next_bit(indio_dev->active_scan_mask,
- indio_dev->masklength,
- in_ind + 1);
length = iio_storage_bytes_for_si(indio_dev, in_ind);
/* Make sure we are aligned */
in_loc = roundup(in_loc, length) + length;
+ in_ind = find_next_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength,
+ in_ind + 1);
}
length = iio_storage_bytes_for_si(indio_dev, in_ind);
out_loc = roundup(out_loc, length);
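
Reviewer note: the industrialio-buffer fix makes the demux loop compute the storage length and aligned input offset for the current scan element before advancing to the next set bit. A simplified sketch of just that alignment bookkeeping follows; roundup_pow2() stands in for the kernel's roundup() and the element sizes are invented.

#include <stdio.h>

/* Round x up to a multiple of y (y a power of two), like the kernel's roundup(). */
static unsigned int roundup_pow2(unsigned int x, unsigned int y)
{
	return (x + y - 1) & ~(y - 1);
}

int main(void)
{
	/* Storage bytes per enabled scan element, e.g. u16, u32, s64 timestamp. */
	unsigned int bytes[] = { 2, 4, 8 };
	unsigned int in_loc = 0;
	unsigned int i;

	for (i = 0; i < sizeof(bytes) / sizeof(bytes[0]); i++) {
		unsigned int start = roundup_pow2(in_loc, bytes[i]);

		printf("element %u: offset %u, length %u\n", i, start, bytes[i]);
		in_loc = start + bytes[i];
	}
	return 0;
}

With these sizes the elements land at offsets 0, 4 and 8, i.e. each one is placed at the next offset that is a multiple of its own storage size, which is what the reordered loop preserves.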
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index cf5a0c242609..3c25d6bb3764 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -37,6 +37,9 @@ struct prox_state {
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info prox_attr;
u32 human_presence;
+ int scale_pre_decml;
+ int scale_post_decml;
+ int scale_precision;
};
/* Channel definitions */
@@ -107,8 +110,9 @@ static int prox_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
- *val = prox_state->prox_attr.units;
- ret_type = IIO_VAL_INT;
+ *val = prox_state->scale_pre_decml;
+ *val2 = prox_state->scale_post_decml;
+ ret_type = prox_state->scale_precision;
break;
case IIO_CHAN_INFO_OFFSET:
*val = hid_sensor_convert_exponent(
@@ -248,6 +252,11 @@ static int prox_parse_report(struct platform_device *pdev,
HID_USAGE_SENSOR_HUMAN_PRESENCE,
&st->common_attributes.sensitivity);
+ st->scale_precision = hid_sensor_format_scale(
+ hsdev->usage,
+ &st->prox_attr,
+ &st->scale_pre_decml, &st->scale_post_decml);
+
return ret;
}
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 830a2d45aa4d..947f17588024 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1245,13 +1245,16 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct ltr501_data *data = iio_priv(indio_dev);
- u16 buf[8];
+ struct {
+ u16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
__le16 als_buf[2];
u8 mask = 0;
int j = 0;
int ret, psdata;
- memset(buf, 0, sizeof(buf));
+ memset(&scan, 0, sizeof(scan));
/* figure out which data needs to be ready */
if (test_bit(0, indio_dev->active_scan_mask) ||
@@ -1270,9 +1273,9 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
if (ret < 0)
return ret;
if (test_bit(0, indio_dev->active_scan_mask))
- buf[j++] = le16_to_cpu(als_buf[1]);
+ scan.channels[j++] = le16_to_cpu(als_buf[1]);
if (test_bit(1, indio_dev->active_scan_mask))
- buf[j++] = le16_to_cpu(als_buf[0]);
+ scan.channels[j++] = le16_to_cpu(als_buf[0]);
}
if (mask & LTR501_STATUS_PS_RDY) {
@@ -1280,10 +1283,10 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
&psdata, 2);
if (ret < 0)
goto done;
- buf[j++] = psdata & LTR501_PS_DATA_MASK;
+ scan.channels[j++] = psdata & LTR501_PS_DATA_MASK;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index bcdb0eb9e537..7d2b3d065726 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -78,6 +78,11 @@
struct max44000_data {
struct mutex lock;
struct regmap *regmap;
+ /* Ensure naturally aligned timestamp */
+ struct {
+ u16 channels[2];
+ s64 ts __aligned(8);
+ } scan;
};
/* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */
@@ -491,7 +496,6 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct max44000_data *data = iio_priv(indio_dev);
- u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */
int index = 0;
unsigned int regval;
int ret;
@@ -501,17 +505,17 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p)
ret = max44000_read_alsval(data);
if (ret < 0)
goto out_unlock;
- buf[index++] = ret;
+ data->scan.channels[index++] = ret;
}
if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) {
ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &regval);
if (ret < 0)
goto out_unlock;
- buf[index] = regval;
+ data->scan.channels[index] = regval;
}
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index ffe9ce798ea2..d61c0645244e 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -197,6 +197,17 @@ struct rpr0521_data {
bool pxs_need_dis;
struct regmap *regmap;
+
+ /*
+ * Ensure a correct, naturally aligned timestamp.
+ * Note that the read will put garbage data into
+ * the padding, but this should not be a problem.
+ */
+ struct {
+ __le16 channels[3];
+ u8 garbage;
+ s64 ts __aligned(8);
+ } scan;
};
static IIO_CONST_ATTR(in_intensity_scale_available, RPR0521_ALS_SCALE_AVAIL);
@@ -452,8 +463,6 @@ static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p)
struct rpr0521_data *data = iio_priv(indio_dev);
int err;
- u8 buffer[16]; /* 3 16-bit channels + padding + ts */
-
/* Use irq timestamp when reasonable. */
if (iio_trigger_using_own(indio_dev) && data->irq_timestamp) {
pf->timestamp = data->irq_timestamp;
@@ -464,11 +473,11 @@ static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p)
pf->timestamp = iio_get_time_ns(indio_dev);
err = regmap_bulk_read(data->regmap, RPR0521_REG_PXS_DATA,
- &buffer,
+ data->scan.channels,
(3 * 2) + 1); /* 3 * 16-bit + (discarded) int clear reg. */
if (!err)
iio_push_to_buffers_with_timestamp(indio_dev,
- buffer, pf->timestamp);
+ &data->scan, pf->timestamp);
else
dev_err(&data->client->dev,
"Trigger consumer can't read from sensor.\n");
diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c
index 015a21f0c2ef..9174ab928880 100644
--- a/drivers/iio/light/si1133.c
+++ b/drivers/iio/light/si1133.c
@@ -102,6 +102,9 @@
#define SI1133_INPUT_FRACTION_LOW 15
#define SI1133_LUX_OUTPUT_FRACTION 12
#define SI1133_LUX_BUFFER_SIZE 9
+#define SI1133_MEASURE_BUFFER_SIZE 3
+
+#define SI1133_SIGN_BIT_INDEX 23
static const int si1133_scale_available[] = {
1, 2, 4, 8, 16, 32, 64, 128};
@@ -234,13 +237,13 @@ static const struct si1133_lux_coeff lux_coeff = {
}
};
-static int si1133_calculate_polynomial_inner(u32 input, u8 fraction, u16 mag,
+static int si1133_calculate_polynomial_inner(s32 input, u8 fraction, u16 mag,
s8 shift)
{
return ((input << fraction) / mag) << shift;
}
-static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order,
+static int si1133_calculate_output(s32 x, s32 y, u8 x_order, u8 y_order,
u8 input_fraction, s8 sign,
const struct si1133_coeff *coeffs)
{
@@ -276,7 +279,7 @@ static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order,
* The algorithm is from:
* https://siliconlabs.github.io/Gecko_SDK_Doc/efm32zg/html/si1133_8c_source.html#l00716
*/
-static int si1133_calc_polynomial(u32 x, u32 y, u8 input_fraction, u8 num_coeff,
+static int si1133_calc_polynomial(s32 x, s32 y, u8 input_fraction, u8 num_coeff,
const struct si1133_coeff *coeffs)
{
u8 x_order, y_order;
@@ -614,7 +617,7 @@ static int si1133_measure(struct si1133_data *data,
{
int err;
- __be16 resp;
+ u8 buffer[SI1133_MEASURE_BUFFER_SIZE];
err = si1133_set_adcmux(data, 0, chan->channel);
if (err)
@@ -625,12 +628,13 @@ static int si1133_measure(struct si1133_data *data,
if (err)
return err;
- err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(resp),
- (u8 *)&resp);
+ err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(buffer),
+ buffer);
if (err)
return err;
- *val = be16_to_cpu(resp);
+ *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
+ SI1133_SIGN_BIT_INDEX);
return err;
}
@@ -704,9 +708,9 @@ static int si1133_get_lux(struct si1133_data *data, int *val)
{
int err;
int lux;
- u32 high_vis;
- u32 low_vis;
- u32 ir;
+ s32 high_vis;
+ s32 low_vis;
+ s32 ir;
u8 buffer[SI1133_LUX_BUFFER_SIZE];
/* Activate lux channels */
@@ -719,9 +723,16 @@ static int si1133_get_lux(struct si1133_data *data, int *val)
if (err)
return err;
- high_vis = (buffer[0] << 16) | (buffer[1] << 8) | buffer[2];
- low_vis = (buffer[3] << 16) | (buffer[4] << 8) | buffer[5];
- ir = (buffer[6] << 16) | (buffer[7] << 8) | buffer[8];
+ high_vis =
+ sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
+ SI1133_SIGN_BIT_INDEX);
+
+ low_vis =
+ sign_extend32((buffer[3] << 16) | (buffer[4] << 8) | buffer[5],
+ SI1133_SIGN_BIT_INDEX);
+
+ ir = sign_extend32((buffer[6] << 16) | (buffer[7] << 8) | buffer[8],
+ SI1133_SIGN_BIT_INDEX);
if (high_vis > SI1133_ADC_THRESHOLD || ir > SI1133_ADC_THRESHOLD)
lux = si1133_calc_polynomial(high_vis, ir,
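
Reviewer note: si1133_measure() now reads the full 24-bit HOSTOUT result and sign-extends it from bit 23 rather than treating it as an unsigned 16-bit word. Below is a standalone sketch of the same bit manipulation, with a hand-rolled sign_extend32() mirroring the kernel helper and example byte values chosen only for illustration.

#include <stdio.h>
#include <stdint.h>

/* Same behaviour as the kernel's sign_extend32(value, index). */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* Big-endian 24-bit sample bytes as read from HOSTOUT, example values. */
	uint8_t buf[3] = { 0xff, 0xfe, 0x0c };
	uint32_t raw = (buf[0] << 16) | (buf[1] << 8) | buf[2];

	printf("raw=0x%06x -> signed %d\n", raw, sign_extend32(raw, 23));
	return 0;
}

Here 0xfffe0c decodes to -500, whereas the old 16-bit unsigned read would have reported a large positive value for the same dark-condition sample.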
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 76f16f9c7616..31f78fd7f915 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -172,6 +172,7 @@ struct si1145_part_info {
* @part_info: Part information
* @trig: Pointer to iio trigger
* @meas_rate: Value of MEAS_RATE register. Only set in HW in auto mode
+ * @buffer: Used to pack data read from sensor.
*/
struct si1145_data {
struct i2c_client *client;
@@ -183,6 +184,14 @@ struct si1145_data {
bool autonomous;
struct iio_trigger *trig;
int meas_rate;
+ /*
+ * Ensure timestamp will be naturally aligned if present.
+ * Maximum buffer size (may be only partly used if not all
+ * channels are enabled):
+ * 6*2 bytes channels data + 4 bytes alignment +
+ * 8 bytes timestamp
+ */
+ u8 buffer[24] __aligned(8);
};
/**
@@ -444,12 +453,6 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
struct iio_poll_func *pf = private;
struct iio_dev *indio_dev = pf->indio_dev;
struct si1145_data *data = iio_priv(indio_dev);
- /*
- * Maximum buffer size:
- * 6*2 bytes channels data + 4 bytes alignment +
- * 8 bytes timestamp
- */
- u8 buffer[24];
int i, j = 0;
int ret;
u8 irq_status = 0;
@@ -482,7 +485,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
ret = i2c_smbus_read_i2c_block_data_or_emulated(
data->client, indio_dev->channels[i].address,
- sizeof(u16) * run, &buffer[j]);
+ sizeof(u16) * run, &data->buffer[j]);
if (ret < 0)
goto done;
j += run * sizeof(u16);
@@ -497,7 +500,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
goto done;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/light/st_uvis25.h b/drivers/iio/light/st_uvis25.h
index 5e970ab480cd..f9bb7a5755dd 100644
--- a/drivers/iio/light/st_uvis25.h
+++ b/drivers/iio/light/st_uvis25.h
@@ -28,6 +28,11 @@ struct st_uvis25_hw {
struct iio_trigger *trig;
bool enabled;
int irq;
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u8 chan;
+ s64 ts __aligned(8);
+ } scan;
};
extern const struct dev_pm_ops st_uvis25_pm_ops;
diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c
index 302635836e6b..815211e024a8 100644
--- a/drivers/iio/light/st_uvis25_core.c
+++ b/drivers/iio/light/st_uvis25_core.c
@@ -235,17 +235,19 @@ static const struct iio_buffer_setup_ops st_uvis25_buffer_ops = {
static irqreturn_t st_uvis25_buffer_handler_thread(int irq, void *p)
{
- u8 buffer[ALIGN(sizeof(u8), sizeof(s64)) + sizeof(s64)];
struct iio_poll_func *pf = p;
struct iio_dev *iio_dev = pf->indio_dev;
struct st_uvis25_hw *hw = iio_priv(iio_dev);
+ unsigned int val;
int err;
- err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, (int *)buffer);
+ err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, &val);
if (err < 0)
goto out;
- iio_push_to_buffers_with_timestamp(iio_dev, buffer,
+ hw->scan.chan = val;
+
+ iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan,
iio_get_time_ns(iio_dev));
out:
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index 4b5d9988f025..23295fec5be5 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -350,6 +350,14 @@ static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
return lux_val;
}
+ /* Avoid a division by zero of lux_val later on */
+ if (lux_val == 0) {
+ dev_err(&chip->client->dev,
+ "%s: lux_val of 0 will produce out of range trim_value\n",
+ __func__);
+ return -ENODATA;
+ }
+
gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
* chip->als_settings.als_gain_trim) / lux_val);
if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index d3d65ecb30a5..a3fdffa4ef84 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -166,7 +166,6 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
u8 rdy_mask, u8 data_reg, int *val)
{
int tries = 20;
- __be16 buf;
int ret;
mutex_lock(&data->vcnl4000_lock);
@@ -193,13 +192,12 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
goto fail;
}
- ret = i2c_smbus_read_i2c_block_data(data->client,
- data_reg, sizeof(buf), (u8 *) &buf);
+ ret = i2c_smbus_read_word_swapped(data->client, data_reg);
if (ret < 0)
goto fail;
mutex_unlock(&data->vcnl4000_lock);
- *val = be16_to_cpu(buf);
+ *val = ret;
return 0;
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 806a3185b66d..d3f09405c00b 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -184,6 +184,11 @@ struct ak8974 {
bool drdy_irq;
struct completion drdy_complete;
bool drdy_active_low;
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ __le16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
};
static const char ak8974_reg_avdd[] = "avdd";
@@ -580,7 +585,6 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
{
struct ak8974 *ak8974 = iio_priv(indio_dev);
int ret;
- __le16 hw_values[8]; /* Three axes + 64bit padding */
pm_runtime_get_sync(&ak8974->i2c->dev);
mutex_lock(&ak8974->lock);
@@ -590,13 +594,13 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
dev_err(&ak8974->i2c->dev, "error triggering measure\n");
goto out_unlock;
}
- ret = ak8974_getresult(ak8974, hw_values);
+ ret = ak8974_getresult(ak8974, ak8974->scan.channels);
if (ret) {
dev_err(&ak8974->i2c->dev, "error getting measures\n");
goto out_unlock;
}
- iio_push_to_buffers_with_timestamp(indio_dev, hw_values,
+ iio_push_to_buffers_with_timestamp(indio_dev, &ak8974->scan,
iio_get_time_ns(indio_dev));
out_unlock:
@@ -764,19 +768,21 @@ static int ak8974_probe(struct i2c_client *i2c,
ak8974->map = devm_regmap_init_i2c(i2c, &ak8974_regmap_config);
if (IS_ERR(ak8974->map)) {
dev_err(&i2c->dev, "failed to allocate register map\n");
+ pm_runtime_put_noidle(&i2c->dev);
+ pm_runtime_disable(&i2c->dev);
return PTR_ERR(ak8974->map);
}
ret = ak8974_set_power(ak8974, AK8974_PWR_ON);
if (ret) {
dev_err(&i2c->dev, "could not power on\n");
- goto power_off;
+ goto disable_pm;
}
ret = ak8974_detect(ak8974);
if (ret) {
dev_err(&i2c->dev, "neither AK8974 nor AMI30x found\n");
- goto power_off;
+ goto disable_pm;
}
ret = ak8974_selftest(ak8974);
@@ -786,14 +792,9 @@ static int ak8974_probe(struct i2c_client *i2c,
ret = ak8974_reset(ak8974);
if (ret) {
dev_err(&i2c->dev, "AK8974 reset failed\n");
- goto power_off;
+ goto disable_pm;
}
- pm_runtime_set_autosuspend_delay(&i2c->dev,
- AK8974_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(&i2c->dev);
- pm_runtime_put(&i2c->dev);
-
indio_dev->dev.parent = &i2c->dev;
indio_dev->channels = ak8974_channels;
indio_dev->num_channels = ARRAY_SIZE(ak8974_channels);
@@ -846,6 +847,11 @@ no_irq:
goto cleanup_buffer;
}
+ pm_runtime_set_autosuspend_delay(&i2c->dev,
+ AK8974_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&i2c->dev);
+ pm_runtime_put(&i2c->dev);
+
return 0;
cleanup_buffer:
@@ -854,7 +860,6 @@ disable_pm:
pm_runtime_put_noidle(&i2c->dev);
pm_runtime_disable(&i2c->dev);
ak8974_set_power(ak8974, AK8974_PWR_OFF);
-power_off:
regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs);
return ret;
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 42a827a66512..379aa7f4a804 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -381,6 +381,12 @@ struct ak8975_data {
struct iio_mount_matrix orientation;
struct regulator *vdd;
struct regulator *vid;
+
+ /* Ensure natural alignment of timestamp */
+ struct {
+ s16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
};
/* Enable attached power regulator if any. */
@@ -815,7 +821,6 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
const struct i2c_client *client = data->client;
const struct ak_def *def = data->def;
int ret;
- s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */
__le16 fval[3];
mutex_lock(&data->lock);
@@ -838,12 +843,13 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
mutex_unlock(&data->lock);
/* Clamp to valid range. */
- buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
- buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
- buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
+ data->scan.channels[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
+ data->scan.channels[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
+ data->scan.channels[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
- iio_push_to_buffers_with_timestamp(indio_dev, buff,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
+
return;
unlock:
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index f063355480ba..57fbe5bfe883 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -56,6 +56,12 @@ struct mag3110_data {
struct mutex lock;
u8 ctrl_reg1;
int sleep_val;
+ /* Ensure natural alignment of timestamp */
+ struct {
+ __be16 channels[3];
+ u8 temperature;
+ s64 ts __aligned(8);
+ } scan;
};
static int mag3110_request(struct mag3110_data *data)
@@ -387,10 +393,9 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mag3110_data *data = iio_priv(indio_dev);
- u8 buffer[16]; /* 3 16-bit channels + 1 byte temp + padding + ts */
int ret;
- ret = mag3110_read(data, (__be16 *) buffer);
+ ret = mag3110_read(data, data->scan.channels);
if (ret < 0)
goto done;
@@ -399,10 +404,10 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p)
MAG3110_DIE_TEMP);
if (ret < 0)
goto done;
- buffer[6] = ret;
+ data->scan.temperature = ret;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index fe87d27779d9..074f6f865008 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -261,6 +261,8 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
+ (s32)2097152) * calib->H2 + 8192) >> 14);
var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)calib->H1) >> 4;
+ var = clamp_val(var, 0, 419430400);
+
return var >> 12;
};
@@ -703,7 +705,7 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
unsigned int ctrl;
if (data->use_eoc)
- init_completion(&data->done);
+ reinit_completion(&data->done);
ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas);
if (ret)
@@ -959,6 +961,9 @@ static int bmp085_fetch_eoc_irq(struct device *dev,
"trying to enforce it\n");
irq_trig = IRQF_TRIGGER_RISING;
}
+
+ init_completion(&data->done);
+
ret = devm_request_threaded_irq(dev,
irq,
bmp085_eoc_irq,
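
Reviewer note: the bmp280 hunk clamps the intermediate humidity term to 0..419430400 before the final right shift by 12, which caps the reported value at 102400, i.e. 100 %RH in the sensor's Q22.10 fixed-point format. A small sketch of that clamp; the overshooting intermediate value is made up.

#include <stdio.h>
#include <stdint.h>

static int32_t clamp_val(int32_t v, int32_t lo, int32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* Intermediate compensation value that overshoots the valid range. */
	int32_t var = 450000000;
	uint32_t q22_10;

	var = clamp_val(var, 0, 419430400);
	q22_10 = (uint32_t)var >> 12;	/* humidity in %RH, Q22.10 fixed point */

	printf("humidity = %u/1024 = %u %%RH\n", q22_10, q22_10 / 1024);
	return 0;
}

Without the clamp the shifted result could exceed 102400 and report an impossible relative humidity above 100 %RH.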
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 7537547fb7ee..bd001062dc65 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -147,7 +147,14 @@ static irqreturn_t mpl3115_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mpl3115_data *data = iio_priv(indio_dev);
- u8 buffer[16]; /* 32-bit channel + 16-bit channel + padding + ts */
+ /*
+ * 32-bit channel + 16-bit channel + padding + ts
+ * Note that it is possible for only one of the first 2
+ * channels to be enabled. If that happens, the first element
+ * of the buffer may be either 16 or 32 bits wide. As such we cannot
+ * use a simple structure definition to express this data layout.
+ */
+ u8 buffer[16] __aligned(8);
int ret, pos = 0;
mutex_lock(&data->lock);
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index f950cfde5db9..f4ea886fdde4 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -215,16 +215,21 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct ms5611_state *st = iio_priv(indio_dev);
- s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */
+ /* Ensure buffer elements are naturally aligned */
+ struct {
+ s32 channels[2];
+ s64 ts __aligned(8);
+ } scan;
int ret;
mutex_lock(&st->lock);
- ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]);
+ ret = ms5611_read_temp_and_pressure(indio_dev, &scan.channels[1],
+ &scan.channels[0]);
mutex_unlock(&st->lock);
if (ret < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
iio_get_time_ns(indio_dev));
err:
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 81d8f24eaeb4..23b4fa81e783 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -672,8 +672,10 @@ static int zpa2326_resume(const struct iio_dev *indio_dev)
int err;
err = pm_runtime_get_sync(indio_dev->dev.parent);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put(indio_dev->dev.parent);
return err;
+ }
if (err > 0) {
/*
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 47af54f14756..67f85268b63d 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -158,6 +158,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
if (ret < 0) {
dev_err(&client->dev, "cannot send start measurement command");
+ pm_runtime_put_noidle(&client->dev);
return ret;
}
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
index b592fc4f007e..d710a1a66817 100644
--- a/drivers/iio/temperature/hid-sensor-temperature.c
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
@@ -28,7 +28,10 @@
struct temperature_state {
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info temperature_attr;
- s32 temperature_data;
+ struct {
+ s32 temperature_data;
+ u64 timestamp __aligned(8);
+ } scan;
int scale_pre_decml;
int scale_post_decml;
int scale_precision;
@@ -45,7 +48,7 @@ static const struct iio_chan_spec temperature_channels[] = {
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
},
- IIO_CHAN_SOFT_TIMESTAMP(3),
+ IIO_CHAN_SOFT_TIMESTAMP(1),
};
/* Adjust channel real bits based on report descriptor */
@@ -136,9 +139,8 @@ static int temperature_proc_event(struct hid_sensor_hub_device *hsdev,
struct temperature_state *temp_st = iio_priv(indio_dev);
if (atomic_read(&temp_st->common_attributes.data_ready))
- iio_push_to_buffers_with_timestamp(indio_dev,
- &temp_st->temperature_data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_timestamp(indio_dev, &temp_st->scan,
+ iio_get_time_ns(indio_dev));
return 0;
}
@@ -153,7 +155,7 @@ static int temperature_capture_sample(struct hid_sensor_hub_device *hsdev,
switch (usage_id) {
case HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE:
- temp_st->temperature_data = *(s32 *)raw_data;
+ temp_st->scan.temperature_data = *(s32 *)raw_data;
return 0;
default:
return -EINVAL;
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 6e96a2fb97dc..30385ba7c5d9 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -408,16 +408,15 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
struct flowi6 fl6;
struct dst_entry *dst;
struct rt6_info *rt;
- int ret;
memset(&fl6, 0, sizeof fl6);
fl6.daddr = dst_in->sin6_addr;
fl6.saddr = src_in->sin6_addr;
fl6.flowi6_oif = addr->bound_dev_if;
- ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
- if (ret < 0)
- return ret;
+ dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
rt = (struct rt6_info *)dst;
if (ipv6_addr_any(&src_in->sin6_addr)) {
@@ -572,13 +571,12 @@ static void process_one_req(struct work_struct *_work)
req->callback = NULL;
spin_lock_bh(&lock);
+ /*
+ * Although the work will normally have been canceled by the workqueue,
+ * it can still be requeued as long as it is on the req_list.
+ */
+ cancel_delayed_work(&req->work);
if (!list_empty(&req->list)) {
- /*
- * Although the work will normally have been canceled by the
- * workqueue, it can still be requeued as long as it is on the
- * req_list.
- */
- cancel_delayed_work(&req->work);
list_del_init(&req->list);
kfree(req);
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 64f206e11d49..9bdb3fd97d26 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1100,14 +1100,22 @@ retest:
break;
}
- spin_lock_irq(&cm.lock);
+ spin_lock_irq(&cm_id_priv->lock);
+ spin_lock(&cm.lock);
+ /* Required for cleanup paths related cm_req_handler() */
+ if (cm_id_priv->timewait_info) {
+ cm_cleanup_timewait(cm_id_priv->timewait_info);
+ kfree(cm_id_priv->timewait_info);
+ cm_id_priv->timewait_info = NULL;
+ }
if (!list_empty(&cm_id_priv->altr_list) &&
(!cm_id_priv->altr_send_port_not_ready))
list_del(&cm_id_priv->altr_list);
if (!list_empty(&cm_id_priv->prim_list) &&
(!cm_id_priv->prim_send_port_not_ready))
list_del(&cm_id_priv->prim_list);
- spin_unlock_irq(&cm.lock);
+ spin_unlock(&cm.lock);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_free_id(cm_id->local_id);
cm_deref_id(cm_id_priv);
@@ -1424,7 +1432,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
/* Verify that we're not in timewait. */
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_IDLE) {
+ if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
ret = -EINVAL;
goto out;
@@ -1435,6 +1443,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
+ cm_id_priv->timewait_info = NULL;
goto out;
}
@@ -1442,12 +1451,12 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
param->ppath_sgid_attr, &cm_id_priv->av,
cm_id_priv);
if (ret)
- goto error1;
+ goto out;
if (param->alternate_path) {
ret = cm_init_av_by_path(param->alternate_path, NULL,
&cm_id_priv->alt_av, cm_id_priv);
if (ret)
- goto error1;
+ goto out;
}
cm_id->service_id = param->service_id;
cm_id->service_mask = ~cpu_to_be64(0);
@@ -1465,7 +1474,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
if (ret)
- goto error1;
+ goto out;
req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
cm_format_req(req_msg, cm_id_priv, param);
@@ -1488,7 +1497,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
return 0;
error2: cm_free_msg(cm_id_priv->msg);
-error1: kfree(cm_id_priv->timewait_info);
out: return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
@@ -1962,6 +1970,7 @@ static int cm_req_handler(struct cm_work *work)
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
+ cm_id_priv->timewait_info = NULL;
goto destroy;
}
cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
@@ -1973,7 +1982,7 @@ static int cm_req_handler(struct cm_work *work)
pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
be32_to_cpu(cm_id->local_id));
ret = -EINVAL;
- goto free_timeinfo;
+ goto destroy;
}
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
@@ -2057,8 +2066,6 @@ static int cm_req_handler(struct cm_work *work)
rejected:
atomic_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
-free_timeinfo:
- kfree(cm_id_priv->timewait_info);
destroy:
ib_destroy_cm_id(cm_id);
return ret;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 5c03f4701ece..8cdf933310d1 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1507,6 +1507,8 @@ static struct rdma_id_private *cma_find_listener(
{
struct rdma_id_private *id_priv, *id_priv_dev;
+ lockdep_assert_held(&lock);
+
if (!bind_list)
return ERR_PTR(-EINVAL);
@@ -1552,6 +1554,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}
}
+ mutex_lock(&lock);
/*
* Net namespace might be getting deleted while route lookup,
* cm_id lookup is in progress. Therefore, perform netdevice
@@ -1593,6 +1596,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
err:
rcu_read_unlock();
+ mutex_unlock(&lock);
if (IS_ERR(id_priv) && *net_dev) {
dev_put(*net_dev);
*net_dev = NULL;
@@ -1674,19 +1678,30 @@ static void cma_release_port(struct rdma_id_private *id_priv)
mutex_unlock(&lock);
}
-static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv,
- struct cma_multicast *mc)
+static void destroy_mc(struct rdma_id_private *id_priv,
+ struct cma_multicast *mc)
{
- struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
- struct net_device *ndev = NULL;
+ if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) {
+ ib_sa_free_multicast(mc->multicast.ib);
+ kfree(mc);
+ return;
+ }
- if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
- if (ndev) {
- cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
- dev_put(ndev);
+ if (rdma_protocol_roce(id_priv->id.device,
+ id_priv->id.port_num)) {
+ struct rdma_dev_addr *dev_addr =
+ &id_priv->id.route.addr.dev_addr;
+ struct net_device *ndev = NULL;
+
+ if (dev_addr->bound_dev_if)
+ ndev = dev_get_by_index(dev_addr->net,
+ dev_addr->bound_dev_if);
+ if (ndev) {
+ cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
+ dev_put(ndev);
+ }
+ kref_put(&mc->mcref, release_mc);
}
- kref_put(&mc->mcref, release_mc);
}
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
@@ -1694,16 +1709,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
struct cma_multicast *mc;
while (!list_empty(&id_priv->mc_list)) {
- mc = container_of(id_priv->mc_list.next,
- struct cma_multicast, list);
+ mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
+ list);
list_del(&mc->list);
- if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
- id_priv->id.port_num)) {
- ib_sa_free_multicast(mc->multicast.ib);
- kfree(mc);
- } else {
- cma_leave_roce_mc_group(id_priv, mc);
- }
+ destroy_mc(id_priv, mc);
}
}
@@ -2346,6 +2355,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
struct net *net = id_priv->id.route.addr.dev_addr.net;
int ret;
+ lockdep_assert_held(&lock);
+
if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
return;
@@ -3081,6 +3092,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
u64 sid, mask;
__be16 port;
+ lockdep_assert_held(&lock);
+
addr = cma_src_addr(id_priv);
port = htons(bind_list->port);
@@ -3109,6 +3122,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
struct rdma_bind_list *bind_list;
int ret;
+ lockdep_assert_held(&lock);
+
bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
if (!bind_list)
return -ENOMEM;
@@ -3135,6 +3150,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
struct sockaddr *saddr = cma_src_addr(id_priv);
__be16 dport = cma_port(daddr);
+ lockdep_assert_held(&lock);
+
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
struct sockaddr *cur_saddr = cma_src_addr(cur_id);
@@ -3174,6 +3191,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
unsigned int rover;
struct net *net = id_priv->id.route.addr.dev_addr.net;
+ lockdep_assert_held(&lock);
+
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
rover = prandom_u32() % remaining + low;
@@ -3221,6 +3240,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
struct rdma_id_private *cur_id;
struct sockaddr *addr, *cur_addr;
+ lockdep_assert_held(&lock);
+
addr = cma_src_addr(id_priv);
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
if (id_priv == cur_id)
@@ -3251,6 +3272,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
unsigned short snum;
int ret;
+ lockdep_assert_held(&lock);
+
snum = ntohs(cma_port(cma_src_addr(id_priv)));
if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
return -EACCES;
@@ -4002,16 +4025,6 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
else
pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
status);
- mutex_lock(&id_priv->qp_mutex);
- if (!status && id_priv->id.qp) {
- status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
- be16_to_cpu(multicast->rec.mlid));
- if (status)
- pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
- status);
- }
- mutex_unlock(&id_priv->qp_mutex);
-
event.status = status;
event.param.ud.private_data = mc->context;
if (!status) {
@@ -4265,6 +4278,10 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
struct cma_multicast *mc;
int ret;
+ /* Not supported for kernel QPs */
+ if (WARN_ON(id->qp))
+ return -EINVAL;
+
if (!id->device)
return -EINVAL;
@@ -4315,25 +4332,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
id_priv = container_of(id, struct rdma_id_private, id);
spin_lock_irq(&id_priv->lock);
list_for_each_entry(mc, &id_priv->mc_list, list) {
- if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
- list_del(&mc->list);
- spin_unlock_irq(&id_priv->lock);
-
- if (id->qp)
- ib_detach_mcast(id->qp,
- &mc->multicast.ib->rec.mgid,
- be16_to_cpu(mc->multicast.ib->rec.mlid));
-
- BUG_ON(id_priv->cma_dev->device != id->device);
-
- if (rdma_cap_ib_mcast(id->device, id->port_num)) {
- ib_sa_free_multicast(mc->multicast.ib);
- kfree(mc);
- } else if (rdma_protocol_roce(id->device, id->port_num)) {
- cma_leave_roce_mc_group(id_priv, mc);
- }
- return;
- }
+ if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
+ continue;
+ list_del(&mc->list);
+ spin_unlock_irq(&id_priv->lock);
+
+ WARN_ON(id_priv->cma_dev->device != id->device);
+ destroy_mc(id_priv, mc);
+ return;
}
spin_unlock_irq(&id_priv->lock);
}
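Several cma.c helpers above (cma_find_listener(), cma_listen_on_dev() and the port allocation/check paths) now open with lockdep_assert_held(&lock), and cma_ib_id_from_event() takes that mutex around the listener lookup, which documents and enforces that these walks only happen under the global CMA lock. A minimal sketch of the pattern, assuming a kernel with CONFIG_PROVE_LOCKING enabled; registry_lock, struct entry, lookup_entry() and get_entry() are hypothetical names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct entry;

static DEFINE_MUTEX(registry_lock);

/* Must be called with registry_lock held; lockdep warns at runtime if not. */
static struct entry *lookup_entry(int id)
{
        lockdep_assert_held(&registry_lock);
        /* ... walk a list that registry_lock protects ... */
        return NULL;
}

static struct entry *get_entry(int id)
{
        struct entry *e;

        mutex_lock(&registry_lock);
        e = lookup_entry(id);
        mutex_unlock(&registry_lock);
        return e;
}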
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index eee38b40be99..ce183d054785 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -319,8 +319,21 @@ fail:
return ERR_PTR(err);
}
+static void drop_cma_dev(struct config_group *cgroup, struct config_item *item)
+{
+ struct config_group *group =
+ container_of(item, struct config_group, cg_item);
+ struct cma_dev_group *cma_dev_group =
+ container_of(group, struct cma_dev_group, device_group);
+
+ configfs_remove_default_groups(&cma_dev_group->ports_group);
+ configfs_remove_default_groups(&cma_dev_group->device_group);
+ config_item_put(item);
+}
+
static struct configfs_group_operations cma_subsys_group_ops = {
.make_group = make_cma_dev,
+ .drop_item = drop_cma_dev,
};
static const struct config_item_type cma_subsys_type = {
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 218411282069..a36b3b4f5c0a 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -615,10 +615,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
idr_unlock(&ib_mad_clients);
flush_workqueue(port_priv->wq);
- ib_cancel_rmpp_recvs(mad_agent_priv);
deref_mad_agent(mad_agent_priv);
wait_for_completion(&mad_agent_priv->comp);
+ ib_cancel_rmpp_recvs(mad_agent_priv);
ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
@@ -2920,6 +2920,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
sg_list.addr))) {
+ kfree(mad_priv);
ret = -ENOMEM;
break;
}
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index c4118bcd5103..5819a2fb027d 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -158,9 +158,9 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
uobj->context = NULL;
/*
- * For DESTROY the usecnt is held write locked, the caller is expected
- * to put it unlock and put the object when done with it. Only DESTROY
- * can remove the IDR handle.
+ * For DESTROY the usecnt is not changed, the caller is expected to
+ * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR
+ * handle.
*/
if (reason != RDMA_REMOVE_DESTROY)
atomic_set(&uobj->usecnt, 0);
@@ -192,7 +192,7 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
/*
* This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
* sequence. It should only be used from command callbacks. On success the
- * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
+ * caller must pair this with uobj_put_destroy(). This
* version requires the caller to have already obtained an
* LOOKUP_DESTROY uobject kref.
*/
@@ -203,6 +203,13 @@ int uobj_destroy(struct ib_uobject *uobj)
down_read(&ufile->hw_destroy_rwsem);
+ /*
+ * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left
+ * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY.
+ * This is because any other concurrent thread can still see the object
+ * in the xarray due to RCU. Leaving it locked ensures nothing else will
+ * touch it.
+ */
ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
if (ret)
goto out_unlock;
@@ -221,7 +228,7 @@ out_unlock:
/*
* uobj_get_destroy destroys the HW object and returns a handle to the uobj
* with a NULL object pointer. The caller must pair this with
- * uverbs_put_destroy.
+ * uobj_put_destroy().
*/
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
u32 id, struct ib_uverbs_file *ufile)
@@ -256,7 +263,7 @@ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+ uobj_put_destroy(uobj);
return success_res;
}
@@ -381,7 +388,7 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
* and the caller is expected to ensure that uverbs_close_fd is never
* done while a call to lookup is possible.
*/
- if (f->f_op != fd_type->fops) {
+ if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
fput(f);
return ERR_PTR(-EBADF);
}
@@ -697,7 +704,6 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
enum rdma_lookup_mode mode)
{
assert_uverbs_usecnt(uobj, mode);
- uobj->uapi_object->type_class->lookup_put(uobj, mode);
/*
* In order to unlock an object, either decrease its usecnt for
* read access or zero it in case of exclusive access. See
@@ -714,6 +720,7 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
break;
}
+ uobj->uapi_object->type_class->lookup_put(uobj, mode);
/* Pairs with the kref obtained by type->lookup_get */
uverbs_uobject_put(uobj);
}
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 2acc30c3d5b2..01052de6bedb 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -588,6 +588,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
list_move_tail(&uevent->list, &list);
}
list_del(&ctx->list);
+ events_reported = ctx->events_reported;
mutex_unlock(&ctx->file->mut);
list_for_each_entry_safe(uevent, tmp, &list, list) {
@@ -597,7 +598,6 @@ static int ucma_free_ctx(struct ucma_context *ctx)
kfree(uevent);
}
- events_reported = ctx->events_reported;
mutex_destroy(&ctx->mutex);
kfree(ctx);
return events_reported;
@@ -1476,7 +1476,9 @@ static ssize_t ucma_process_join(struct ucma_file *file,
return 0;
err3:
+ mutex_lock(&ctx->mutex);
rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
+ mutex_unlock(&ctx->mutex);
ucma_cleanup_mc_events(mc);
err2:
mutex_lock(&mut);
@@ -1644,7 +1646,9 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
cur_file = ctx->file;
if (cur_file == new_file) {
+ mutex_lock(&cur_file->mut);
resp.events_reported = ctx->events_reported;
+ mutex_unlock(&cur_file->mut);
goto response;
}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 4bda1242df87..fd6ec56c17f9 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -356,7 +356,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
vma = find_vma(mm, ib_umem_start(umem));
if (!vma || !is_vm_hugetlb_page(vma)) {
up_read(&mm->mmap_sem);
- return -EINVAL;
+ ret_val = -EINVAL;
+ goto out_mm;
}
h = hstate_vma(vma);
umem->page_shift = huge_page_shift(h);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index a18f3f8ad77f..471a824be86c 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -366,6 +366,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
mutex_lock(&file->mutex);
+ if (file->agents_dead) {
+ mutex_unlock(&file->mutex);
+ return -EIO;
+ }
+
while (list_empty(&file->recv_list)) {
mutex_unlock(&file->mutex);
@@ -379,6 +384,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
mutex_lock(&file->mutex);
}
+ if (file->agents_dead) {
+ mutex_unlock(&file->mutex);
+ return -EIO;
+ }
+
packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
list_del(&packet->list);
@@ -508,7 +518,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
agent = __get_agent(file, packet->mad.hdr.id);
if (!agent) {
- ret = -EINVAL;
+ ret = -EIO;
goto err_up;
}
@@ -637,10 +647,14 @@ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
/* we will always be able to post a MAD send */
__poll_t mask = EPOLLOUT | EPOLLWRNORM;
+ mutex_lock(&file->mutex);
poll_wait(filp, &file->recv_wait, wait);
if (!list_empty(&file->recv_list))
mask |= EPOLLIN | EPOLLRDNORM;
+ if (file->agents_dead)
+ mask = EPOLLERR;
+ mutex_unlock(&file->mutex);
return mask;
}
@@ -1257,6 +1271,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
list_for_each_entry(file, &port->file_list, port_list) {
mutex_lock(&file->mutex);
file->agents_dead = 1;
+ wake_up_interruptible(&file->recv_wait);
mutex_unlock(&file->mutex);
for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5404717998b0..fc4b46258c75 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -360,6 +360,8 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
spin_lock_irq(&ev_queue->lock);
if (!list_empty(&ev_queue->event_list))
pollflags = EPOLLIN | EPOLLRDNORM;
+ else if (ev_queue->is_closed)
+ pollflags = EPOLLERR;
spin_unlock_irq(&ev_queue->lock);
return pollflags;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 82f309fb3ce5..e1ecd4682c09 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1617,7 +1617,7 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
if (!(rdma_protocol_ib(qp->device,
attr->alt_ah_attr.port_num) &&
rdma_protocol_ib(qp->device, port))) {
- ret = EINVAL;
+ ret = -EINVAL;
goto out;
}
}
@@ -1711,7 +1711,7 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
dev_put(netdev);
- if (!rc) {
+ if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
netdev_speed = lksettings.base.speed;
} else {
netdev_speed = SPEED_1000;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index c9af2d139f5c..f8c9caa8aad6 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1838,6 +1838,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
goto out;
}
qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
+ qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
qp_attr->pkey_index = qplib_qp->pkey_index;
@@ -3033,6 +3034,19 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
wc->wc_flags |= IB_WC_GRH;
}
+static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
+ u16 vlan_id)
+{
+ /*
+ * Check if the vlan is configured in the host. If not configured, it
+ * can be a transparent VLAN. So dont report the vlan id.
+ */
+ if (!__vlan_find_dev_deep_rcu(rdev->netdev,
+ htons(ETH_P_8021Q), vlan_id))
+ return false;
+ return true;
+}
+
static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
u16 *vid, u8 *sl)
{
@@ -3101,9 +3115,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
wc->src_qp = orig_cqe->src_qp;
memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
- wc->vlan_id = vlan_id;
- wc->sl = sl;
- wc->wc_flags |= IB_WC_WITH_VLAN;
+ if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
+ wc->vlan_id = vlan_id;
+ wc->sl = sl;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ }
}
wc->port_num = 1;
wc->vendor_err = orig_cqe->status;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 589b0d4677d5..f1b666c80f36 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -753,7 +753,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
struct ib_event event;
unsigned int flags;
- if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
+ rdma_is_kernel_res(&qp->ib_qp.res)) {
flags = bnxt_re_lock_cqs(qp);
bnxt_qplib_add_flush_qp(&qp->qplib_qp);
bnxt_re_unlock_cqs(qp, flags);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 539a5d44e6db..655952a6c0e6 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -725,6 +725,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
unmap_io:
pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
+ dpit->dbr_bar_reg_iomem = NULL;
return -ENOMEM;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 09e7d3dd3055..336144876363 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -141,7 +141,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
attr->l2_db_size = (sb->l2_db_space_size + 1) *
(0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
- attr->max_sgid = le32_to_cpu(sb->max_gid);
+ attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
bnxt_qplib_query_version(rcfw, attr->fw_ver);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 9d3e8b994945..b6e9e0ef7939 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -47,6 +47,7 @@
struct bnxt_qplib_dev_attr {
#define FW_VER_ARR_LEN 4
u8 fw_ver[FW_VER_ARR_LEN];
+#define BNXT_QPLIB_NUM_GIDS_SUPPORTED 256
u16 max_sgid;
u16 max_mrw;
u32 max_qp;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 16145b0a1458..6c1a093b164e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3293,7 +3293,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
err = pick_local_ipaddrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail3;
}
/* find a route */
@@ -3315,7 +3315,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
err = pick_local_ip6addrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail3;
}
/* find a route */
@@ -3517,13 +3517,14 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
ep->com.local_addr.ss_family == AF_INET) {
err = cxgb4_remove_server_filter(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
- ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+ ep->com.dev->rdev.lldi.rxq_ids[0], false);
} else {
struct sockaddr_in6 *sin6;
c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_remove_server(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
- ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+ ep->com.dev->rdev.lldi.rxq_ids[0],
+ ep->com.local_addr.ss_family == AF_INET6);
if (err)
goto done;
err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 1fd8798d91a7..43c611aa068c 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -1012,6 +1012,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
rhp = to_c4iw_dev(ibdev);
+ if (entries < 1 || entries > ibdev->attrs.max_cqe)
+ return ERR_PTR(-EINVAL);
+
if (vector >= rhp->rdev.lldi.nciq)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index c13c0ba30f63..af974a257086 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -945,6 +945,7 @@ void c4iw_dealloc(struct uld_ctx *ctx)
static void c4iw_remove(struct uld_ctx *ctx)
{
pr_debug("c4iw_dev %p\n", ctx->dev);
+ debugfs_remove_recursive(ctx->dev->debugfs_root);
c4iw_unregister_device(ctx->dev);
c4iw_dealloc(ctx);
}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index a9e3a11bea54..caa6a502c37e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2485,7 +2485,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
- init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
+ init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 2b57ba70ddd6..c09080712485 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -1924,6 +1924,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
__func__, (ptr -
(u32 *)dd->platform_config.data));
+ ret = -EINVAL;
goto bail;
}
/* Jump the CRC DWORD */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 081aa91fc162..620eaca2b831 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -274,7 +274,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
break;
case IB_WR_LOCAL_INV:
- break;
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
case IB_WR_LSO:
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 417de7ac0d5e..2a203e08d4c1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -3821,6 +3821,7 @@ done:
}
qp_init_attr->cap = qp_attr->cap;
+ qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
out:
mutex_unlock(&hr_qp->mutex);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 771eb6bd0785..0273d0404e74 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1984,7 +1984,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
struct rtable *rt;
struct neighbour *neigh;
int rc = arpindex;
- struct net_device *netdev = iwdev->netdev;
__be32 dst_ipaddr = htonl(dst_ip);
__be32 src_ipaddr = htonl(src_ip);
@@ -1994,9 +1993,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
return rc;
}
- if (netif_is_bond_slave(netdev))
- netdev = netdev_master_upper_dev_get(netdev);
-
neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
rcu_read_lock();
@@ -2062,7 +2058,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
{
struct neighbour *neigh;
int rc = arpindex;
- struct net_device *netdev = iwdev->netdev;
struct dst_entry *dst;
struct sockaddr_in6 dst_addr;
struct sockaddr_in6 src_addr;
@@ -2076,16 +2071,13 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
if (!dst || dst->error) {
if (dst) {
- dst_release(dst);
i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
dst->error);
+ dst_release(dst);
}
return rc;
}
- if (netif_is_bond_slave(netdev))
- netdev = netdev_master_upper_dev_get(netdev);
-
neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
rcu_read_lock();
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 55a1fbf0e670..ae8b97c30665 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -534,7 +534,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
int arp_index;
arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
- if (arp_index == -1)
+ if (arp_index < 0)
return;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
if (!cqp_request)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 68095f00d08f..41227d956871 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -54,10 +54,6 @@
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
-static int push_mode;
-module_param(push_mode, int, 0644);
-MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");
-
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
@@ -1584,7 +1580,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
if (status)
goto exit;
iwdev->obj_next = iwdev->obj_mem;
- iwdev->push_mode = push_mode;
init_waitqueue_head(&iwdev->vchnl_waitq);
init_waitqueue_head(&dev->vf_reqs);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
index 540aab5e502d..3fafc5424e76 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
@@ -392,12 +392,9 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
pble_rsrc->unallocated_pble -= (chunk->size >> 3);
- list_add(&chunk->list, &pble_rsrc->pinfo.clist);
sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
- if (sd_entry->valid)
- return 0;
- if (dev->is_pf) {
+ if (dev->is_pf && !sd_entry->valid) {
ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
sd_reg_val, idx->sd_idx,
sd_entry->entry_type, true);
@@ -408,6 +405,7 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
}
sd_entry->valid = true;
+ list_add(&chunk->list, &pble_rsrc->pinfo.clist);
return 0;
error:
kfree(chunk);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index a5e3349b8a7c..314d19153c99 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -201,38 +201,16 @@ static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
*/
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
- struct i40iw_ucontext *ucontext;
- u64 db_addr_offset;
- u64 push_offset;
-
- ucontext = to_ucontext(context);
- if (ucontext->iwdev->sc_dev.is_pf) {
- db_addr_offset = I40IW_DB_ADDR_OFFSET;
- push_offset = I40IW_PUSH_OFFSET;
- if (vma->vm_pgoff)
- vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
- } else {
- db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
- push_offset = I40IW_VF_PUSH_OFFSET;
- if (vma->vm_pgoff)
- vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
- }
+ struct i40iw_ucontext *ucontext = to_ucontext(context);
+ u64 dbaddr;
- vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
+ if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
- if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_private_data = ucontext;
- } else {
- if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- }
+ dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
- if (io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
- PAGE_SIZE, vma->vm_page_prot))
+ if (io_remap_pfn_range(vma, vma->vm_start, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot)))
return -EAGAIN;
return 0;
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 8c79a480f2b7..d3e11503e67c 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -307,6 +307,9 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
if (!sriov->is_going_down) {
id->scheduled_delete = 1;
schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+ } else if (id->scheduled_delete) {
+ /* Adjust timeout if already scheduled */
+ mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
}
spin_unlock_irqrestore(&sriov->going_down_lock, flags);
spin_unlock(&sriov->id_map_lock);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 5aaa2a6c431b..418b9312fb2d 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1305,6 +1305,18 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
+static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
+{
+ unsigned long flags;
+ struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
+ struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
+
+ spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
+ if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
+ queue_work(ctx->wi_wq, &ctx->work);
+ spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+}
+
static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
struct mlx4_ib_demux_pv_qp *tun_qp,
int index)
@@ -2000,7 +2012,8 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
cq_size *= 2;
cq_attr.cqe = cq_size;
- ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
+ ctx->cq = ib_create_cq(ctx->ib_dev,
+ create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
NULL, ctx, &cq_attr);
if (IS_ERR(ctx->cq)) {
ret = PTR_ERR(ctx->cq);
@@ -2037,6 +2050,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
+ ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
if (ret) {
@@ -2180,7 +2194,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
goto err_mcg;
}
- snprintf(name, sizeof name, "mlx4_ibt%d", port);
+ snprintf(name, sizeof(name), "mlx4_ibt%d", port);
ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->wq) {
pr_err("Failed to create tunnelling WQ for port %d\n", port);
@@ -2188,7 +2202,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
goto err_wq;
}
- snprintf(name, sizeof name, "mlx4_ibud%d", port);
+ snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
+ ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ if (!ctx->wi_wq) {
+ pr_err("Failed to create wire WQ for port %d\n", port);
+ ret = -ENOMEM;
+ goto err_wiwq;
+ }
+
+ snprintf(name, sizeof(name), "mlx4_ibud%d", port);
ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->ud_wq) {
pr_err("Failed to create up/down WQ for port %d\n", port);
@@ -2199,6 +2221,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
return 0;
err_udwq:
+ destroy_workqueue(ctx->wi_wq);
+ ctx->wi_wq = NULL;
+
+err_wiwq:
destroy_workqueue(ctx->wq);
ctx->wq = NULL;
@@ -2246,12 +2272,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
}
flush_workqueue(ctx->wq);
+ flush_workqueue(ctx->wi_wq);
for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
free_pv_object(dev, i, ctx->port);
}
kfree(ctx->tun);
destroy_workqueue(ctx->ud_wq);
+ destroy_workqueue(ctx->wi_wq);
destroy_workqueue(ctx->wq);
}
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index a19d3ad14dc3..eac4ade45611 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1606,8 +1606,9 @@ static int __mlx4_ib_create_default_rules(
int i;
for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
+ union ib_flow_spec ib_spec = {};
int ret;
- union ib_flow_spec ib_spec;
+
switch (pdefault_rules->rules_create_list[i]) {
case 0:
/* no rule */
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e10dccc7958f..76ca67aa4015 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -464,6 +464,7 @@ struct mlx4_ib_demux_pv_ctx {
struct ib_pd *pd;
struct work_struct work;
struct workqueue_struct *wq;
+ struct workqueue_struct *wi_wq;
struct mlx4_ib_demux_pv_qp qp[2];
};
@@ -471,6 +472,7 @@ struct mlx4_ib_demux_ctx {
struct ib_device *ib_dev;
int port;
struct workqueue_struct *wq;
+ struct workqueue_struct *wi_wq;
struct workqueue_struct *ud_wq;
spinlock_t ud_lock;
atomic64_t subnet_prefix;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 6dd3cd2c2f80..73bd35d34a25 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2807,6 +2807,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
int send_size;
int header_size;
int spc;
+ int err;
int i;
if (wr->wr.opcode != IB_WR_SEND)
@@ -2841,7 +2842,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
sqp->ud_header.lrh.virtual_lane = 0;
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
- ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+ if (err)
+ return err;
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
@@ -3128,9 +3131,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
}
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
if (!sqp->qp.ibqp.qp_num)
- ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
+ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
+ &pkey);
else
- ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
+ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
+ &pkey);
+ if (err)
+ return err;
+
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 02f36ab72ad4..c89aec834972 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -328,6 +328,7 @@ static bool devx_is_obj_modify_cmd(const void *in)
case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
case MLX5_CMD_OP_RST2INIT_QP:
case MLX5_CMD_OP_INIT2RTR_QP:
+ case MLX5_CMD_OP_INIT2INIT_QP:
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
case MLX5_CMD_OP_SQERR2RTS_QP:
@@ -571,7 +572,9 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
break;
case MLX5_CMD_OP_CREATE_TIR:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ *obj_id = MLX5_GET(create_tir_out, out, tirn);
+ MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_TIS:
MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 2db34f7b5ced..13513466df01 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1070,12 +1070,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_GRE;
- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_CW_MPLS_GRE)
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_CW_MPLS_UDP)
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
}
@@ -6096,7 +6094,7 @@ int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
if (err)
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
return err;
}
@@ -6341,6 +6339,7 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
if (bound) {
rdma_roce_rescan_device(&dev->ib_dev);
+ mpi->ibdev->ib_active = true;
break;
}
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 4fc9278d0dde..10f6ae4f8f3f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4887,7 +4887,9 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
rdma_ah_set_static_rate(ah_attr,
path->static_rate ? path->static_rate - 5 : 0);
- if (path->grh_mlid & (1 << 7)) {
+
+ if (path->grh_mlid & (1 << 7) ||
+ ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
rdma_ah_set_grh(ah_attr, NULL,
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index a6531ffe29a6..098653b8157e 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -609,7 +609,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
break;
default:
- entry->opcode = MTHCA_OPCODE_INVALID;
+ entry->opcode = 0xFF;
break;
}
} else {
@@ -808,8 +808,10 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
}
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
- if (IS_ERR(mailbox))
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
goto err_out_arm;
+ }
cq_context = mailbox->buf;
@@ -851,9 +853,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
}
spin_lock_irq(&dev->cq_table.lock);
- if (mthca_array_set(&dev->cq_table.cq,
- cq->cqn & (dev->limits.num_cqs - 1),
- cq)) {
+ err = mthca_array_set(&dev->cq_table.cq,
+ cq->cqn & (dev->limits.num_cqs - 1), cq);
+ if (err) {
spin_unlock_irq(&dev->cq_table.lock);
goto err_out_free_mr;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 220a3e4717a3..e23575861f28 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -105,7 +105,6 @@ enum {
MTHCA_OPCODE_ATOMIC_CS = 0x11,
MTHCA_OPCODE_ATOMIC_FA = 0x12,
MTHCA_OPCODE_BIND_MW = 0x18,
- MTHCA_OPCODE_INVALID = 0xff
};
enum {
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index d1680d3b5825..2a82661620fe 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -604,7 +604,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
/* Part 2 - check capabilities */
- page_size = ~dev->attr.page_size_caps + 1;
+ page_size = ~qed_attr->page_size_caps + 1;
if (page_size > PAGE_SIZE) {
DP_ERR(dev,
"Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index a2d708dceb8d..cca12100c583 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -351,10 +351,10 @@ struct qedr_srq_hwq_info {
u32 wqe_prod;
u32 sge_prod;
u32 wr_prod_cnt;
- u32 wr_cons_cnt;
+ atomic_t wr_cons_cnt;
u32 num_elems;
- u32 *virt_prod_pair_addr;
+ struct rdma_srq_producers *virt_prod_pair_addr;
dma_addr_t phy_prod_pair_addr;
};
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 93b16237b767..1f1d6a000e5c 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -128,8 +128,17 @@ qedr_iw_issue_event(void *context,
if (params->cm_info) {
event.ird = params->cm_info->ird;
event.ord = params->cm_info->ord;
- event.private_data_len = params->cm_info->private_data_len;
- event.private_data = (void *)params->cm_info->private_data;
+ /* Only connect_request and reply carry valid private data;
+ * for the rest of the events it may be left over from
+ * connection establishment. CONNECT_REQUEST is issued via
+ * qedr_iw_mpa_request.
+ */
+ if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
+ event.private_data_len =
+ params->cm_info->private_data_len;
+ event.private_data =
+ (void *)params->cm_info->private_data;
+ }
}
if (ep->cm_id)
@@ -451,10 +460,10 @@ qedr_addr6_resolve(struct qedr_dev *dev,
if ((!dst) || dst->error) {
if (dst) {
- dst_release(dst);
DP_ERR(dev,
"ip6_route_output returned dst->error = %d\n",
dst->error);
+ dst_release(dst);
}
return -EINVAL;
}
@@ -668,6 +677,7 @@ int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
listener->qed_handle);
cm_id->rem_ref(cm_id);
+ kfree(listener);
return rc;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 38fe2f741375..f847f0a9f204 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -2522,7 +2522,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_recv_wr = qp->rq.max_wr;
qp_attr->cap.max_send_sge = qp->sq.max_sges;
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
- qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+ qp_attr->cap.max_inline_data = dev->attr.max_inline;
qp_init_attr->cap = qp_attr->cap;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
@@ -3577,7 +3577,7 @@ static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
* count and consumer count and subtract it from max
* work request supported so that we get elements left.
*/
- used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
+ used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
return hw_srq->max_wr - used;
}
@@ -3592,7 +3592,6 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
unsigned long flags;
int status = 0;
u32 num_sge;
- u32 offset;
spin_lock_irqsave(&srq->lock, flags);
@@ -3605,7 +3604,8 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
if (!qedr_srq_elem_left(hw_srq) ||
wr->num_sge > srq->hw_srq.max_sges) {
DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
- hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
+ hw_srq->wr_prod_cnt,
+ atomic_read(&hw_srq->wr_cons_cnt),
wr->num_sge, srq->hw_srq.max_sges);
status = -ENOMEM;
*bad_wr = wr;
@@ -3639,22 +3639,20 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
hw_srq->sge_prod++;
}
- /* Flush WQE and SGE information before
+ /* Update WQE and SGE information before
* updating producer.
*/
- wmb();
+ dma_wmb();
/* SRQ producer is 8 bytes. Need to update SGE producer index
* in first 4 bytes and need to update WQE producer in
* next 4 bytes.
*/
- *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
- offset = offsetof(struct rdma_srq_producers, wqe_prod);
- *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
- hw_srq->wqe_prod;
+ srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod;
+ /* Make sure sge producer is updated first */
+ dma_wmb();
+ srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod;
- /* Flush producer after updating it. */
- wmb();
wr = wr->next;
}
@@ -4077,7 +4075,7 @@ static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
} else {
__process_resp_one(dev, qp, cq, wc, resp, wr_id);
}
- srq->hw_srq.wr_cons_cnt++;
+ atomic_inc(&srq->hw_srq.wr_cons_cnt);
return 1;
}
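The qedr_post_srq_recv() hunk above replaces the open-coded byte offset and the full wmb() barriers with typed stores through struct rdma_srq_producers and dma_wmb(): the ring payload is published before the SGE producer, and the SGE producer before the WQE producer, so the device does not observe a new WQE producer whose SGEs are not yet visible. A minimal sketch of that publish order; struct prod_pair and publish_srq_wqe() are illustrative names, only dma_wmb() is the real kernel barrier:

#include <asm/barrier.h>
#include <linux/types.h>

struct prod_pair {
        u32 sge_prod;
        u32 wqe_prod;
};

static void publish_srq_wqe(struct prod_pair *prod, u32 sge, u32 wqe)
{
        /* Make the WQE/SGE contents written earlier visible to the device. */
        dma_wmb();
        prod->sge_prod = sge;

        /* Order the SGE producer update before the WQE producer update. */
        dma_wmb();
        prod->wqe_prod = wqe;
}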
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index d831f3e61ae8..2626205780ee 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -756,7 +756,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
qib_dev_err(dd,
"Skipping linkcontrol sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail;
+ goto bail_link;
}
kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
@@ -766,7 +766,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
qib_dev_err(dd,
"Skipping sl2vl sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_link;
+ goto bail_sl;
}
kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
@@ -776,7 +776,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
qib_dev_err(dd,
"Skipping diag_counters sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sl;
+ goto bail_diagc;
}
kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
@@ -789,7 +789,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
qib_dev_err(dd,
"Skipping Congestion Control sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_diagc;
+ goto bail_cc;
}
kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
@@ -871,6 +871,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
&cc_table_bin_attr);
kobject_put(&ppd->pport_cc_kobj);
}
+ kobject_put(&ppd->diagc_kobj);
kobject_put(&ppd->sl2vl_kobj);
kobject_put(&ppd->pport_kobj);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index e611f133aa97..e6c11b5a1669 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -212,6 +212,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
}
usnic_uiom_free_dev_list(dev_list);
+ dev_list = NULL;
}
/* Try to find resources on an unused vf */
@@ -236,6 +237,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
qp_grp_check:
if (IS_ERR_OR_NULL(qp_grp)) {
usnic_err("Failed to allocate qp_grp\n");
+ if (usnic_ib_share_vf)
+ usnic_uiom_free_dev_list(dev_list);
return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
}
return qp_grp;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index ed99f0a08dc4..0a414c5329ce 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -833,7 +833,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
ret = -ENOMEM;
- goto err_free_device;
+ goto err_disable_pdev;
}
ret = pci_request_regions(pdev, DRV_NAME);
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 17e4abc067af..541ee30727aa 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -95,9 +95,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
if (!rdi)
return rdi;
- rdi->ports = kcalloc(nports,
- sizeof(struct rvt_ibport **),
- GFP_KERNEL);
+ rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
if (!rdi->ports)
ib_dealloc_device(&rdi->ibdev);
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 67ae960ab523..1fa19a77583e 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -3,6 +3,7 @@ config RDMA_RXE
depends on INET && PCI && INFINIBAND
depends on !64BIT || ARCH_DMA_ADDR_T_64BIT
select NET_UDP_TUNNEL
+ select CRYPTO
select CRYPTO_CRC32
select DMA_VIRT_OPS
---help---
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 10999fa69281..6589ff51eaf5 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -121,6 +121,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
rxe->attr.max_pkeys = RXE_MAX_PKEYS;
rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
+ addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
+ rxe->ndev->dev_addr);
rxe->max_ucontext = RXE_MAX_UCONTEXT;
}
@@ -163,9 +165,6 @@ static int rxe_init_ports(struct rxe_dev *rxe)
rxe_init_port_param(port);
- if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len)
- return -EINVAL;
-
port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len,
sizeof(*port->pkey_tbl), GFP_KERNEL);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index dff605fdf60f..2cca89ca08cd 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -203,6 +203,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
vaddr = page_address(sg_page(sg));
if (!vaddr) {
pr_warn("null vaddr\n");
+ ib_umem_release(umem);
err = -ENOMEM;
goto err1;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 54add70c22b5..04bfc36cc8d7 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -154,10 +154,12 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
memcpy(&fl6.daddr, daddr, sizeof(*daddr));
fl6.flowi6_proto = IPPROTO_UDP;
- if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
- recv_sockets.sk6->sk, &ndst, &fl6))) {
+ ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
+ recv_sockets.sk6->sk, &fl6,
+ NULL);
+ if (unlikely(IS_ERR(ndst))) {
pr_err_ratelimited("no route to %pI6\n", daddr);
- goto put;
+ return NULL;
}
if (unlikely(ndst->error)) {
@@ -498,6 +500,11 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
void rxe_loopback(struct sk_buff *skb)
{
+ if (skb->protocol == htons(ETH_P_IP))
+ skb_pull(skb, sizeof(struct iphdr));
+ else
+ skb_pull(skb, sizeof(struct ipv6hdr));
+
rxe_rcv(skb);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 230697fa31fe..41c9ede98c26 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -250,6 +250,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
if (err) {
vfree(qp->sq.queue->buf);
kfree(qp->sq.queue);
+ qp->sq.queue = NULL;
return err;
}
@@ -303,6 +304,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
if (err) {
vfree(qp->rq.queue->buf);
kfree(qp->rq.queue);
+ qp->rq.queue = NULL;
return err;
}
}
@@ -363,6 +365,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
err2:
rxe_queue_cleanup(qp->sq.queue);
err1:
+ qp->pd = NULL;
+ qp->rcq = NULL;
+ qp->scq = NULL;
+ qp->srq = NULL;
+
if (srq)
rxe_drop_ref(srq);
rxe_drop_ref(scq);
@@ -583,15 +590,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
int err;
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
- int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
+ int max_rd_atomic = attr->max_rd_atomic ?
+ roundup_pow_of_two(attr->max_rd_atomic) : 0;
qp->attr.max_rd_atomic = max_rd_atomic;
atomic_set(&qp->req.rd_atomic, max_rd_atomic);
}
if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
- int max_dest_rd_atomic =
- __roundup_pow_of_two(attr->max_dest_rd_atomic);
+ int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
+ roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;
qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 695a607e2d14..d94e2c5bc317 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -36,21 +36,26 @@
#include "rxe.h"
#include "rxe_loc.h"
+/* check that QP matches packet opcode type and is in a valid state */
static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct rxe_qp *qp)
{
+ unsigned int pkt_type;
+
if (unlikely(!qp->valid))
goto err1;
+ pkt_type = pkt->opcode & 0xe0;
+
switch (qp_type(qp)) {
case IB_QPT_RC:
- if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
+ if (unlikely(pkt_type != IB_OPCODE_RC)) {
pr_warn_ratelimited("bad qp type\n");
goto err1;
}
break;
case IB_QPT_UC:
- if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
+ if (unlikely(pkt_type != IB_OPCODE_UC)) {
pr_warn_ratelimited("bad qp type\n");
goto err1;
}
@@ -58,7 +63,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
case IB_QPT_UD:
case IB_QPT_SMI:
case IB_QPT_GSI:
- if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
+ if (unlikely(pkt_type != IB_OPCODE_UD)) {
pr_warn_ratelimited("bad qp type\n");
goto err1;
}
@@ -332,10 +337,14 @@ err1:
static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
const struct ib_gid_attr *gid_attr;
union ib_gid dgid;
union ib_gid *pdgid;
+ if (pkt->mask & RXE_LOOPBACK_MASK)
+ return 0;
+
if (skb->protocol == htons(ETH_P_IP)) {
ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
(struct in6_addr *)&dgid);
@@ -368,7 +377,7 @@ void rxe_rcv(struct sk_buff *skb)
if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
goto drop;
- if (unlikely(rxe_match_dgid(rxe, skb) < 0)) {
+ if (rxe_match_dgid(rxe, skb) < 0) {
pr_warn_ratelimited("failed matching dgid\n");
goto drop;
}
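check_type_state() above now classifies a packet by the top three bits of its BTH opcode (pkt->opcode & 0xe0) instead of and-ing against the class constants directly, which could not work since IB_OPCODE_RC is 0x00 and the low five bits carry the per-operation code. A small standalone sketch of that classification; the OPC_* values follow the usual IBA BTH encoding and should be treated as assumptions here:

#include <stdbool.h>
#include <stdint.h>

/* Assumed transport-class values held in bits 7:5 of the BTH opcode. */
#define OPC_RC 0x00
#define OPC_UC 0x20
#define OPC_RD 0x40
#define OPC_UD 0x60

static bool opcode_matches_class(uint8_t bth_opcode, uint8_t expected_class)
{
        /* Bits 7:5 select the transport class, bits 4:0 the operation. */
        return (bth_opcode & 0xe0) == expected_class;
}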
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 1c1eae0ef8c2..63db49144f62 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -664,7 +664,8 @@ next_wqe:
}
if (unlikely(qp_type(qp) == IB_QPT_RC &&
- qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
+ psn_compare(qp->req.psn, (qp->comp.psn +
+ RXE_MAX_UNACKED_PSNS)) > 0)) {
qp->req.wait_psn = 1;
goto exit;
}
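The rxe_req.c hunk above switches the unacked-PSN limit check to psn_compare(), because PSNs are 24-bit counters that wrap and a plain '>' misorders values near the wrap point. A standalone sketch of the usual wraparound-safe comparison for 24-bit serial numbers; psn_cmp24() is an illustrative helper, not necessarily the driver's exact definition:

#include <stdint.h>

/*
 * Returns a negative, zero or positive value when a is behind, equal to,
 * or ahead of b, with the difference taken modulo 2^24.
 * Typical use: if (psn_cmp24(req_psn, limit_psn) > 0) wait.
 */
static int32_t psn_cmp24(uint32_t a, uint32_t b)
{
        return (int32_t)((a - b) << 8);
}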
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index f5b1e0ad6142..f7f9caaec7d6 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -733,6 +733,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
unsigned int mask;
unsigned int length = 0;
int i;
+ struct ib_send_wr *next;
while (wr) {
mask = wr_opcode_mask(wr->opcode, qp);
@@ -749,6 +750,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
break;
}
+ next = wr->next;
+
length = 0;
for (i = 0; i < wr->num_sge; i++)
length += wr->sg_list[i].length;
@@ -759,7 +762,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
*bad_wr = wr;
break;
}
- wr = wr->next;
+ wr = next;
}
rxe_run_task(&qp->req.task, 1);
@@ -1143,7 +1146,7 @@ static ssize_t parent_show(struct device *device,
struct rxe_dev *rxe = container_of(device, struct rxe_dev,
ib_dev.dev);
- return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
+ return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1));
}
static DEVICE_ATTR_RO(parent);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index b22d02c9de90..ef1222101705 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -377,8 +377,12 @@ struct ipoib_dev_priv {
struct ipoib_rx_buf *rx_ring;
struct ipoib_tx_buf *tx_ring;
+ /* cyclic ring variables for managing tx_ring, for UD only */
unsigned int tx_head;
unsigned int tx_tail;
+ /* cyclic ring variables for counting overall outstanding send WRs */
+ unsigned int global_tx_head;
+ unsigned int global_tx_tail;
struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
struct ib_ud_wr tx_wr;
struct ib_wc send_wc[MAX_SEND_CQE];
@@ -509,7 +513,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev);
int ipoib_ib_dev_open_default(struct net_device *dev);
int ipoib_ib_dev_open(struct net_device *dev);
-int ipoib_ib_dev_stop(struct net_device *dev);
+void ipoib_ib_dev_stop(struct net_device *dev);
void ipoib_ib_dev_up(struct net_device *dev);
void ipoib_ib_dev_down(struct net_device *dev);
int ipoib_ib_dev_stop_default(struct net_device *dev);
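The new global_tx_head / global_tx_tail pair added above counts outstanding send WRs across both the UD ring and the connected-mode QPs; the ipoib_cm.c and ipoib_ib.c hunks that follow stop and wake the net queue on this combined depth instead of the UD-only tx_head / tx_tail. Both counters are free-running unsigned values, so the occupancy is always head minus tail and stays correct across wraparound. A minimal standalone sketch of that accounting; SENDQ_SIZE, struct txq and the txq_* helpers are illustrative names:

#include <stdbool.h>

#define SENDQ_SIZE 128u /* illustrative ring depth (ipoib_sendq_size above) */

struct txq {
        unsigned int global_tx_head; /* bumped on every posted send */
        unsigned int global_tx_tail; /* bumped on every completion */
};

static unsigned int txq_depth(const struct txq *q)
{
        /* Unsigned subtraction is wraparound-safe for free-running counters. */
        return q->global_tx_head - q->global_tx_tail;
}

static bool txq_should_stop(const struct txq *q)
{
        return txq_depth(q) == SENDQ_SIZE - 1;
}

static bool txq_may_wake(const struct txq *q)
{
        return txq_depth(q) <= SENDQ_SIZE >> 1;
}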
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index aa9dcfc36cd3..196f1e6b5396 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -756,7 +756,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
return;
}
- if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
tx->qp->qp_num);
netif_stop_queue(dev);
@@ -786,7 +787,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
} else {
netif_trans_update(dev);
++tx->tx_head;
- ++priv->tx_head;
+ ++priv->global_tx_head;
}
}
@@ -820,10 +821,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
netif_tx_lock(dev);
++tx->tx_tail;
- ++priv->tx_tail;
+ ++priv->global_tx_tail;
if (unlikely(netif_queue_stopped(dev) &&
- (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
@@ -1233,8 +1235,9 @@ timeout:
dev_kfree_skb_any(tx_req->skb);
netif_tx_lock_bh(p->dev);
++p->tx_tail;
- ++priv->tx_tail;
- if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
+ ++priv->global_tx_tail;
+ if (unlikely((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
netif_queue_stopped(p->dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(p->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 9006a13af1de..82b9c5b6e3e6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -406,9 +406,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
+ ++priv->global_tx_tail;
if (unlikely(netif_queue_stopped(dev) &&
- ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
@@ -633,7 +635,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
else
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
/* increase the tx_head after send success, but use it for queue state */
- if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
netif_stop_queue(dev);
}
@@ -661,18 +664,18 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
rc = priv->tx_head;
++priv->tx_head;
+ ++priv->global_tx_head;
}
return rc;
}
-static void __ipoib_reap_ah(struct net_device *dev)
+static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv)
{
- struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_ah *ah, *tah;
LIST_HEAD(remove_list);
unsigned long flags;
- netif_tx_lock_bh(dev);
+ netif_tx_lock_bh(priv->dev);
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
@@ -683,37 +686,37 @@ static void __ipoib_reap_ah(struct net_device *dev)
}
spin_unlock_irqrestore(&priv->lock, flags);
- netif_tx_unlock_bh(dev);
+ netif_tx_unlock_bh(priv->dev);
}
void ipoib_reap_ah(struct work_struct *work)
{
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
- struct net_device *dev = priv->dev;
- __ipoib_reap_ah(dev);
+ ipoib_reap_dead_ahs(priv);
if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
}
-static void ipoib_flush_ah(struct net_device *dev)
+static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv)
{
- struct ipoib_dev_priv *priv = ipoib_priv(dev);
-
- cancel_delayed_work(&priv->ah_reap_task);
- flush_workqueue(priv->wq);
- ipoib_reap_ah(&priv->ah_reap_task.work);
+ clear_bit(IPOIB_STOP_REAPER, &priv->flags);
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
+ round_jiffies_relative(HZ));
}
-static void ipoib_stop_ah(struct net_device *dev)
+static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv)
{
- struct ipoib_dev_priv *priv = ipoib_priv(dev);
-
set_bit(IPOIB_STOP_REAPER, &priv->flags);
- ipoib_flush_ah(dev);
+ cancel_delayed_work(&priv->ah_reap_task);
+ /*
+ * After ipoib_stop_ah_reaper() we always go through
+ * ipoib_reap_dead_ahs() which ensures the work is really stopped and
+ * does a final flush out of the dead_ah's list
+ */
}
static int recvs_pending(struct net_device *dev)
@@ -807,6 +810,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
+ ++priv->global_tx_tail;
}
for (i = 0; i < ipoib_recvq_size; ++i) {
@@ -841,18 +845,6 @@ timeout:
return 0;
}
-int ipoib_ib_dev_stop(struct net_device *dev)
-{
- struct ipoib_dev_priv *priv = ipoib_priv(dev);
-
- priv->rn_ops->ndo_stop(dev);
-
- clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
- ipoib_flush_ah(dev);
-
- return 0;
-}
-
int ipoib_ib_dev_open_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -896,10 +888,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
return -1;
}
- clear_bit(IPOIB_STOP_REAPER, &priv->flags);
- queue_delayed_work(priv->wq, &priv->ah_reap_task,
- round_jiffies_relative(HZ));
-
+ ipoib_start_ah_reaper(priv);
if (priv->rn_ops->ndo_open(dev)) {
pr_warn("%s: Failed to open dev\n", dev->name);
goto dev_stop;
@@ -910,13 +899,20 @@ int ipoib_ib_dev_open(struct net_device *dev)
return 0;
dev_stop:
- set_bit(IPOIB_STOP_REAPER, &priv->flags);
- cancel_delayed_work(&priv->ah_reap_task);
- set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
- ipoib_ib_dev_stop(dev);
+ ipoib_stop_ah_reaper(priv);
return -1;
}
+void ipoib_ib_dev_stop(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ priv->rn_ops->ndo_stop(dev);
+
+ clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ ipoib_stop_ah_reaper(priv);
+}
+
void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -1227,7 +1223,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
ipoib_mcast_dev_flush(dev);
if (oper_up)
set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
- ipoib_flush_ah(dev);
+ ipoib_reap_dead_ahs(priv);
}
if (level >= IPOIB_FLUSH_NORMAL)
@@ -1302,7 +1298,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
* the neighbor garbage collection is stopped and reaped.
* That should all be done now, so make a final ah flush.
*/
- ipoib_stop_ah(dev);
+ ipoib_reap_dead_ahs(priv);
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index d8cb5bbe6eb5..d0c35eb687ae 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1188,9 +1188,11 @@ static void ipoib_timeout(struct net_device *dev)
ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
jiffies_to_msecs(jiffies - dev_trans_start(dev)));
- ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
- netif_queue_stopped(dev),
- priv->tx_head, priv->tx_tail);
+ ipoib_warn(priv,
+ "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
+ netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
+ priv->global_tx_head, priv->global_tx_tail);
+
/* XXX reset QP, etc. */
}
@@ -1705,7 +1707,7 @@ static int ipoib_dev_init_default(struct net_device *dev)
goto out_rx_ring_cleanup;
}
- /* priv->tx_head, tx_tail & tx_outstanding are already 0 */
+ /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
if (ipoib_transport_dev_init(dev, priv->ca)) {
pr_warn("%s: ipoib_transport_dev_init failed\n",
@@ -1977,6 +1979,8 @@ static void ipoib_ndo_uninit(struct net_device *dev)
/* no more works over the priv->wq */
if (priv->wq) {
+ /* See ipoib_mcast_carrier_on_task() */
+ WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
priv->wq = NULL;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index bc979a85a505..6090f1ce0c56 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2301,6 +2301,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
sdev->device->name, port_num);
mutex_unlock(&sport->mutex);
+ ret = -EINVAL;
goto reject;
}
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 4263e905cafb..3362dcb3ec0e 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -348,20 +348,6 @@ static int evdev_fasync(int fd, struct file *file, int on)
return fasync_helper(fd, file, on, &client->fasync);
}
-static int evdev_flush(struct file *file, fl_owner_t id)
-{
- struct evdev_client *client = file->private_data;
- struct evdev *evdev = client->evdev;
-
- mutex_lock(&evdev->mutex);
-
- if (evdev->exist && !client->revoked)
- input_flush_device(&evdev->handle, file);
-
- mutex_unlock(&evdev->mutex);
- return 0;
-}
-
static void evdev_free(struct device *dev)
{
struct evdev *evdev = container_of(dev, struct evdev, dev);
@@ -475,6 +461,10 @@ static int evdev_release(struct inode *inode, struct file *file)
unsigned int i;
mutex_lock(&evdev->mutex);
+
+ if (evdev->exist && !client->revoked)
+ input_flush_device(&evdev->handle, file);
+
evdev_ungrab(evdev, client);
mutex_unlock(&evdev->mutex);
@@ -1336,7 +1326,6 @@ static const struct file_operations evdev_fops = {
.compat_ioctl = evdev_ioctl_compat,
#endif
.fasync = evdev_fasync,
- .flush = evdev_flush,
.llseek = no_llseek,
};
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 4c1e427dfabb..7ef6e1c165e3 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -460,7 +460,7 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
if (IS_ERR(abspam))
return PTR_ERR(abspam);
- for (i = 0; i < joydev->nabs; i++) {
+ for (i = 0; i < len && i < joydev->nabs; i++) {
if (abspam[i] > ABS_MAX) {
retval = -EINVAL;
goto out;
@@ -484,6 +484,9 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
int i;
int retval = 0;
+ if (len % sizeof(*keypam))
+ return -EINVAL;
+
len = min(len, sizeof(joydev->keypam));
/* Validate the map. */
@@ -491,7 +494,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
if (IS_ERR(keypam))
return PTR_ERR(keypam);
- for (i = 0; i < joydev->nkey; i++) {
+ for (i = 0; i < (len / 2) && i < joydev->nkey; i++) {
if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC) {
retval = -EINVAL;
goto out;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index aa4e431cbcd3..eacb8af8b4fc 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -229,9 +229,17 @@ static const struct xpad_device {
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -255,6 +263,7 @@ static const struct xpad_device {
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
+ { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
{ 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
@@ -309,6 +318,10 @@ static const struct xpad_device {
{ 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
@@ -432,6 +445,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
+ XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
@@ -441,8 +455,12 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
+ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
+ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
{ }
};
@@ -473,6 +491,16 @@ static const u8 xboxone_fw2015_init[] = {
};
/*
+ * This packet is required for Xbox One S (0x045e:0x02ea)
+ * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to
+ * initialize the controller that was previously used in
+ * Bluetooth mode.
+ */
+static const u8 xboxone_s_init[] = {
+ 0x05, 0x20, 0x00, 0x0f, 0x06
+};
+
+/*
* This packet is required for the Titanfall 2 Xbox One pads
* (0x0e6f:0x0165) to finish initialization and for Hori pads
* (0x0f0d:0x0067) to make the analog sticks work.
@@ -530,6 +558,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+ XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init),
+ XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index d56001181598..1edf0e8322cc 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -183,6 +183,7 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
"changed: [r%d c%d]: byte %02x\n",
row, col, new_state);
+ input_event(idev, EV_MSC, MSC_SCAN, pos);
input_report_key(idev, keycodes[pos],
new_state);
}
diff --git a/drivers/input/keyboard/dlink-dir685-touchkeys.c b/drivers/input/keyboard/dlink-dir685-touchkeys.c
index 88e321b76397..6fe4062e3ac2 100644
--- a/drivers/input/keyboard/dlink-dir685-touchkeys.c
+++ b/drivers/input/keyboard/dlink-dir685-touchkeys.c
@@ -142,7 +142,7 @@ MODULE_DEVICE_TABLE(of, dir685_tk_of_match);
static struct i2c_driver dir685_tk_i2c_driver = {
.driver = {
- .name = "dlin-dir685-touchkeys",
+ .name = "dlink-dir685-touchkeys",
.of_match_table = of_match_ptr(dir685_tk_of_match),
},
.probe = dir685_tk_probe,
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index f77b295e0123..01788a78041b 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -257,8 +257,8 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
}
keypad->irq = platform_get_irq(pdev, 0);
- if (!keypad->irq) {
- err = -ENXIO;
+ if (keypad->irq < 0) {
+ err = keypad->irq;
goto failed_free;
}
diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
index c7f26fa3034c..cf138d836eec 100644
--- a/drivers/input/keyboard/nspire-keypad.c
+++ b/drivers/input/keyboard/nspire-keypad.c
@@ -96,9 +96,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
+static int nspire_keypad_open(struct input_dev *input)
{
+ struct nspire_keypad *keypad = input_get_drvdata(input);
unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles;
+ int error;
+
+ error = clk_prepare_enable(keypad->clk);
+ if (error)
+ return error;
cycles_per_us = (clk_get_rate(keypad->clk) / 1000000);
if (cycles_per_us == 0)
@@ -124,30 +130,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
keypad->int_mask = 1 << 1;
writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK);
- /* Disable GPIO interrupts to prevent hanging on touchpad */
- /* Possibly used to detect touchpad events */
- writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
- /* Acknowledge existing interrupts */
- writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
-
- return 0;
-}
-
-static int nspire_keypad_open(struct input_dev *input)
-{
- struct nspire_keypad *keypad = input_get_drvdata(input);
- int error;
-
- error = clk_prepare_enable(keypad->clk);
- if (error)
- return error;
-
- error = nspire_keypad_chip_init(keypad);
- if (error) {
- clk_disable_unprepare(keypad->clk);
- return error;
- }
-
return 0;
}
@@ -155,6 +137,11 @@ static void nspire_keypad_close(struct input_dev *input)
{
struct nspire_keypad *keypad = input_get_drvdata(input);
+ /* Disable interrupts */
+ writel(0, keypad->reg_base + KEYPAD_INTMSK);
+ /* Acknowledge existing interrupts */
+ writel(~0, keypad->reg_base + KEYPAD_INT);
+
clk_disable_unprepare(keypad->clk);
}
@@ -215,6 +202,25 @@ static int nspire_keypad_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ error = clk_prepare_enable(keypad->clk);
+ if (error) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ return error;
+ }
+
+ /* Disable interrupts */
+ writel(0, keypad->reg_base + KEYPAD_INTMSK);
+ /* Acknowledge existing interrupts */
+ writel(~0, keypad->reg_base + KEYPAD_INT);
+
+ /* Disable GPIO interrupts to prevent hanging on touchpad */
+ /* Possibly used to detect touchpad events */
+ writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
+ /* Acknowledge existing GPIO interrupts */
+ writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
+
+ clk_disable_unprepare(keypad->clk);
+
input_set_drvdata(input, keypad);
input->id.bustype = BUS_HOST;
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 840e53732753..adb1ecc969ee 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -199,12 +199,8 @@ static int omap4_keypad_open(struct input_dev *input)
return 0;
}
-static void omap4_keypad_close(struct input_dev *input)
+static void omap4_keypad_stop(struct omap4_keypad *keypad_data)
{
- struct omap4_keypad *keypad_data = input_get_drvdata(input);
-
- disable_irq(keypad_data->irq);
-
/* Disable interrupts and wake-up events */
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
OMAP4_VAL_IRQDISABLE);
@@ -213,7 +209,15 @@ static void omap4_keypad_close(struct input_dev *input)
/* clear pending interrupts */
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
+}
+
+static void omap4_keypad_close(struct input_dev *input)
+{
+ struct omap4_keypad *keypad_data;
+ keypad_data = input_get_drvdata(input);
+ disable_irq(keypad_data->irq);
+ omap4_keypad_stop(keypad_data);
enable_irq(keypad_data->irq);
pm_runtime_put_sync(input->dev.parent);
@@ -236,13 +240,37 @@ static int omap4_keypad_parse_dt(struct device *dev,
return 0;
}
+static int omap4_keypad_check_revision(struct device *dev,
+ struct omap4_keypad *keypad_data)
+{
+ unsigned int rev;
+
+ rev = __raw_readl(keypad_data->base + OMAP4_KBD_REVISION);
+ rev &= 0x03 << 30;
+ rev >>= 30;
+ switch (rev) {
+ case KBD_REVISION_OMAP4:
+ keypad_data->reg_offset = 0x00;
+ keypad_data->irqreg_offset = 0x00;
+ break;
+ case KBD_REVISION_OMAP5:
+ keypad_data->reg_offset = 0x10;
+ keypad_data->irqreg_offset = 0x0c;
+ break;
+ default:
+ dev_err(dev, "Keypad reports unsupported revision %d", rev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int omap4_keypad_probe(struct platform_device *pdev)
{
struct omap4_keypad *keypad_data;
struct input_dev *input_dev;
struct resource *res;
unsigned int max_keys;
- int rev;
int irq;
int error;
@@ -253,10 +281,8 @@ static int omap4_keypad_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- dev_err(&pdev->dev, "no keyboard irq assigned\n");
- return -EINVAL;
- }
+ if (irq < 0)
+ return irq;
keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL);
if (!keypad_data) {
@@ -284,41 +310,33 @@ static int omap4_keypad_probe(struct platform_device *pdev)
goto err_release_mem;
}
+ pm_runtime_enable(&pdev->dev);
/*
* Enable clocks for the keypad module so that we can read
* revision register.
*/
- pm_runtime_enable(&pdev->dev);
error = pm_runtime_get_sync(&pdev->dev);
if (error) {
dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
- goto err_unmap;
- }
- rev = __raw_readl(keypad_data->base + OMAP4_KBD_REVISION);
- rev &= 0x03 << 30;
- rev >>= 30;
- switch (rev) {
- case KBD_REVISION_OMAP4:
- keypad_data->reg_offset = 0x00;
- keypad_data->irqreg_offset = 0x00;
- break;
- case KBD_REVISION_OMAP5:
- keypad_data->reg_offset = 0x10;
- keypad_data->irqreg_offset = 0x0c;
- break;
- default:
- dev_err(&pdev->dev,
- "Keypad reports unsupported revision %d", rev);
- error = -EINVAL;
- goto err_pm_put_sync;
+ pm_runtime_put_noidle(&pdev->dev);
+ } else {
+ error = omap4_keypad_check_revision(&pdev->dev,
+ keypad_data);
+ if (!error) {
+ /* Ensure device does not raise interrupts */
+ omap4_keypad_stop(keypad_data);
+ }
+ pm_runtime_put_sync(&pdev->dev);
}
+ if (error)
+ goto err_pm_disable;
/* input device allocation */
keypad_data->input = input_dev = input_allocate_device();
if (!input_dev) {
error = -ENOMEM;
- goto err_pm_put_sync;
+ goto err_pm_disable;
}
input_dev->name = pdev->name;
@@ -364,28 +382,25 @@ static int omap4_keypad_probe(struct platform_device *pdev)
goto err_free_keymap;
}
- device_init_wakeup(&pdev->dev, true);
- pm_runtime_put_sync(&pdev->dev);
-
error = input_register_device(keypad_data->input);
if (error < 0) {
dev_err(&pdev->dev, "failed to register input device\n");
- goto err_pm_disable;
+ goto err_free_irq;
}
+ device_init_wakeup(&pdev->dev, true);
platform_set_drvdata(pdev, keypad_data);
+
return 0;
-err_pm_disable:
- pm_runtime_disable(&pdev->dev);
+err_free_irq:
free_irq(keypad_data->irq, keypad_data);
err_free_keymap:
kfree(keypad_data->keymap);
err_free_input:
input_free_device(input_dev);
-err_pm_put_sync:
- pm_runtime_put_sync(&pdev->dev);
-err_unmap:
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
iounmap(keypad_data->base);
err_release_mem:
release_mem_region(res->start, resource_size(res));
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index ad5d7f94f95a..1c7aa86c92ab 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -111,7 +111,8 @@ static irqreturn_t sunkbd_interrupt(struct serio *serio,
switch (data) {
case SUNKBD_RET_RESET:
- schedule_work(&sunkbd->tq);
+ if (sunkbd->enabled)
+ schedule_work(&sunkbd->tq);
sunkbd->reset = -1;
break;
@@ -212,16 +213,12 @@ static int sunkbd_initialize(struct sunkbd *sunkbd)
}
/*
- * sunkbd_reinit() sets leds and beeps to a state the computer remembers they
- * were in.
+ * sunkbd_set_leds_beeps() sets leds and beeps to a state the computer remembers
+ * they were in.
*/
-static void sunkbd_reinit(struct work_struct *work)
+static void sunkbd_set_leds_beeps(struct sunkbd *sunkbd)
{
- struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
-
- wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);
-
serio_write(sunkbd->serio, SUNKBD_CMD_SETLED);
serio_write(sunkbd->serio,
(!!test_bit(LED_CAPSL, sunkbd->dev->led) << 3) |
@@ -234,11 +231,39 @@ static void sunkbd_reinit(struct work_struct *work)
SUNKBD_CMD_BELLOFF - !!test_bit(SND_BELL, sunkbd->dev->snd));
}
+
+/*
+ * sunkbd_reinit() waits for the keyboard reset to complete and restores the
+ * state of leds and beeps.
+ */
+
+static void sunkbd_reinit(struct work_struct *work)
+{
+ struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
+
+ /*
+ * It is OK that we check sunkbd->enabled without pausing serio,
+ * as we only want to catch the true->false transition that will
+ * happen once and we will be woken up for it.
+ */
+ wait_event_interruptible_timeout(sunkbd->wait,
+ sunkbd->reset >= 0 || !sunkbd->enabled,
+ HZ);
+
+ if (sunkbd->reset >= 0 && sunkbd->enabled)
+ sunkbd_set_leds_beeps(sunkbd);
+}
+
static void sunkbd_enable(struct sunkbd *sunkbd, bool enable)
{
serio_pause_rx(sunkbd->serio);
sunkbd->enabled = enable;
serio_continue_rx(sunkbd->serio);
+
+ if (!enable) {
+ wake_up_interruptible(&sunkbd->wait);
+ cancel_work_sync(&sunkbd->tq);
+ }
}
/*
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index f9f98ef1d98e..8677dbe0fd20 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -63,7 +63,7 @@ struct twl4030_keypad {
bool autorepeat;
unsigned int n_rows;
unsigned int n_cols;
- unsigned int irq;
+ int irq;
struct device *dbg_dev;
struct input_dev *input;
@@ -389,10 +389,8 @@ static int twl4030_kp_probe(struct platform_device *pdev)
}
kp->irq = platform_get_irq(pdev, 0);
- if (!kp->irq) {
- dev_err(&pdev->dev, "no keyboard irq assigned\n");
- return -EINVAL;
- }
+ if (kp->irq < 0)
+ return kp->irq;
error = matrix_keypad_build_keymap(keymap_data, NULL,
TWL4030_MAX_ROWS,
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index a3e79bf5a04b..3695dd7dbb9b 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -696,7 +696,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
struct input_dev *input_dev;
const struct adxl34x_platform_data *pdata;
int err, range, i;
- unsigned char revid;
+ int revid;
if (!irq) {
dev_err(dev, "no IRQ?\n");
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 23c191a2a071..cf4d507efaf6 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -571,12 +571,15 @@ static int cm109_input_open(struct input_dev *idev)
dev->ctl_data->byte[HID_OR2] = dev->keybit;
dev->ctl_data->byte[HID_OR3] = 0x00;
+ dev->ctl_urb_pending = 1;
error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL);
- if (error)
+ if (error) {
+ dev->ctl_urb_pending = 0;
dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n",
__func__, error);
- else
+ } else {
dev->open = 1;
+ }
mutex_unlock(&dev->pm_mutex);
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index c1b524ab4623..ba50f5713423 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -573,7 +573,7 @@ static int cyapa_pip_retrieve_data_structure(struct cyapa *cyapa,
memset(&cmd, 0, sizeof(cmd));
put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd.head.addr);
- put_unaligned_le16(sizeof(cmd), &cmd.head.length - 2);
+ put_unaligned_le16(sizeof(cmd) - 2, &cmd.head.length);
cmd.head.report_id = PIP_APP_CMD_REPORT_ID;
cmd.head.cmd_code = PIP_RETRIEVE_DATA_STRUCTURE;
put_unaligned_le16(read_offset, &cmd.read_offset);
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index d3ff1fc09af7..a9040c0fb4c3 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -2044,7 +2044,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
{
int type = *((unsigned int *)kp->arg);
- return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
+ return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
}
static int __init psmouse_init(void)
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index 1d6010d463e2..022a8cb58a06 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -454,7 +454,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
fsp_reg_write_enable(psmouse, false);
- return count;
+ return retval;
}
PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index d9042d0566ab..c6d393114502 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -173,6 +173,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN005b", /* P50 */
"LEN005e", /* T560 */
"LEN006c", /* T470s */
+ "LEN007a", /* T470s */
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */
@@ -181,6 +182,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0093", /* T480 */
"LEN0096", /* X280 */
"LEN0097", /* X280 -> ALPS trackpoint */
+ "LEN0099", /* X1 Extreme 1st */
"LEN009b", /* T580 */
"LEN200f", /* T450s */
"LEN2044", /* L470 */
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 6590d10f166f..e46865785409 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -20,10 +20,12 @@
#include "trackpoint.h"
static const char * const trackpoint_variants[] = {
- [TP_VARIANT_IBM] = "IBM",
- [TP_VARIANT_ALPS] = "ALPS",
- [TP_VARIANT_ELAN] = "Elan",
- [TP_VARIANT_NXP] = "NXP",
+ [TP_VARIANT_IBM] = "IBM",
+ [TP_VARIANT_ALPS] = "ALPS",
+ [TP_VARIANT_ELAN] = "Elan",
+ [TP_VARIANT_NXP] = "NXP",
+ [TP_VARIANT_JYT_SYNAPTICS] = "JYT_Synaptics",
+ [TP_VARIANT_SYNAPTICS] = "Synaptics",
};
/*
@@ -283,6 +285,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse,
case TP_VARIANT_ALPS:
case TP_VARIANT_ELAN:
case TP_VARIANT_NXP:
+ case TP_VARIANT_JYT_SYNAPTICS:
+ case TP_VARIANT_SYNAPTICS:
if (variant_id)
*variant_id = param[0];
if (firmware_id)
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 538986e5ac5b..4ebcdf802e9a 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -27,10 +27,12 @@
* 0x01 was the original IBM trackpoint, others implement very limited
* subset of trackpoint features.
*/
-#define TP_VARIANT_IBM 0x01
-#define TP_VARIANT_ALPS 0x02
-#define TP_VARIANT_ELAN 0x03
-#define TP_VARIANT_NXP 0x04
+#define TP_VARIANT_IBM 0x01
+#define TP_VARIANT_ALPS 0x02
+#define TP_VARIANT_ELAN 0x03
+#define TP_VARIANT_NXP 0x04
+#define TP_VARIANT_JYT_SYNAPTICS 0x05
+#define TP_VARIANT_SYNAPTICS 0x06
/*
* Commands
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 162526a0d463..ac6a20f7afdf 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -208,7 +208,7 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
if (count) {
kfree(attn_data.data);
- attn_data.data = NULL;
+ drvdata->attn_data.data = NULL;
}
if (!kfifo_is_empty(&drvdata->attn_fifo))
@@ -1213,7 +1213,8 @@ static int rmi_driver_probe(struct device *dev)
if (data->input) {
rmi_driver_set_input_name(rmi_dev, data->input);
if (!rmi_dev->xport->input) {
- if (input_register_device(data->input)) {
+ retval = input_register_device(data->input);
+ if (retval) {
dev_err(dev, "%s: Failed to register input device.\n",
__func__);
goto err_destroy_functions;
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index e1423f7648d6..4c039e4125d9 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(hil_mlc_unregister);
static LIST_HEAD(hil_mlcs);
static DEFINE_RWLOCK(hil_mlcs_lock);
static struct timer_list hil_mlcs_kicker;
-static int hil_mlcs_probe;
+static int hil_mlcs_probe, hil_mlc_stop;
static void hil_mlcs_process(unsigned long unused);
static DECLARE_TASKLET_DISABLED(hil_mlcs_tasklet, hil_mlcs_process, 0);
@@ -702,9 +702,13 @@ static int hilse_donode(hil_mlc *mlc)
if (!mlc->ostarted) {
mlc->ostarted = 1;
mlc->opacket = pack;
- mlc->out(mlc);
+ rc = mlc->out(mlc);
nextidx = HILSEN_DOZE;
write_unlock_irqrestore(&mlc->lock, flags);
+ if (rc) {
+ hil_mlc_stop = 1;
+ return 1;
+ }
break;
}
mlc->ostarted = 0;
@@ -715,8 +719,13 @@ static int hilse_donode(hil_mlc *mlc)
case HILSE_CTS:
write_lock_irqsave(&mlc->lock, flags);
- nextidx = mlc->cts(mlc) ? node->bad : node->good;
+ rc = mlc->cts(mlc);
+ nextidx = rc ? node->bad : node->good;
write_unlock_irqrestore(&mlc->lock, flags);
+ if (rc) {
+ hil_mlc_stop = 1;
+ return 1;
+ }
break;
default:
@@ -780,6 +789,12 @@ static void hil_mlcs_process(unsigned long unused)
static void hil_mlcs_timer(struct timer_list *unused)
{
+ if (hil_mlc_stop) {
+ /* could not send packet - stop immediately. */
+ pr_warn(PREFIX "HIL seems stuck - Disabling HIL MLC.\n");
+ return;
+ }
+
hil_mlcs_probe = 1;
tasklet_schedule(&hil_mlcs_tasklet);
/* Re-insert the periodic task. */
diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c
index 232d30c825bd..3e85e9039374 100644
--- a/drivers/input/serio/hp_sdc_mlc.c
+++ b/drivers/input/serio/hp_sdc_mlc.c
@@ -210,7 +210,7 @@ static int hp_sdc_mlc_cts(hil_mlc *mlc)
priv->tseq[2] = 1;
priv->tseq[3] = 0;
priv->tseq[4] = 0;
- __hp_sdc_enqueue_transaction(&priv->trans);
+ return __hp_sdc_enqueue_transaction(&priv->trans);
busy:
return 1;
done:
@@ -219,7 +219,7 @@ static int hp_sdc_mlc_cts(hil_mlc *mlc)
return 0;
}
-static void hp_sdc_mlc_out(hil_mlc *mlc)
+static int hp_sdc_mlc_out(hil_mlc *mlc)
{
struct hp_sdc_mlc_priv_s *priv;
@@ -234,7 +234,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc)
do_data:
if (priv->emtestmode) {
up(&mlc->osem);
- return;
+ return 0;
}
/* Shouldn't be sending commands when loop may be busy */
BUG_ON(down_trylock(&mlc->csem));
@@ -296,7 +296,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc)
BUG_ON(down_trylock(&mlc->csem));
}
enqueue:
- hp_sdc_enqueue_transaction(&priv->trans);
+ return hp_sdc_enqueue_transaction(&priv->trans);
}
static int __init hp_sdc_mlc_init(void)
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 136f6e7bf797..f20e54f41dde 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -224,6 +224,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+ },
+ },
{ }
};
@@ -430,6 +436,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
},
},
{
+ /* Lenovo XiaoXin Air 12 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
+ },
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
@@ -534,6 +547,25 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
},
},
+ {
+ /*
+ * Acer Aspire 5738z
+ * Touchpad stops working in mux mode when dis- + re-enabled
+ * with the touchpad enable/disable toggle hotkey
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
+ },
+ },
+ {
+ /* Entroware Proteus */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ },
+ },
{ }
};
@@ -560,6 +592,11 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
},
+ }, {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ },
},
{ }
};
@@ -586,6 +623,48 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
},
},
{
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
+ },
+ },
+ {
/* Advent 4211 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
@@ -655,6 +734,21 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
},
},
+ {
+ /* Lenovo ThinkPad Twist S230u */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
+ },
+ },
+ {
+ /* Entroware Proteus */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ },
+ },
{ }
};
@@ -684,6 +778,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
},
},
+ {
+ /* Acer Aspire 5 A515 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
+ },
+ },
{ }
};
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 95a78ccbd847..c60593c8d2be 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -125,6 +125,7 @@ module_param_named(unmask_kbd_data, i8042_unmask_kbd_data, bool, 0600);
MODULE_PARM_DESC(unmask_kbd_data, "Unconditional enable (may reveal sensitive data) of normally sanitize-filtered kbd data traffic debug log [pre-condition: i8042.debug=1 enabled]");
#endif
+static bool i8042_present;
static bool i8042_bypass_aux_irq_test;
static char i8042_kbd_firmware_id[128];
static char i8042_aux_firmware_id[128];
@@ -345,6 +346,9 @@ int i8042_command(unsigned char *param, int command)
unsigned long flags;
int retval;
+ if (!i8042_present)
+ return -1;
+
spin_lock_irqsave(&i8042_lock, flags);
retval = __i8042_command(param, command);
spin_unlock_irqrestore(&i8042_lock, flags);
@@ -1468,7 +1472,8 @@ static int __init i8042_setup_aux(void)
if (error)
goto err_free_ports;
- if (aux_enable())
+ error = aux_enable();
+ if (error)
goto err_free_irq;
i8042_aux_irq_registered = true;
@@ -1613,12 +1618,15 @@ static int __init i8042_init(void)
err = i8042_platform_init();
if (err)
- return err;
+ return (err == -ENODEV) ? 0 : err;
err = i8042_controller_check();
if (err)
goto err_platform_exit;
+ /* Set this before creating the dev to allow i8042_command to work right away */
+ i8042_present = true;
+
pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
if (IS_ERR(pdev)) {
err = PTR_ERR(pdev);
@@ -1637,6 +1645,9 @@ static int __init i8042_init(void)
static void __exit i8042_exit(void)
{
+ if (!i8042_present)
+ return;
+
platform_device_unregister(i8042_platform_device);
platform_driver_unregister(&i8042_driver);
i8042_platform_exit();
diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c
index 04b96fe39339..46512b4d686a 100644
--- a/drivers/input/serio/sun4i-ps2.c
+++ b/drivers/input/serio/sun4i-ps2.c
@@ -210,7 +210,6 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
struct sun4i_ps2data *drvdata;
struct serio *serio;
struct device *dev = &pdev->dev;
- unsigned int irq;
int error;
drvdata = kzalloc(sizeof(struct sun4i_ps2data), GFP_KERNEL);
@@ -263,14 +262,12 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
writel(0, drvdata->reg_base + PS2_REG_GCTL);
/* Get IRQ for the device */
- irq = platform_get_irq(pdev, 0);
- if (!irq) {
- dev_err(dev, "no IRQ found\n");
- error = -ENXIO;
+ drvdata->irq = platform_get_irq(pdev, 0);
+ if (drvdata->irq < 0) {
+ error = drvdata->irq;
goto err_disable_clk;
}
- drvdata->irq = irq;
drvdata->serio = serio;
drvdata->dev = dev;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 2a80675cfd94..de400d76df55 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -95,6 +95,7 @@ config TOUCHSCREEN_AD7879_SPI
config TOUCHSCREEN_ADC
tristate "Generic ADC based resistive touchscreen"
depends on IIO
+ select IIO_BUFFER
select IIO_BUFFER_CB
help
Say Y here if you want to use the generic ADC
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index a2f45aefce08..b536768234b7 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -35,6 +35,7 @@
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <asm/irq.h>
+#include <asm/unaligned.h>
/*
* This code has been heavily tested on a Nokia 770, and lightly
@@ -199,6 +200,26 @@ struct ads7846 {
#define REF_ON (READ_12BIT_DFR(x, 1, 1))
#define REF_OFF (READ_12BIT_DFR(y, 0, 0))
+static int get_pendown_state(struct ads7846 *ts)
+{
+ if (ts->get_pendown_state)
+ return ts->get_pendown_state();
+
+ return !gpio_get_value(ts->gpio_pendown);
+}
+
+static void ads7846_report_pen_up(struct ads7846 *ts)
+{
+ struct input_dev *input = ts->input;
+
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_sync(input);
+
+ ts->pendown = false;
+ dev_vdbg(&ts->spi->dev, "UP\n");
+}
+
/* Must be called with ts->lock held */
static void ads7846_stop(struct ads7846 *ts)
{
@@ -215,6 +236,10 @@ static void ads7846_stop(struct ads7846 *ts)
static void ads7846_restart(struct ads7846 *ts)
{
if (!ts->disabled && !ts->suspended) {
+ /* Check if pen was released since last stop */
+ if (ts->pendown && !get_pendown_state(ts))
+ ads7846_report_pen_up(ts);
+
/* Tell IRQ thread that it may poll the device. */
ts->stopped = false;
mb();
@@ -410,7 +435,7 @@ static int ads7845_read12_ser(struct device *dev, unsigned command)
if (status == 0) {
/* BE12 value, then padding */
- status = be16_to_cpu(*((u16 *)&req->sample[1]));
+ status = get_unaligned_be16(&req->sample[1]);
status = status >> 3;
status &= 0x0fff;
}
@@ -605,14 +630,6 @@ static const struct attribute_group ads784x_attr_group = {
/*--------------------------------------------------------------------------*/
-static int get_pendown_state(struct ads7846 *ts)
-{
- if (ts->get_pendown_state)
- return ts->get_pendown_state();
-
- return !gpio_get_value(ts->gpio_pendown);
-}
-
static void null_wait_for_sync(void)
{
}
@@ -785,10 +802,11 @@ static void ads7846_report_state(struct ads7846 *ts)
/* compute touch pressure resistance using equation #2 */
Rt = z2;
Rt -= z1;
- Rt *= x;
Rt *= ts->x_plate_ohms;
+ Rt = DIV_ROUND_CLOSEST(Rt, 16);
+ Rt *= x;
Rt /= z1;
- Rt = (Rt + 2047) >> 12;
+ Rt = DIV_ROUND_CLOSEST(Rt, 256);
} else {
Rt = 0;
}
@@ -871,16 +889,8 @@ static irqreturn_t ads7846_irq(int irq, void *handle)
msecs_to_jiffies(TS_POLL_PERIOD));
}
- if (ts->pendown && !ts->stopped) {
- struct input_dev *input = ts->input;
-
- input_report_key(input, BTN_TOUCH, 0);
- input_report_abs(input, ABS_PRESSURE, 0);
- input_sync(input);
-
- ts->pendown = false;
- dev_vdbg(&ts->spi->dev, "UP\n");
- }
+ if (ts->pendown && !ts->stopped)
+ ads7846_report_pen_up(ts);
return IRQ_HANDLED;
}
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index d21ca39b0fdb..adfae2d88707 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -41,6 +41,7 @@
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
+#include <linux/uuid.h>
#include <asm/unaligned.h>
/* Device, Driver information */
@@ -1131,6 +1132,40 @@ static void elants_i2c_power_off(void *_data)
}
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id i2c_hid_ids[] = {
+ {"ACPI0C50", 0 },
+ {"PNP0C50", 0 },
+ { },
+};
+
+static const guid_t i2c_hid_guid =
+ GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
+ 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
+
+static bool elants_acpi_is_hid_device(struct device *dev)
+{
+ acpi_handle handle = ACPI_HANDLE(dev);
+ union acpi_object *obj;
+
+ if (acpi_match_device_ids(ACPI_COMPANION(dev), i2c_hid_ids))
+ return false;
+
+ obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL, ACPI_TYPE_INTEGER);
+ if (obj) {
+ ACPI_FREE(obj);
+ return true;
+ }
+
+ return false;
+}
+#else
+static bool elants_acpi_is_hid_device(struct device *dev)
+{
+ return false;
+}
+#endif
+
static int elants_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1139,9 +1174,14 @@ static int elants_i2c_probe(struct i2c_client *client,
unsigned long irqflags;
int error;
+ /* Don't bind to i2c-hid compatible devices, these are handled by the i2c-hid drv. */
+ if (elants_acpi_is_hid_device(&client->dev)) {
+ dev_warn(&client->dev, "This device appears to be an I2C-HID device, not binding\n");
+ return -ENODEV;
+ }
+
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev,
- "%s: i2c check functionality error\n", DEVICE_NAME);
+ dev_err(&client->dev, "I2C check functionality error\n");
return -ENXIO;
}
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index 7f2942f3cec6..0f3146bcfcd0 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -345,8 +345,10 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
switch (elo->id) {
case 0: /* 10-byte protocol */
- if (elo_setup_10(elo))
+ if (elo_setup_10(elo)) {
+ err = -EIO;
goto fail3;
+ }
break;
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b20ba6599273..7e480e236421 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -137,6 +137,18 @@ static const struct dmi_system_id rotated_screen[] = {
},
},
{
+ .ident = "Teclast X98 Pro",
+ .matches = {
+ /*
+ * Only match BIOS date, because the manufacturer's
+ * BIOS does not report the board name at all
+ * (sometimes)...
+ */
+ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
+ },
+ },
+ {
.ident = "WinBook TW100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index c10fc594f94d..6bfe42a11452 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -538,20 +538,25 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
mutex_lock(&input_dev->mutex);
- if (input_dev->users) {
- retval = clk_prepare_enable(tsc->adc_clk);
- if (retval)
- goto out;
-
- retval = clk_prepare_enable(tsc->tsc_clk);
- if (retval) {
- clk_disable_unprepare(tsc->adc_clk);
- goto out;
- }
+ if (!input_dev->users)
+ goto out;
- retval = imx6ul_tsc_init(tsc);
+ retval = clk_prepare_enable(tsc->adc_clk);
+ if (retval)
+ goto out;
+
+ retval = clk_prepare_enable(tsc->tsc_clk);
+ if (retval) {
+ clk_disable_unprepare(tsc->adc_clk);
+ goto out;
}
+ retval = imx6ul_tsc_init(tsc);
+ if (retval) {
+ clk_disable_unprepare(tsc->tsc_clk);
+ clk_disable_unprepare(tsc->adc_clk);
+ goto out;
+ }
out:
mutex_unlock(&input_dev->mutex);
return retval;
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index a5ab774da4cc..fb28fd2d6f1c 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -54,6 +54,7 @@
enum mms_type {
TYPE_MMS114 = 114,
TYPE_MMS152 = 152,
+ TYPE_MMS345L = 345,
};
struct mms114_data {
@@ -91,15 +92,15 @@ static int __mms114_read_reg(struct mms114_data *data, unsigned int reg,
if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL)
BUG();
- /* Write register: use repeated start */
+ /* Write register */
xfer[0].addr = client->addr;
- xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART;
+ xfer[0].flags = client->flags & I2C_M_TEN;
xfer[0].len = 1;
xfer[0].buf = &buf;
/* Read data */
xfer[1].addr = client->addr;
- xfer[1].flags = I2C_M_RD;
+ xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD;
xfer[1].len = len;
xfer[1].buf = val;
@@ -250,6 +251,15 @@ static int mms114_get_version(struct mms114_data *data)
int error;
switch (data->type) {
+ case TYPE_MMS345L:
+ error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf);
+ if (error)
+ return error;
+
+ dev_info(dev, "TSP FW Rev: bootloader 0x%x / core 0x%x / config 0x%x\n",
+ buf[0], buf[1], buf[2]);
+ break;
+
case TYPE_MMS152:
error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf);
if (error)
@@ -287,8 +297,8 @@ static int mms114_setup_regs(struct mms114_data *data)
if (error < 0)
return error;
- /* MMS152 has no configuration or power on registers */
- if (data->type == TYPE_MMS152)
+ /* Only MMS114 has configuration and power on registers */
+ if (data->type != TYPE_MMS114)
return 0;
error = mms114_set_active(data, true);
@@ -428,10 +438,8 @@ static int mms114_probe(struct i2c_client *client,
const void *match_data;
int error;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_PROTOCOL_MANGLING)) {
- dev_err(&client->dev,
- "Need i2c bus that supports protocol mangling\n");
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "Not supported I2C adapter\n");
return -ENODEV;
}
@@ -600,6 +608,9 @@ static const struct of_device_id mms114_dt_match[] = {
}, {
.compatible = "melfas,mms152",
.data = (void *)TYPE_MMS152,
+ }, {
+ .compatible = "melfas,mms345l",
+ .data = (void *)TYPE_MMS345L,
},
{ }
};
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index 05c1054330b7..1a8c7403cfe6 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -419,6 +419,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
enum raydium_bl_ack state)
{
int error;
+ static const u8 cmd[] = { 0xFF, 0x39 };
error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, data, len);
if (error) {
@@ -427,7 +428,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
return error;
}
- error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, NULL, 0);
+ error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, cmd, sizeof(cmd));
if (error) {
dev_err(&client->dev, "Ack obj command failed: %d\n", error);
return error;
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
index b63d7fdf0cd2..85a1f465c097 100644
--- a/drivers/input/touchscreen/s6sy761.c
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
u8 major = event[4];
u8 minor = event[5];
u8 z = event[6] & S6SY761_MASK_Z;
- u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
- u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
+ u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
+ u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);
input_mt_slot(sdata->input, tid);
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index 06f0eb04a8fd..a787a6aefc69 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -28,6 +28,7 @@
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/regulator/consumer.h>
@@ -343,10 +344,8 @@ static int silead_ts_get_id(struct i2c_client *client)
error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_ID,
sizeof(chip_id), (u8 *)&chip_id);
- if (error < 0) {
- dev_err(&client->dev, "Chip ID read error %d\n", error);
+ if (error < 0)
return error;
- }
data->chip_id = le32_to_cpu(chip_id);
dev_info(&client->dev, "Silead chip ID: 0x%8X", data->chip_id);
@@ -359,12 +358,49 @@ static int silead_ts_setup(struct i2c_client *client)
int error;
u32 status;
+ /*
+ * Some buggy BIOS-es bring up the chip in a stuck state where it
+ * blocks the I2C bus. The following steps are necessary to
+ * unstuck the chip / bus:
+ * 1. Turn off the Silead chip.
+ * 2. Try to do an I2C transfer with the chip, this will fail in
+ * response to which the I2C-bus-driver will call:
+ * i2c_recover_bus() which will unstuck the I2C-bus. Note the
+ * unstuck-ing of the I2C bus only works if we first drop the
+ * chip off the bus by turning it off.
+ * 3. Turn the chip back on.
+ *
+ * On the x86/ACPI systems where this problem is seen, step 1. and
+ * 3. require making ACPI calls and dealing with ACPI Power
+ * Resources. The workaround below runtime-suspends the chip to
+ * turn it off, leaving it up to the ACPI subsystem to deal with
+ * this.
+ */
+
+ if (device_property_read_bool(&client->dev,
+ "silead,stuck-controller-bug")) {
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_allow(&client->dev);
+
+ pm_runtime_suspend(&client->dev);
+
+ dev_warn(&client->dev, FW_BUG "Stuck I2C bus: please ignore the next 'controller timed out' error\n");
+ silead_ts_get_id(client);
+
+ /* The forbid will also resume the device */
+ pm_runtime_forbid(&client->dev);
+ pm_runtime_disable(&client->dev);
+ }
+
silead_ts_set_power(client, SILEAD_POWER_OFF);
silead_ts_set_power(client, SILEAD_POWER_ON);
error = silead_ts_get_id(client);
- if (error)
+ if (error) {
+ dev_err(&client->dev, "Chip ID read error %d\n", error);
return error;
+ }
error = silead_ts_init(client);
if (error)
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index b6f95f20f924..cd8805d71d97 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -479,7 +479,7 @@ static ssize_t stmfts_sysfs_hover_enable_write(struct device *dev,
mutex_lock(&sdata->mutex);
- if (value & sdata->hover_enabled)
+ if (value && sdata->hover_enabled)
goto out;
if (sdata->running)
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index caa3aca2ea54..d5956854ac98 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -778,6 +778,7 @@ static int sur40_probe(struct usb_interface *interface,
dev_err(&interface->dev,
"Unable to register video controls.");
v4l2_ctrl_handler_free(&sur40->hdl);
+ error = sur40->hdl.error;
goto err_unreg_v4l2;
}
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 48304e26f988..d939c1798518 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -195,6 +195,7 @@ static const struct usb_device_id usbtouch_devices[] = {
#endif
#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
+ {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
{USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
{USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
{USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES},
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 0783f44e9afe..5d5941aca16a 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1348,24 +1348,26 @@ static void increase_address_space(struct protection_domain *domain,
unsigned long flags;
u64 *pte;
+ pte = (void *)get_zeroed_page(gfp);
+ if (!pte)
+ return;
+
spin_lock_irqsave(&domain->lock, flags);
if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
/* address space already 64 bit large */
goto out;
- pte = (void *)get_zeroed_page(gfp);
- if (!pte)
- goto out;
-
*pte = PM_LEVEL_PDE(domain->mode,
iommu_virt_to_phys(domain->pt_root));
domain->pt_root = pte;
domain->mode += 1;
domain->updated = true;
+ pte = NULL;
out:
spin_unlock_irqrestore(&domain->lock, flags);
+ free_page((unsigned long)pte);
return;
}
@@ -4508,9 +4510,10 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
if (!fn)
return -ENOMEM;
iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
- irq_domain_free_fwnode(fn);
- if (!iommu->ir_domain)
+ if (!iommu->ir_domain) {
+ irq_domain_free_fwnode(fn);
return -ENOMEM;
+ }
iommu->ir_domain->parent = arch_get_ir_parent_domain();
iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 465f28a7844c..c7d0bb3b4a30 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1334,8 +1334,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
}
case IVHD_DEV_ACPI_HID: {
u16 devid;
- u8 hid[ACPIHID_HID_LEN] = {0};
- u8 uid[ACPIHID_UID_LEN] = {0};
+ u8 hid[ACPIHID_HID_LEN];
+ u8 uid[ACPIHID_UID_LEN];
int ret;
if (h->type != 0x40) {
@@ -1352,6 +1352,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
+ uid[0] = '\0';
switch (e->uidf) {
case UID_NOT_PRESENT:
@@ -1366,8 +1367,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case UID_IS_CHARACTER:
- memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
- uid[ACPIHID_UID_LEN - 1] = '\0';
+ memcpy(uid, &e->uid, e->uidl);
+ uid[e->uidl] = '\0';
break;
default:
@@ -2836,7 +2837,7 @@ static int __init parse_amd_iommu_intr(char *str)
{
for (; *str; ++str) {
if (strncmp(str, "legacy", 6) == 0) {
- amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
break;
}
if (strncmp(str, "vapic", 5) == 0) {
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 69f3d4c95b53..0948c425d652 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -259,7 +259,7 @@
#define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60)
#define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1)
#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
-#define DTE_IRQ_TABLE_LEN (8ULL << 1)
+#define DTE_IRQ_TABLE_LEN (9ULL << 1)
#define DTE_IRQ_REMAP_ENABLE 1ULL
#define PAGE_MODE_NONE 0x00
@@ -352,7 +352,7 @@
#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
-#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL)
+#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL)
#define DTE_GCR3_INDEX_A 0
#define DTE_GCR3_INDEX_B 1
@@ -410,7 +410,11 @@ extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;
-#define MAX_IRQS_PER_TABLE 256
+/*
+ * AMD IOMMU hardware only supports 512 IRTEs despite
+ * the architectural limitation of 2048 entries.
+ */
+#define MAX_IRQS_PER_TABLE 512
#define IRQ_TABLE_ALIGNMENT 128
struct irq_remap_table {
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 58da65df03f5..7a59a8ebac10 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -776,6 +776,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
might_sleep();
+ /*
+ * When memory encryption is active the device is likely not in a
+ * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
+ */
+ if (mem_encrypt_active())
+ return -ENODEV;
+
if (!amd_iommu_v2_supported())
return -ENODEV;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index d936ff765fe4..bc565f8e8ac2 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1029,8 +1029,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;
- int agaw = 0;
- int msagaw = 0;
+ int agaw = -1;
+ int msagaw = -1;
int err;
if (!drhd->reg_base_addr) {
@@ -1055,17 +1055,28 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
}
err = -EINVAL;
- agaw = iommu_calculate_agaw(iommu);
- if (agaw < 0) {
- pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
- iommu->seq_id);
- goto err_unmap;
+ if (cap_sagaw(iommu->cap) == 0) {
+ pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
+ iommu->name);
+ drhd->ignored = 1;
}
- msagaw = iommu_calculate_max_sagaw(iommu);
- if (msagaw < 0) {
- pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
- iommu->seq_id);
- goto err_unmap;
+
+ if (!drhd->ignored) {
+ agaw = iommu_calculate_agaw(iommu);
+ if (agaw < 0) {
+ pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
+ iommu->seq_id);
+ drhd->ignored = 1;
+ }
+ }
+ if (!drhd->ignored) {
+ msagaw = iommu_calculate_max_sagaw(iommu);
+ if (msagaw < 0) {
+ pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
+ iommu->seq_id);
+ drhd->ignored = 1;
+ agaw = -1;
+ }
}
iommu->agaw = agaw;
iommu->msagaw = msagaw;
@@ -1092,7 +1103,12 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
raw_spin_lock_init(&iommu->register_lock);
- if (intel_iommu_enabled) {
+ /*
+ * This is only for hotplug; at boot time intel_iommu_enabled won't
+ * be set yet. When intel_iommu_init() runs, it registers the units
+ * present at boot time, then sets intel_iommu_enabled.
+ */
+ if (intel_iommu_enabled && !drhd->ignored) {
err = iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups,
"%s", iommu->name);
@@ -1103,13 +1119,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
err = iommu_device_register(&iommu->iommu);
if (err)
- goto err_unmap;
+ goto err_sysfs;
}
drhd->iommu = iommu;
+ iommu->drhd = drhd;
return 0;
+err_sysfs:
+ iommu_device_sysfs_remove(&iommu->iommu);
err_unmap:
unmap_iommu(iommu);
error_free_seq_id:
@@ -1121,7 +1140,7 @@ error:
static void free_iommu(struct intel_iommu *iommu)
{
- if (intel_iommu_enabled) {
+ if (intel_iommu_enabled && !iommu->drhd->ignored) {
iommu_device_unregister(&iommu->iommu);
iommu_device_sysfs_remove(&iommu->iommu);
}
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 1bd0cd7168df..4bf6049dd2c7 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1302,13 +1302,17 @@ static int exynos_iommu_of_xlate(struct device *dev,
return -ENODEV;
data = platform_get_drvdata(sysmmu);
- if (!data)
+ if (!data) {
+ put_device(&sysmmu->dev);
return -ENODEV;
+ }
if (!owner) {
owner = kzalloc(sizeof(*owner), GFP_KERNEL);
- if (!owner)
+ if (!owner) {
+ put_device(&sysmmu->dev);
return -ENOMEM;
+ }
INIT_LIST_HEAD(&owner->controllers);
mutex_init(&owner->rpm_lock);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2a83fc31dd47..d2166dfc8b3f 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3364,6 +3364,12 @@ static int __init init_dmars(void)
if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0;
+
+ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
+ pr_info("Disable batched IOTLB flush due to virtualization");
+ intel_iommu_strict = 1;
+ }
+
#ifdef CONFIG_INTEL_IOMMU_SVM
if (pasid_enabled(iommu))
intel_svm_init(iommu);
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 5944d3b4dca3..ef3aadec980e 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -620,14 +620,15 @@ static irqreturn_t prq_event_thread(int irq, void *d)
* any faults on kernel addresses. */
if (!svm->mm)
goto bad_req;
- /* If the mm is already defunct, don't handle faults. */
- if (!mmget_not_zero(svm->mm))
- goto bad_req;
/* If address is not canonical, return invalid response */
if (!is_canonical_address(address))
goto bad_req;
+ /* If the mm is already defunct, don't handle faults. */
+ if (!mmget_not_zero(svm->mm))
+ goto bad_req;
+
down_read(&svm->mm->mmap_sem);
vma = find_extend_vma(svm->mm, address);
if (!vma || address < vma->vm_start)
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 967450bd421a..cd2e5b44119a 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -479,12 +479,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
/* Enable interrupt-remapping */
iommu->gcmd |= DMA_GCMD_IRE;
- iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
+ /* Block compatibility-format MSIs */
+ if (sts & DMA_GSTS_CFIS) {
+ iommu->gcmd &= ~DMA_GCMD_CFI;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, !(sts & DMA_GSTS_CFIS), sts);
+ }
+
/*
* With CFI clear in the Global Command register, we should be
* protected from dangerous (i.e. compatibility) interrupts
@@ -536,8 +542,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
0, INTR_REMAP_TABLE_ENTRIES,
fn, &intel_ir_domain_ops,
iommu);
- irq_domain_free_fwnode(fn);
if (!iommu->ir_domain) {
+ irq_domain_free_fwnode(fn);
pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
goto out_free_bitmap;
}
@@ -601,13 +607,21 @@ out_free_table:
static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
+ struct fwnode_handle *fn;
+
if (iommu && iommu->ir_table) {
if (iommu->ir_msi_domain) {
+ fn = iommu->ir_msi_domain->fwnode;
+
irq_domain_remove(iommu->ir_msi_domain);
+ irq_domain_free_fwnode(fn);
iommu->ir_msi_domain = NULL;
}
if (iommu->ir_domain) {
+ fn = iommu->ir_domain->fwnode;
+
irq_domain_remove(iommu->ir_domain);
+ irq_domain_free_fwnode(fn);
iommu->ir_domain = NULL;
}
free_pages((unsigned long)iommu->ir_table->base,
@@ -1359,6 +1373,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
irq_data = irq_domain_get_irq_data(domain, virq + i);
irq_cfg = irqd_cfg(irq_data);
if (!irq_data || !irq_cfg) {
+ if (!i)
+ kfree(data);
ret = -EINVAL;
goto out_free_data;
}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 00e1c908cd8e..85ef6c9bc898 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -392,7 +392,7 @@ struct iommu_group *iommu_group_alloc(void)
NULL, "%d", group->id);
if (ret) {
ida_simple_remove(&iommu_group_ida, group->id);
- kfree(group);
+ kobject_put(&group->kobj);
return ERR_PTR(ret);
}
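
The fix above drops the group's reference instead of calling kfree() directly: once a kobject has been initialised, kobject_put() (and the release callback it may invoke) is the only correct way to free it, since the release hook also owns any secondary resources. A userspace analogue of that ownership rule (the obj type and release hook are made up for the example):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refcount;
        void (*release)(struct obj *);
        char *name;
    };

    static void obj_release(struct obj *o)
    {
        free(o->name);   /* secondary resources are freed here...  */
        free(o);         /* ...and only then the object itself     */
    }

    static void obj_put(struct obj *o)
    {
        if (--o->refcount == 0)
            o->release(o);
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        o->refcount = 1;
        o->release = obj_release;
        o->name = malloc(16);

        /* Error path after initialisation: free(o) here would leak
         * o->name; dropping the reference runs the release hook.   */
        obj_put(o);
        return 0;
    }
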
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 34c058c24b9d..ce5cd05253db 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -814,7 +814,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
for (i = 0 ; i < mag->size; ++i) {
struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
- BUG_ON(!iova);
+ if (WARN_ON(!iova))
+ continue;
+
private_free_iova(iovad, iova);
}
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 50217548c3b8..5ce55fabc9d8 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -101,8 +101,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
mutex_lock(&iommu_debug_lock);
bytes = omap_iommu_dump_ctx(obj, p, count);
+ if (bytes < 0)
+ goto err;
bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
+err:
mutex_unlock(&iommu_debug_lock);
kfree(buf);
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 9a6ed5eeaad1..b0a4a3d2f60e 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -797,8 +797,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
qcom_iommu->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
+ if (res) {
qcom_iommu->local_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qcom_iommu->local_base))
+ return PTR_ERR(qcom_iommu->local_base);
+ }
qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
if (IS_ERR(qcom_iommu->iface_clk)) {
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index 8a9c169b6f99..b5eec18ad59a 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -309,6 +309,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to map driver user space!",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
+ res = -ENOMEM;
goto out_release_mem8_space;
}
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
index 23a3b877f7f1..ede02dc2bcd0 100644
--- a/drivers/irqchip/irq-alpine-msi.c
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -165,8 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
return 0;
err_sgi:
- while (--i >= 0)
- irq_domain_free_irqs_parent(domain, virq, i);
+ irq_domain_free_irqs_parent(domain, virq, i - 1);
alpine_msix_free_sgi(priv, sgi, nr_irqs);
return err;
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index f9b73336a39e..cd58c123f547 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -49,7 +49,6 @@
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
-#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
@@ -2458,6 +2457,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
{
msi_alloc_info_t *info = args;
struct its_device *its_dev = info->scratchpad[0].ptr;
+ struct irq_data *irqd;
irq_hw_number_t hwirq;
int err;
int i;
@@ -2473,7 +2473,9 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
irq_domain_set_hwirq_and_chip(domain, virq + i,
hwirq + i, &its_irq_chip, its_dev);
- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
+ irqd = irq_get_irq_data(virq + i);
+ irqd_set_single_target(irqd);
+ irqd_set_affinity_on_activate(irqd);
pr_debug("ID:%d pID:%d vID:%d\n",
(int)(hwirq + i - its_dev->event_map.lpi_base),
(int)(hwirq + i), virq + i);
@@ -2858,12 +2860,18 @@ static int its_vpe_set_irqchip_state(struct irq_data *d,
return 0;
}
+static int its_vpe_retrigger(struct irq_data *d)
+{
+ return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
+}
+
static struct irq_chip its_vpe_irq_chip = {
.name = "GICv4-vpe",
.irq_mask = its_vpe_mask_irq,
.irq_unmask = its_vpe_unmask_irq,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = its_vpe_set_affinity,
+ .irq_retrigger = its_vpe_retrigger,
.irq_set_irqchip_state = its_vpe_set_irqchip_state,
.irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
};
@@ -3231,9 +3239,6 @@ static int its_save_disable(void)
list_for_each_entry(its, &its_nodes, entry) {
void __iomem *base;
- if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
- continue;
-
base = its->base;
its->ctlr_save = readl_relaxed(base + GITS_CTLR);
err = its_force_quiescent(base);
@@ -3252,9 +3257,6 @@ err:
list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
void __iomem *base;
- if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
- continue;
-
base = its->base;
writel_relaxed(its->ctlr_save, base + GITS_CTLR);
}
@@ -3274,9 +3276,6 @@ static void its_restore_enable(void)
void __iomem *base;
int i;
- if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
- continue;
-
base = its->base;
/*
@@ -3284,7 +3283,10 @@ static void its_restore_enable(void)
* don't restore it since writing to CBASER or BASER<n>
* registers is undefined according to the GIC v3 ITS
* Specification.
+ *
+ * Firmware resuming with the ITS enabled is terminally broken.
*/
+ WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
ret = its_force_quiescent(base);
if (ret) {
pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
@@ -3549,9 +3551,6 @@ static int __init its_probe_one(struct resource *res,
ctlr |= GITS_CTLR_ImDe;
writel_relaxed(ctlr, its->base + GITS_CTLR);
- if (GITS_TYPER_HCC(typer))
- its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
-
err = its_init_domain(handle, its);
if (err)
goto out_free_tables;
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index fbfa7ff6deb1..9d011281d4b5 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -297,7 +297,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
reg = of_get_property(np, "mbi-alias", NULL);
if (reg) {
mbi_phys_base = of_translate_address(np, reg);
- if (mbi_phys_base == OF_BAD_ADDR) {
+ if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) {
ret = -ENXIO;
goto err_free_mbi;
}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index ced10c44b68a..b5417012afc8 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -324,10 +324,8 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
- unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
- u32 val, mask, bit;
- unsigned long flags;
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+ unsigned int cpu;
if (!force)
cpu = cpumask_any_and(mask_val, cpu_online_mask);
@@ -337,13 +335,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
return -EINVAL;
- gic_lock_irqsave(flags);
- mask = 0xff << shift;
- bit = gic_cpu_map[cpu] << shift;
- val = readl_relaxed(reg) & ~mask;
- writel_relaxed(val | bit, reg);
- gic_unlock_irqrestore(flags);
-
+ writeb_relaxed(gic_cpu_map[cpu], reg);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK_DONE;
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index f7fdbf5d183b..c98358be0bc8 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -231,10 +231,16 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain,
return 0;
}
+static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ platform_msi_domain_free(domain, virq, nr_irqs);
+}
+
static const struct irq_domain_ops mbigen_domain_ops = {
.translate = mbigen_domain_translate,
.alloc = mbigen_irq_domain_alloc,
- .free = irq_domain_free_irqs_common,
+ .free = mbigen_irq_domain_free,
};
static int mbigen_of_create_domain(struct platform_device *pdev,
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 66f97fde13d8..51e09f6c653c 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -201,6 +201,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
+ ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
+ &mips_mt_cpu_irq_controller,
+ NULL);
+
+ if (ret)
+ return ret;
+
ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
if (ret)
return ret;
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index 90aaf190157f..42455f31b061 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -23,7 +23,7 @@
#include <linux/spinlock.h>
struct mtk_sysirq_chip_data {
- spinlock_t lock;
+ raw_spinlock_t lock;
u32 nr_intpol_bases;
void __iomem **intpol_bases;
u32 *intpol_words;
@@ -45,7 +45,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
reg_index = chip_data->which_word[hwirq];
offset = hwirq & 0x1f;
- spin_lock_irqsave(&chip_data->lock, flags);
+ raw_spin_lock_irqsave(&chip_data->lock, flags);
value = readl_relaxed(base + reg_index * 4);
if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) {
if (type == IRQ_TYPE_LEVEL_LOW)
@@ -61,7 +61,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
data = data->parent_data;
ret = data->chip->irq_set_type(data, type);
- spin_unlock_irqrestore(&chip_data->lock, flags);
+ raw_spin_unlock_irqrestore(&chip_data->lock, flags);
return ret;
}
@@ -220,7 +220,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
ret = -ENOMEM;
goto out_free_which_word;
}
- spin_lock_init(&chip_data->lock);
+ raw_spin_lock_init(&chip_data->lock);
return 0;
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 97b27f338c30..f605470855f1 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -382,6 +382,16 @@ static void stm32_irq_ack(struct irq_data *d)
irq_gc_unlock(gc);
}
+/* directly set the target bit without reading first. */
+static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ void __iomem *base = chip_data->host_data->base;
+ u32 val = BIT(d->hwirq % IRQS_PER_BANK);
+
+ writel_relaxed(val, base + reg);
+}
+
static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
{
struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
@@ -415,9 +425,9 @@ static void stm32_exti_h_eoi(struct irq_data *d)
raw_spin_lock(&chip_data->rlock);
- stm32_exti_set_bit(d, stm32_bank->rpr_ofst);
+ stm32_exti_write_bit(d, stm32_bank->rpr_ofst);
if (stm32_bank->fpr_ofst != UNDEF_REG)
- stm32_exti_set_bit(d, stm32_bank->fpr_ofst);
+ stm32_exti_write_bit(d, stm32_bank->fpr_ofst);
raw_spin_unlock(&chip_data->rlock);
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 928858dada75..f1386733d3bc 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
@@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d)
static void fpga_irq_handle(struct irq_desc *desc)
{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
- u32 status = readl(f->base + IRQ_STATUS);
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+ status = readl(f->base + IRQ_STATUS);
if (status == 0) {
do_bad_IRQ(desc);
- return;
+ goto out;
}
do {
@@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc)
status &= ~(1 << irq);
generic_handle_irq(irq_find_mapping(f->domain, irq));
} while (status);
+
+out:
+ chained_irq_exit(chip, desc);
}
/*
@@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node,
if (of_property_read_u32(node, "valid-mask", &valid_mask))
valid_mask = 0;
+ writel(clear_mask, base + IRQ_ENABLE_CLEAR);
+ writel(clear_mask, base + FIQ_ENABLE_CLEAR);
+
/* Some chips are cascaded from a parent IRQ */
parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) {
@@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node,
fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
- writel(clear_mask, base + IRQ_ENABLE_CLEAR);
- writel(clear_mask, base + FIQ_ENABLE_CLEAR);
-
/*
* On Versatile AB/PB, some secondary interrupts have a direct
* pass-thru to the primary controller for IRQs 20 and 22-31 which need
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index a4ceb61c5b60..18de41a266eb 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -846,7 +846,7 @@ EXPORT_SYMBOL(capi20_put_message);
* Return value: CAPI result code
*/
-u16 capi20_get_manufacturer(u32 contr, u8 *buf)
+u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN])
{
struct capi_ctr *ctr;
u16 ret;
@@ -916,7 +916,7 @@ EXPORT_SYMBOL(capi20_get_version);
* Return value: CAPI result code
*/
-u16 capi20_get_serial(u32 contr, u8 *serial)
+u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN])
{
struct capi_ctr *ctr;
u16 ret;
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index 3e01012be4ab..95a0d728eecc 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -645,17 +645,19 @@ static void
release_io(struct inf_hw *hw)
{
if (hw->cfg.mode) {
- if (hw->cfg.p) {
+ if (hw->cfg.mode == AM_MEMIO) {
release_mem_region(hw->cfg.start, hw->cfg.size);
- iounmap(hw->cfg.p);
+ if (hw->cfg.p)
+ iounmap(hw->cfg.p);
} else
release_region(hw->cfg.start, hw->cfg.size);
hw->cfg.mode = AM_NONE;
}
if (hw->addr.mode) {
- if (hw->addr.p) {
+ if (hw->addr.mode == AM_MEMIO) {
release_mem_region(hw->addr.start, hw->addr.size);
- iounmap(hw->addr.p);
+ if (hw->addr.p)
+ iounmap(hw->addr.p);
} else
release_region(hw->addr.start, hw->addr.size);
hw->addr.mode = AM_NONE;
@@ -685,9 +687,12 @@ setup_io(struct inf_hw *hw)
(ulong)hw->cfg.start, (ulong)hw->cfg.size);
return err;
}
- if (hw->ci->cfg_mode == AM_MEMIO)
- hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
hw->cfg.mode = hw->ci->cfg_mode;
+ if (hw->ci->cfg_mode == AM_MEMIO) {
+ hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
+ if (!hw->cfg.p)
+ return -ENOMEM;
+ }
if (debug & DEBUG_HW)
pr_notice("%s: IO cfg %lx (%lu bytes) mode%d\n",
hw->name, (ulong)hw->cfg.start,
@@ -712,9 +717,12 @@ setup_io(struct inf_hw *hw)
(ulong)hw->addr.start, (ulong)hw->addr.size);
return err;
}
- if (hw->ci->addr_mode == AM_MEMIO)
- hw->addr.p = ioremap(hw->addr.start, hw->addr.size);
hw->addr.mode = hw->ci->addr_mode;
+ if (hw->ci->addr_mode == AM_MEMIO) {
+ hw->addr.p = ioremap(hw->addr.start, hw->addr.size);
+ if (!hw->addr.p)
+ return -ENOMEM;
+ }
if (debug & DEBUG_HW)
pr_notice("%s: IO addr %lx (%lu bytes) mode%d\n",
hw->name, (ulong)hw->addr.start,
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index 4d78f870435e..71e635d6c64a 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -710,7 +710,7 @@ isac_release(struct isac_hw *isac)
{
if (isac->type & IPAC_TYPE_ISACX)
WriteISAC(isac, ISACX_MASK, 0xff);
- else
+ else if (isac->type != 0)
WriteISAC(isac, ISAC_MASK, 0xff);
if (isac->dch.timer.function != NULL) {
del_timer(&isac->dch.timer);
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 5acf6ab67cd3..6f60aced11c5 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -52,10 +52,7 @@ static const struct w6692map w6692_map[] =
{W6692_USR, "USR W6692"}
};
-#ifndef PCI_VENDOR_ID_USR
-#define PCI_VENDOR_ID_USR 0x16ec
#define PCI_DEVICE_ID_USR_6692 0x3409
-#endif
struct w6692_ch {
struct bchannel bch;
diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig
index c0730d5c734d..fb61181a5c4f 100644
--- a/drivers/isdn/mISDN/Kconfig
+++ b/drivers/isdn/mISDN/Kconfig
@@ -12,6 +12,7 @@ if MISDN != n
config MISDN_DSP
tristate "Digital Audio Processing of transparent data"
depends on MISDN
+ select BITREVERSE
help
Enable support for digital audio processing capability.
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 3c7e3487b373..4e63dd2bfcf8 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -173,6 +173,7 @@ void led_classdev_suspend(struct led_classdev *led_cdev)
{
led_cdev->flags |= LED_SUSPENDED;
led_set_brightness_nopm(led_cdev, 0);
+ flush_work(&led_cdev->set_brightness_work);
}
EXPORT_SYMBOL_GPL(led_classdev_suspend);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 005b839f6eb9..ec4c957c36b6 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -317,14 +317,15 @@ void led_trigger_event(struct led_trigger *trig,
enum led_brightness brightness)
{
struct led_classdev *led_cdev;
+ unsigned long flags;
if (!trig)
return;
- read_lock(&trig->leddev_list_lock);
+ read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
led_set_brightness(led_cdev, brightness);
- read_unlock(&trig->leddev_list_lock);
+ read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
EXPORT_SYMBOL_GPL(led_trigger_event);
@@ -335,11 +336,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
int invert)
{
struct led_classdev *led_cdev;
+ unsigned long flags;
if (!trig)
return;
- read_lock(&trig->leddev_list_lock);
+ read_lock_irqsave(&trig->leddev_list_lock, flags);
list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
led_blink_set_oneshot(led_cdev, delay_on, delay_off,
@@ -347,7 +349,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
else
led_blink_set(led_cdev, delay_on, delay_off);
}
- read_unlock(&trig->leddev_list_lock);
+ read_unlock_irqrestore(&trig->leddev_list_lock, flags);
}
void led_trigger_blink(struct led_trigger *trig,
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 77a104d2b124..13f414ff6fd0 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -207,21 +207,33 @@ static int pm860x_led_probe(struct platform_device *pdev)
data->cdev.brightness_set_blocking = pm860x_led_set;
mutex_init(&data->lock);
- ret = devm_led_classdev_register(chip->dev, &data->cdev);
+ ret = led_classdev_register(chip->dev, &data->cdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
return ret;
}
pm860x_led_set(&data->cdev, 0);
+
+ platform_set_drvdata(pdev, data);
+
return 0;
}
+static int pm860x_led_remove(struct platform_device *pdev)
+{
+ struct pm860x_led *data = platform_get_drvdata(pdev);
+
+ led_classdev_unregister(&data->cdev);
+
+ return 0;
+}
static struct platform_driver pm860x_led_driver = {
.driver = {
.name = "88pm860x-led",
},
.probe = pm860x_led_probe,
+ .remove = pm860x_led_remove,
};
module_platform_driver(pm860x_led_driver);
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 2cfd9389ee96..b944ae828004 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -336,7 +336,7 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
led->cdev.brightness_set = bcm6328_led_set;
led->cdev.blink_set = bcm6328_blink_set;
- rc = led_classdev_register(dev, &led->cdev);
+ rc = devm_led_classdev_register(dev, &led->cdev);
if (rc < 0)
return rc;
diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c
index b2cc06618abe..a86ab6197a4e 100644
--- a/drivers/leds/leds-bcm6358.c
+++ b/drivers/leds/leds-bcm6358.c
@@ -141,7 +141,7 @@ static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg,
led->cdev.brightness_set = bcm6358_led_set;
- rc = led_classdev_register(dev, &led->cdev);
+ rc = devm_led_classdev_register(dev, &led->cdev);
if (rc < 0)
return rc;
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index 5ff7d72f73aa..ecc265bb69a0 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -113,12 +113,23 @@ static int da903x_led_probe(struct platform_device *pdev)
led->flags = pdata->flags;
led->master = pdev->dev.parent;
- ret = devm_led_classdev_register(led->master, &led->cdev);
+ ret = led_classdev_register(led->master, &led->cdev);
if (ret) {
dev_err(&pdev->dev, "failed to register LED %d\n", id);
return ret;
}
+ platform_set_drvdata(pdev, led);
+
+ return 0;
+}
+
+static int da903x_led_remove(struct platform_device *pdev)
+{
+ struct da903x_led *led = platform_get_drvdata(pdev);
+
+ led_classdev_unregister(&led->cdev);
+
return 0;
}
@@ -127,6 +138,7 @@ static struct platform_driver da903x_led_driver = {
.name = "da903x-led",
},
.probe = da903x_led_probe,
+ .remove = da903x_led_remove,
};
module_platform_driver(da903x_led_driver);
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index 72224b599ffc..c1e562a4d6ad 100644
--- a/drivers/leds/leds-lm3533.c
+++ b/drivers/leds/leds-lm3533.c
@@ -698,7 +698,7 @@ static int lm3533_led_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, led);
- ret = devm_led_classdev_register(pdev->dev.parent, &led->cdev);
+ ret = led_classdev_register(pdev->dev.parent, &led->cdev);
if (ret) {
dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id);
return ret;
@@ -708,13 +708,18 @@ static int lm3533_led_probe(struct platform_device *pdev)
ret = lm3533_led_setup(led, pdata);
if (ret)
- return ret;
+ goto err_deregister;
ret = lm3533_ctrlbank_enable(&led->cb);
if (ret)
- return ret;
+ goto err_deregister;
return 0;
+
+err_deregister:
+ led_classdev_unregister(&led->cdev);
+
+ return ret;
}
static int lm3533_led_remove(struct platform_device *pdev)
@@ -724,6 +729,7 @@ static int lm3533_led_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "%s\n", __func__);
lm3533_ctrlbank_disable(&led->cb);
+ led_classdev_unregister(&led->cdev);
return 0;
}
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index 6cb94f9a2f3f..b9c60dd2b132 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -168,18 +168,19 @@ static int lm355x_chip_init(struct lm355x_chip_data *chip)
/* input and output pins configuration */
switch (chip->type) {
case CHIP_LM3554:
- reg_val = pdata->pin_tx2 | pdata->ntc_pin;
+ reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin;
ret = regmap_update_bits(chip->regmap, 0xE0, 0x28, reg_val);
if (ret < 0)
goto out;
- reg_val = pdata->pass_mode;
+ reg_val = (u32)pdata->pass_mode;
ret = regmap_update_bits(chip->regmap, 0xA0, 0x04, reg_val);
if (ret < 0)
goto out;
break;
case CHIP_LM3556:
- reg_val = pdata->pin_tx2 | pdata->ntc_pin | pdata->pass_mode;
+ reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin |
+ (u32)pdata->pass_mode;
ret = regmap_update_bits(chip->regmap, 0x0A, 0xC4, reg_val);
if (ret < 0)
goto out;
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index fd64df5a57a5..e5413d93f0a1 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -320,7 +320,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
usleep_range(3000, 6000);
ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret)
- return ret;
+ goto out;
status &= LP5523_ENG_STATUS_MASK;
if (status != LP5523_ENG_STATUS_MASK) {
diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
index 1ee48cb21df9..022e973dc7c3 100644
--- a/drivers/leds/leds-mlxreg.c
+++ b/drivers/leds/leds-mlxreg.c
@@ -209,8 +209,8 @@ static int mlxreg_led_config(struct mlxreg_led_priv_data *priv)
brightness = LED_OFF;
led_data->base_color = MLXREG_LED_GREEN_SOLID;
}
- sprintf(led_data->led_cdev_name, "%s:%s", "mlxreg",
- data->label);
+ snprintf(led_data->led_cdev_name, sizeof(led_data->led_cdev_name),
+ "mlxreg:%s", data->label);
led_cdev->name = led_data->led_cdev_name;
led_cdev->brightness = brightness;
led_cdev->max_brightness = LED_ON;
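
The change above bounds the formatted write to the size of led_cdev_name rather than trusting the label to fit. A short sketch of how snprintf() truncates instead of overrunning the destination (buffer size and label below are arbitrary):

    #include <stdio.h>

    int main(void)
    {
        char name[16];
        const char *label = "a-rather-long-led-label";

        /* snprintf() never writes past sizeof(name) and always
         * NUL-terminates; the result is truncated instead of
         * overflowing the destination as sprintf() could.       */
        int n = snprintf(name, sizeof(name), "mlxreg:%s", label);

        printf("wanted %d bytes, stored \"%s\"\n", n, name);
        return 0;
    }
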
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index c5798b92e4d3..d926edcb04ee 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -273,12 +273,23 @@ static int wm831x_status_probe(struct platform_device *pdev)
drvdata->cdev.blink_set = wm831x_status_blink_set;
drvdata->cdev.groups = wm831x_status_groups;
- ret = devm_led_classdev_register(wm831x->dev, &drvdata->cdev);
+ ret = led_classdev_register(wm831x->dev, &drvdata->cdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
return ret;
}
+ platform_set_drvdata(pdev, drvdata);
+
+ return 0;
+}
+
+static int wm831x_status_remove(struct platform_device *pdev)
+{
+ struct wm831x_status *drvdata = platform_get_drvdata(pdev);
+
+ led_classdev_unregister(&drvdata->cdev);
+
return 0;
}
@@ -287,6 +298,7 @@ static struct platform_driver wm831x_status_driver = {
.name = "wm831x-status",
},
.probe = wm831x_status_probe,
+ .remove = wm831x_status_remove,
};
module_platform_driver(wm831x_status_driver);
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 439bf90d084d..20706da7aa1c 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -19,6 +19,7 @@ if NVM
config NVM_PBLK
tristate "Physical Block Device Open-Channel SSD target"
+ select CRC32
help
Allows an open-channel SSD to be exposed as a block device to the
host. The target assumes the device exposes raw flash and must be
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index fec91db1142e..6ece5c1a7840 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -133,14 +133,6 @@ static int create_cpu_loop(int cpu)
s32 tmax;
int fmin;
- /* Get PID params from the appropriate SAT */
- hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
- if (hdr == NULL) {
- printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
- return -EINVAL;
- }
- piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
-
/* Get FVT params to get Tmax; if not found, assume default */
hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL);
if (hdr) {
@@ -153,6 +145,16 @@ static int create_cpu_loop(int cpu)
if (tmax < cpu_all_tmax)
cpu_all_tmax = tmax;
+ kfree(hdr);
+
+ /* Get PID params from the appropriate SAT */
+ hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
+ if (hdr == NULL) {
+ printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
+ return -EINVAL;
+ }
+ piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
+
/*
* Darwin has a minimum fan speed of 1000 rpm for the 4-way and
* 515 for the 2-way. That appears to be overkill, so for now,
@@ -175,6 +177,9 @@ static int create_cpu_loop(int cpu)
pid.min = fmin;
wf_cpu_pid_init(&cpu_pid[cpu], &pid);
+
+ kfree(hdr);
+
return 0;
}
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 055c90b8253c..10a559cfb7ea 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -85,9 +85,12 @@ static void msg_submit(struct mbox_chan *chan)
exit:
spin_unlock_irqrestore(&chan->lock, flags);
- if (!err && (chan->txdone_method & TXDONE_BY_POLL))
- /* kick start the timer immediately to avoid delays */
- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
+ /* kick start the timer immediately to avoid delays */
+ if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
+ /* but only if not already active */
+ if (!hrtimer_active(&chan->mbox->poll_hrt))
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
+ }
}
static void tx_tick(struct mbox_chan *chan, int r)
@@ -125,11 +128,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
struct mbox_chan *chan = &mbox->chans[i];
if (chan->active_req && chan->cl) {
+ resched = true;
txdone = chan->mbox->ops->last_tx_done(chan);
if (txdone)
tx_tick(chan, 0);
- else
- resched = true;
}
}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 2a2f189dd37c..6a380ed4919a 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -264,7 +264,7 @@ struct bcache_device {
#define BCACHE_DEV_UNLINK_DONE 2
#define BCACHE_DEV_WB_RUNNING 3
#define BCACHE_DEV_RATE_DW_RUNNING 4
- unsigned int nr_stripes;
+ int nr_stripes;
unsigned int stripe_size;
atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes;
@@ -585,6 +585,7 @@ struct cache_set {
*/
wait_queue_head_t btree_cache_wait;
struct task_struct *btree_cache_alloc_lock;
+ spinlock_t btree_cannibalize_lock;
/*
* When we free a btree node, we increment the gen of the bucket the
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 268f1b685084..ec48cf86cab6 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -321,7 +321,7 @@ int bch_btree_keys_alloc(struct btree_keys *b,
b->page_order = page_order;
- t->data = (void *) __get_free_pages(gfp, b->page_order);
+ t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order);
if (!t->data)
goto err;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index bb40bd66a10e..e388e7bb7b5d 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -830,7 +830,7 @@ int bch_btree_cache_alloc(struct cache_set *c)
mutex_init(&c->verify_lock);
c->verify_ondisk = (void *)
- __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
+ __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(bucket_pages(c)));
c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
@@ -876,15 +876,17 @@ out:
static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
- struct task_struct *old;
-
- old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
- if (old && old != current) {
+ spin_lock(&c->btree_cannibalize_lock);
+ if (likely(c->btree_cache_alloc_lock == NULL)) {
+ c->btree_cache_alloc_lock = current;
+ } else if (c->btree_cache_alloc_lock != current) {
if (op)
prepare_to_wait(&c->btree_cache_wait, &op->wait,
TASK_UNINTERRUPTIBLE);
+ spin_unlock(&c->btree_cannibalize_lock);
return -EINTR;
}
+ spin_unlock(&c->btree_cannibalize_lock);
return 0;
}
@@ -919,10 +921,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
*/
static void bch_cannibalize_unlock(struct cache_set *c)
{
+ spin_lock(&c->btree_cannibalize_lock);
if (c->btree_cache_alloc_lock == current) {
c->btree_cache_alloc_lock = NULL;
wake_up(&c->btree_cache_wait);
}
+ spin_unlock(&c->btree_cannibalize_lock);
}
static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
@@ -1432,7 +1436,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
if (__set_blocks(n1, n1->keys + n2->keys,
block_bytes(b->c)) >
btree_blocks(new_nodes[i]))
- goto out_nocoalesce;
+ goto out_unlock_nocoalesce;
keys = n2->keys;
/* Take the key of the node we're getting rid of */
@@ -1461,7 +1465,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
if (__bch_keylist_realloc(&keylist,
bkey_u64s(&new_nodes[i]->key)))
- goto out_nocoalesce;
+ goto out_unlock_nocoalesce;
bch_btree_node_write(new_nodes[i], &cl);
bch_keylist_add(&keylist, &new_nodes[i]->key);
@@ -1507,6 +1511,10 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
/* Invalidated our iterator */
return -EINTR;
+out_unlock_nocoalesce:
+ for (i = 0; i < nodes; i++)
+ mutex_unlock(&new_nodes[i]->write_lock);
+
out_nocoalesce:
closure_sync(&cl);
bch_keylist_free(&keylist);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 7bb15cddca5e..182c2b7bd960 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -864,8 +864,8 @@ int bch_journal_alloc(struct cache_set *c)
j->w[1].c = c;
if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
- !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
- !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
+ !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
+ !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
return -ENOMEM;
return 0;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 5b5cbfadd003..7787ec42f81e 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -775,7 +775,9 @@ static void bcache_device_free(struct bcache_device *d)
bcache_device_detach(d);
if (disk) {
- if (disk->flags & GENHD_FL_UP)
+ bool disk_added = (disk->flags & GENHD_FL_UP) != 0;
+
+ if (disk_added)
del_gendisk(disk);
if (disk->queue)
@@ -783,7 +785,8 @@ static void bcache_device_free(struct bcache_device *d)
ida_simple_remove(&bcache_device_idx,
first_minor_to_idx(disk->first_minor));
- put_disk(disk);
+ if (disk_added)
+ put_disk(disk);
}
bioset_exit(&d->bio_split);
@@ -1690,7 +1693,7 @@ void bch_cache_set_unregister(struct cache_set *c)
}
#define alloc_bucket_pages(gfp, c) \
- ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
+ ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c))))
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
@@ -1734,6 +1737,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
sema_init(&c->sb_write_mutex, 1);
mutex_init(&c->bucket_lock);
init_waitqueue_head(&c->btree_cache_wait);
+ spin_lock_init(&c->btree_cannibalize_lock);
init_waitqueue_head(&c->bucket_wait);
init_waitqueue_head(&c->gc_wait);
sema_init(&c->uuid_write_mutex, 1);
@@ -2010,7 +2014,14 @@ found:
sysfs_create_link(&c->kobj, &ca->kobj, buf))
goto err;
- if (ca->sb.seq > c->sb.seq) {
+ /*
+ * A special case is when both ca->sb.seq and c->sb.seq are 0;
+ * this happens on a newly created cache device whose super
+ * block has never been flushed yet. In that case c->sb.version
+ * and the other members should be updated too, otherwise we
+ * end up with a mistaken super block version in the cache set.
+ */
+ if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
c->sb.version = ca->sb.version;
memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
c->sb.flags = ca->sb.flags;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index b5fc3c6c7178..aa58833fb012 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -506,15 +506,19 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
uint64_t offset, int nr_sectors)
{
struct bcache_device *d = c->devices[inode];
- unsigned int stripe_offset, stripe, sectors_dirty;
+ unsigned int stripe_offset, sectors_dirty;
+ int stripe;
if (!d)
return;
+ stripe = offset_to_stripe(d, offset);
+ if (stripe < 0)
+ return;
+
if (UUID_FLASH_ONLY(&c->uuids[inode]))
atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
- stripe = offset_to_stripe(d, offset);
stripe_offset = offset & (d->stripe_size - 1);
while (nr_sectors) {
@@ -554,12 +558,12 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
static void refill_full_stripes(struct cached_dev *dc)
{
struct keybuf *buf = &dc->writeback_keys;
- unsigned int start_stripe, stripe, next_stripe;
+ unsigned int start_stripe, next_stripe;
+ int stripe;
bool wrapped = false;
stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
-
- if (stripe >= dc->disk.nr_stripes)
+ if (stripe < 0)
stripe = 0;
start_stripe = stripe;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index e75dc33339f6..b902e574c5c4 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -28,10 +28,22 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
-static inline unsigned int offset_to_stripe(struct bcache_device *d,
+static inline int offset_to_stripe(struct bcache_device *d,
uint64_t offset)
{
do_div(offset, d->stripe_size);
+
+ /* d->nr_stripes is in range [1, INT_MAX] */
+ if (unlikely(offset >= d->nr_stripes)) {
+ pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
+ offset, d->nr_stripes);
+ return -EINVAL;
+ }
+
+ /*
+ * Here offset is definitely smaller than INT_MAX, so
+ * returning it as an int will never overflow.
+ */
return offset;
}
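
The reworked helper above validates the computed stripe index and reports an error instead of letting callers index past nr_stripes. A userspace sketch of the same bounds check (stripe size and count are arbitrary values for the example):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_STRIPES   8           /* in range [1, INT_MAX] */
    #define STRIPE_SIZE  (1u << 10)  /* sectors per stripe    */

    /* Return the stripe index for an offset, or -EINVAL if it would
     * fall outside the device's nr_stripes. */
    static int offset_to_stripe(uint64_t offset)
    {
        offset /= STRIPE_SIZE;
        if (offset >= NR_STRIPES)
            return -EINVAL;
        return (int)offset;   /* < NR_STRIPES <= INT_MAX, so no overflow */
    }

    int main(void)
    {
        printf("offset 4096  -> stripe %d\n", offset_to_stripe(4096));
        printf("offset 1<<20 -> stripe %d\n", offset_to_stripe(1u << 20));
        return 0;
    }
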
@@ -39,7 +51,10 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
uint64_t offset,
unsigned int nr_sectors)
{
- unsigned int stripe = offset_to_stripe(&dc->disk, offset);
+ int stripe = offset_to_stripe(&dc->disk, offset);
+
+ if (stripe < 0)
+ return false;
while (1) {
if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index dc385b70e4c3..b3b799f84dcc 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1463,6 +1463,10 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
+ if (s >= c->start)
+ s -= c->start;
+ else
+ s = 0;
if (likely(c->sectors_per_block_bits >= 0))
s >>= c->sectors_per_block_bits;
else
@@ -1471,6 +1475,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
+{
+ return c->dm_io;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
+
sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
return b->block;
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 151aa95775be..af6d4f898e4c 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -537,12 +537,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
CACHE_MAX_CONCURRENT_LOCKS);
if (IS_ERR(cmd->bm)) {
DMERR("could not create block manager");
- return PTR_ERR(cmd->bm);
+ r = PTR_ERR(cmd->bm);
+ cmd->bm = NULL;
+ return r;
}
r = __open_or_format_metadata(cmd, may_format_device);
- if (r)
+ if (r) {
dm_block_manager_destroy(cmd->bm);
+ cmd->bm = NULL;
+ }
return r;
}
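
Both error paths above clear cmd->bm after tearing down the block manager, so a later destroy of the metadata object sees a NULL pointer rather than a stale one. A minimal sketch of that defensive pattern (the metadata struct and helpers here are invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct metadata {
        void *bm;   /* block-manager handle, NULL when absent */
    };

    static void bm_destroy(struct metadata *md)
    {
        free(md->bm);
        md->bm = NULL;   /* makes a second teardown a harmless no-op */
    }

    static int metadata_open(struct metadata *md)
    {
        md->bm = malloc(64);
        if (!md->bm)
            return -1;

        /* pretend the follow-up open/format step failed */
        bm_destroy(md);
        return -1;
    }

    int main(void)
    {
        struct metadata md = { 0 };

        if (metadata_open(&md) < 0)
            bm_destroy(&md);   /* safe even after the early teardown */

        printf("bm = %p\n", md.bm);
        return 0;
    }
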
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 7e426e4d1352..8cda3f7ddbae 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -110,6 +110,10 @@ struct mapped_device {
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
+ int swap_bios;
+ struct semaphore swap_bios_semaphore;
+ struct mutex swap_bios_lock;
+
struct dm_stats stats;
struct kthread_worker kworker;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 750f8b34e693..85559f772d0d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2852,6 +2852,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
wake_up_process(cc->write_thread);
ti->num_flush_bios = 1;
+ ti->limit_swap_bios = true;
return 0;
@@ -3078,7 +3079,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->max_segment_size = PAGE_SIZE;
limits->logical_block_size =
- max_t(unsigned short, limits->logical_block_size, cc->sector_size);
+ max_t(unsigned, limits->logical_block_size, cc->sector_size);
limits->physical_block_size =
max_t(unsigned, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 8e48920a3ffa..c596fc564a58 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -46,6 +46,7 @@ struct writeset {
static void writeset_free(struct writeset *ws)
{
vfree(ws->bits);
+ ws->bits = NULL;
}
static int setup_on_disk_bitset(struct dm_disk_bitset *info,
@@ -70,8 +71,6 @@ static size_t bitset_size(unsigned nr_bits)
*/
static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
{
- ws->md.nr_bits = nr_blocks;
- ws->md.root = INVALID_WRITESET_ROOT;
ws->bits = vzalloc(bitset_size(nr_blocks));
if (!ws->bits) {
DMERR("%s: couldn't allocate in memory bitset", __func__);
@@ -84,12 +83,14 @@ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
/*
* Wipes the in-core bitset, and creates a new on disk bitset.
*/
-static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws)
+static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws,
+ dm_block_t nr_blocks)
{
int r;
- memset(ws->bits, 0, bitset_size(ws->md.nr_bits));
+ memset(ws->bits, 0, bitset_size(nr_blocks));
+ ws->md.nr_bits = nr_blocks;
r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
if (r) {
DMERR("%s: setup_on_disk_bitset failed", __func__);
@@ -133,7 +134,7 @@ static int writeset_test_and_set(struct dm_disk_bitset *info,
{
int r;
- if (!test_and_set_bit(block, ws->bits)) {
+ if (!test_bit(block, ws->bits)) {
r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
if (r) {
/* FIXME: fail mode */
@@ -387,7 +388,7 @@ static void ws_dec(void *context, const void *value)
static int ws_eq(void *context, const void *value1, const void *value2)
{
- return !memcmp(value1, value2, sizeof(struct writeset_metadata));
+ return !memcmp(value1, value2, sizeof(struct writeset_disk));
}
/*----------------------------------------------------------------*/
@@ -563,6 +564,15 @@ static int open_metadata(struct era_metadata *md)
}
disk = dm_block_data(sblock);
+
+ /* Verify the data block size hasn't changed */
+ if (le32_to_cpu(disk->data_block_size) != md->block_size) {
+ DMERR("changing the data block size (from %u to %llu) is not supported",
+ le32_to_cpu(disk->data_block_size), md->block_size);
+ r = -EINVAL;
+ goto bad;
+ }
+
r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
disk->metadata_space_map_root,
sizeof(disk->metadata_space_map_root),
@@ -574,10 +584,10 @@ static int open_metadata(struct era_metadata *md)
setup_infos(md);
- md->block_size = le32_to_cpu(disk->data_block_size);
md->nr_blocks = le32_to_cpu(disk->nr_blocks);
md->current_era = le32_to_cpu(disk->current_era);
+ ws_unpack(&disk->current_writeset, &md->current_writeset->md);
md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
md->era_array_root = le64_to_cpu(disk->era_array_root);
md->metadata_snap = le64_to_cpu(disk->metadata_snap);
@@ -745,6 +755,12 @@ static int metadata_digest_lookup_writeset(struct era_metadata *md,
ws_unpack(&disk, &d->writeset);
d->value = cpu_to_le32(key);
+ /*
+ * We initialise another bitset info to avoid any caching side effects
+ * with the previous one.
+ */
+ dm_disk_bitset_init(md->tm, &d->info);
+
d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
d->current_bit = 0;
d->step = metadata_digest_transcribe_writeset;
@@ -758,12 +774,6 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d)
return 0;
memset(d, 0, sizeof(*d));
-
- /*
- * We initialise another bitset info to avoid any caching side
- * effects with the previous one.
- */
- dm_disk_bitset_init(md->tm, &d->info);
d->step = metadata_digest_lookup_writeset;
return 0;
@@ -801,6 +811,8 @@ static struct era_metadata *metadata_open(struct block_device *bdev,
static void metadata_close(struct era_metadata *md)
{
+ writeset_free(&md->writesets[0]);
+ writeset_free(&md->writesets[1]);
destroy_persistent_data_objects(md);
kfree(md);
}
@@ -838,6 +850,7 @@ static int metadata_resize(struct era_metadata *md, void *arg)
r = writeset_alloc(&md->writesets[1], *new_size);
if (r) {
DMERR("%s: writeset_alloc failed for writeset 1", __func__);
+ writeset_free(&md->writesets[0]);
return r;
}
@@ -848,6 +861,8 @@ static int metadata_resize(struct era_metadata *md, void *arg)
&value, &md->era_array_root);
if (r) {
DMERR("%s: dm_array_resize failed", __func__);
+ writeset_free(&md->writesets[0]);
+ writeset_free(&md->writesets[1]);
return r;
}
@@ -869,7 +884,6 @@ static int metadata_era_archive(struct era_metadata *md)
}
ws_pack(&md->current_writeset->md, &value);
- md->current_writeset->md.root = INVALID_WRITESET_ROOT;
keys[0] = md->current_era;
__dm_bless_for_disk(&value);
@@ -881,6 +895,7 @@ static int metadata_era_archive(struct era_metadata *md)
return r;
}
+ md->current_writeset->md.root = INVALID_WRITESET_ROOT;
md->archived_writesets = true;
return 0;
@@ -897,7 +912,7 @@ static int metadata_new_era(struct era_metadata *md)
int r;
struct writeset *new_writeset = next_writeset(md);
- r = writeset_init(&md->bitset_info, new_writeset);
+ r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks);
if (r) {
DMERR("%s: writeset_init failed", __func__);
return r;
@@ -950,7 +965,7 @@ static int metadata_commit(struct era_metadata *md)
int r;
struct dm_block *sblock;
- if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) {
+ if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
&md->current_writeset->md.root);
if (r) {
@@ -1225,8 +1240,10 @@ static void process_deferred_bios(struct era *era)
int r;
struct bio_list deferred_bios, marked_bios;
struct bio *bio;
+ struct blk_plug plug;
bool commit_needed = false;
bool failed = false;
+ struct writeset *ws = era->md->current_writeset;
bio_list_init(&deferred_bios);
bio_list_init(&marked_bios);
@@ -1236,9 +1253,11 @@ static void process_deferred_bios(struct era *era)
bio_list_init(&era->deferred_bios);
spin_unlock(&era->deferred_lock);
+ if (bio_list_empty(&deferred_bios))
+ return;
+
while ((bio = bio_list_pop(&deferred_bios))) {
- r = writeset_test_and_set(&era->md->bitset_info,
- era->md->current_writeset,
+ r = writeset_test_and_set(&era->md->bitset_info, ws,
get_block(era, bio));
if (r < 0) {
/*
@@ -1246,7 +1265,6 @@ static void process_deferred_bios(struct era *era)
* FIXME: finish.
*/
failed = true;
-
} else if (r == 0)
commit_needed = true;
@@ -1262,9 +1280,19 @@ static void process_deferred_bios(struct era *era)
if (failed)
while ((bio = bio_list_pop(&marked_bios)))
bio_io_error(bio);
- else
- while ((bio = bio_list_pop(&marked_bios)))
+ else {
+ blk_start_plug(&plug);
+ while ((bio = bio_list_pop(&marked_bios))) {
+ /*
+ * Only update the in-core writeset if the on-disk one
+ * was updated too.
+ */
+ if (commit_needed)
+ set_bit(get_block(era, bio), ws->bits);
generic_make_request(bio);
+ }
+ blk_finish_plug(&plug);
+ }
}
static void process_rpc_calls(struct era *era)
@@ -1485,15 +1513,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
era->md = md;
- era->nr_blocks = calc_nr_blocks(era);
-
- r = metadata_resize(era->md, &era->nr_blocks);
- if (r) {
- ti->error = "couldn't resize metadata";
- era_destroy(era);
- return -ENOMEM;
- }
-
era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
if (!era->wq) {
ti->error = "could not create workqueue for metadata object";
@@ -1570,16 +1589,24 @@ static int era_preresume(struct dm_target *ti)
dm_block_t new_size = calc_nr_blocks(era);
if (era->nr_blocks != new_size) {
- r = in_worker1(era, metadata_resize, &new_size);
- if (r)
+ r = metadata_resize(era->md, &new_size);
+ if (r) {
+ DMERR("%s: metadata_resize failed", __func__);
+ return r;
+ }
+
+ r = metadata_commit(era->md);
+ if (r) {
+ DMERR("%s: metadata_commit failed", __func__);
return r;
+ }
era->nr_blocks = new_size;
}
start_worker(era);
- r = in_worker0(era, metadata_new_era);
+ r = in_worker0(era, metadata_era_rollover);
if (r) {
DMERR("%s: metadata_era_rollover failed", __func__);
return r;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index d75a4ce7d12a..cffd42317272 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -240,6 +240,7 @@ struct dm_integrity_c {
bool journal_uptodate;
bool just_formatted;
+ bool legacy_recalculate;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
@@ -345,6 +346,14 @@ static int dm_integrity_failed(struct dm_integrity_c *ic)
return READ_ONCE(ic->failed);
}
+static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
+{
+ if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
+ !ic->legacy_recalculate)
+ return true;
+ return false;
+}
+
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
unsigned j, unsigned char seq)
{
@@ -1153,12 +1162,52 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
return 0;
}
-static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
+struct flush_request {
+ struct dm_io_request io_req;
+ struct dm_io_region io_reg;
+ struct dm_integrity_c *ic;
+ struct completion comp;
+};
+
+static void flush_notify(unsigned long error, void *fr_)
+{
+ struct flush_request *fr = fr_;
+ if (unlikely(error != 0))
+ dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
+ complete(&fr->comp);
+}
+
+static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
int r;
+
+ struct flush_request fr;
+
+ if (!ic->meta_dev)
+ flush_data = false;
+ if (flush_data) {
+ fr.io_req.bi_op = REQ_OP_WRITE,
+ fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+ fr.io_req.mem.type = DM_IO_KMEM,
+ fr.io_req.mem.ptr.addr = NULL,
+ fr.io_req.notify.fn = flush_notify,
+ fr.io_req.notify.context = &fr;
+ fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
+ fr.io_reg.bdev = ic->dev->bdev,
+ fr.io_reg.sector = 0,
+ fr.io_reg.count = 0,
+ fr.ic = ic;
+ init_completion(&fr.comp);
+ r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
+ BUG_ON(r);
+ }
+
r = dm_bufio_write_dirty_buffers(ic->bufio);
if (unlikely(r))
dm_integrity_io_error(ic, "writing tags", r);
+
+ if (flush_data)
+ wait_for_completion(&fr.comp);
}
static void sleep_on_endio_wait(struct dm_integrity_c *ic)
@@ -1846,7 +1895,7 @@ static void integrity_commit(struct work_struct *w)
flushes = bio_list_get(&ic->flush_bio_list);
if (unlikely(ic->mode != 'J')) {
spin_unlock_irq(&ic->endio_wait.lock);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
goto release_flush_bios;
}
@@ -2057,7 +2106,7 @@ skip_io:
complete_journal_op(&comp);
wait_for_completion_io(&comp.comp);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
}
static void integrity_writer(struct work_struct *w)
@@ -2068,7 +2117,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
- if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
+ if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
return;
spin_lock_irq(&ic->endio_wait.lock);
@@ -2099,7 +2148,7 @@ static void recalc_write_super(struct dm_integrity_c *ic)
{
int r;
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, false);
if (dm_integrity_failed(ic))
return;
@@ -2127,7 +2176,7 @@ static void integrity_recalc(struct work_struct *w)
next_chunk:
- if (unlikely(dm_suspended(ic->ti)))
+ if (unlikely(dm_post_suspending(ic->ti)))
goto unlock_ret;
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
@@ -2409,7 +2458,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
if (ic->meta_dev)
queue_work(ic->writer_wq, &ic->writer_work);
drain_workqueue(ic->writer_wq);
- dm_integrity_flush_buffers(ic);
+ dm_integrity_flush_buffers(ic, true);
}
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
@@ -2463,6 +2512,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->internal_hash_alg.alg_string;
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
+ arg_count += ic->legacy_recalculate;
DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
ic->tag_size, ic->mode, arg_count);
if (ic->meta_dev)
@@ -2476,6 +2526,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
DMEMIT(" commit_time:%u", ic->autocommit_msec);
+ if (ic->legacy_recalculate)
+ DMEMIT(" legacy_recalculate");
#define EMIT_ALG(a, n) \
do { \
@@ -3078,7 +3130,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
unsigned extra_args;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
- {0, 9, "Invalid number of feature args"},
+ {0, 12, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
bool recalculate;
@@ -3208,6 +3260,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
} else if (!strcmp(opt_string, "recalculate")) {
recalculate = true;
+ } else if (!strcmp(opt_string, "legacy_recalculate")) {
+ ic->legacy_recalculate = true;
} else {
r = -EINVAL;
ti->error = "Invalid argument";
@@ -3475,6 +3529,20 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
+ } else {
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ ti->error = "Recalculate can only be specified with internal_hash";
+ r = -EINVAL;
+ goto bad;
+ }
+ }
+
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
+ dm_integrity_disable_recalculate(ic)) {
+ ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
+ r = -EOPNOTSUPP;
+ goto bad;
}
ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index f666778ad237..17cbad58834f 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -529,7 +529,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
* Grab our output buffer.
*/
nl = orig_nl = get_result_buffer(param, param_size, &len);
- if (len < needed) {
+ if (len < needed || len < sizeof(nl->dev)) {
param->flags |= DM_BUFFER_FULL_FLAG;
goto out;
}
@@ -1575,6 +1575,7 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
if (!argc) {
DMWARN("Empty message received.");
+ r = -EINVAL;
goto out_argv;
}
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index def4f6ec290b..c1ad84f3414c 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -586,10 +586,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
/* Do we need to select a new pgpath? */
pgpath = READ_ONCE(m->current_pgpath);
- queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
- if (!pgpath || !queue_io)
+ if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
+ /* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */
+ queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
+
if ((pgpath && queue_io) ||
(!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
/* Queue for the daemon to resubmit */
@@ -1866,7 +1868,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
int r;
current_pgpath = READ_ONCE(m->current_pgpath);
- if (!current_pgpath)
+ if (!current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
current_pgpath = choose_pgpath(m, 0);
if (current_pgpath) {
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 23de59a692c5..b16332917220 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1892,6 +1892,14 @@ static bool rs_takeover_requested(struct raid_set *rs)
return rs->md.new_level != rs->md.level;
}
+/* True if layout is set to reshape. */
+static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
+{
+ return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
+ rs->md.new_layout != rs->md.layout ||
+ rs->md.new_chunk_sectors != rs->md.chunk_sectors;
+}
+
/* True if @rs is requested to reshape by ctr */
static bool rs_reshape_requested(struct raid_set *rs)
{
@@ -1904,9 +1912,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
if (rs_is_raid0(rs))
return false;
- change = mddev->new_layout != mddev->layout ||
- mddev->new_chunk_sectors != mddev->chunk_sectors ||
- rs->delta_disks;
+ change = rs_is_layout_change(rs, false);
/* Historical case to support raid1 reshape without delta disks */
if (rs_is_raid1(rs)) {
@@ -2843,7 +2849,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs)
}
/*
- *
+ * Reshape:
* - change raid layout
* - change chunk size
* - add disks
@@ -2953,6 +2959,20 @@ static int rs_setup_reshape(struct raid_set *rs)
}
/*
+ * If the md resync thread has updated the superblock with the max reshape
+ * position at the end of a reshape but has not (yet) reset the layout
+ * configuration changes, reset the latter here.
+ */
+static void rs_reset_inconclusive_reshape(struct raid_set *rs)
+{
+ if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
+ rs_set_cur(rs);
+ rs->md.delta_disks = 0;
+ rs->md.reshape_backwards = 0;
+ }
+}
+
+/*
* Enable/disable discard support on RAID set depending on
* RAID level and discard properties of underlying RAID members.
*/
@@ -3221,11 +3241,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
+ /* Catch any inconclusive reshape superblock content. */
+ rs_reset_inconclusive_reshape(rs);
+
/* Start raid set read-only and assumed clean to change in raid_resume() */
rs->md.ro = 1;
rs->md.in_sync = 1;
- /* Keep array frozen */
+ /* Keep array frozen until resume. */
set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
/* Has to be held on running the array */
@@ -3239,7 +3262,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
r = md_start(&rs->md);
-
if (r) {
ti->error = "Failed to start raid array";
mddev_unlock(&rs->md);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 4d36373e1c0f..2957a3763f01 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -95,9 +95,6 @@ static void dm_old_stop_queue(struct request_queue *q)
static void dm_mq_stop_queue(struct request_queue *q)
{
- if (blk_mq_queue_stopped(q))
- return;
-
blk_mq_quiesce_queue(q);
}
@@ -834,6 +831,7 @@ out_tag_set:
blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
kfree(md->tag_set);
+ md->tag_set = NULL;
return err;
}
@@ -843,6 +841,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
if (md->tag_set) {
blk_mq_free_tag_set(md->tag_set);
kfree(md->tag_set);
+ md->tag_set = NULL;
}
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d3f28a9e3fd9..52101e5c7258 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -137,6 +137,11 @@ struct dm_snapshot {
* for them to be committed.
*/
struct bio_list bios_queued_during_merge;
+
+ /*
+ * Flush data after merge.
+ */
+ struct bio flush_bio;
};
/*
@@ -789,7 +794,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
static uint32_t __minimum_chunk_size(struct origin *o)
{
struct dm_snapshot *snap;
- unsigned chunk_size = 0;
+ unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
if (o)
list_for_each_entry(snap, &o->snapshots, list)
@@ -1061,6 +1066,17 @@ shut:
static void error_bios(struct bio *bio);
+static int flush_data(struct dm_snapshot *s)
+{
+ struct bio *flush_bio = &s->flush_bio;
+
+ bio_reset(flush_bio);
+ bio_set_dev(flush_bio, s->origin->bdev);
+ flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+ return submit_bio_wait(flush_bio);
+}
+
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
struct dm_snapshot *s = context;
@@ -1074,6 +1090,11 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
goto shut;
}
+ if (flush_data(s) < 0) {
+ DMERR("Flush after merge failed: shutting down merge");
+ goto shut;
+ }
+
if (s->store->type->commit_merge(s->store,
s->num_merging_chunks) < 0) {
DMERR("Write error in exception store: shutting down merge");
@@ -1198,6 +1219,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
+ bio_init(&s->flush_bio, NULL, 0);
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
@@ -1264,6 +1286,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (!s->store->chunk_size) {
ti->error = "Chunk size not set";
+ r = -EINVAL;
goto bad_read_metadata;
}
@@ -1391,6 +1414,8 @@ static void snapshot_dtr(struct dm_target *ti)
mutex_destroy(&s->lock);
+ bio_uninit(&s->flush_bio);
+
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 36275c59e4e7..71d3fdbce50a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -431,14 +431,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
{
int r;
dev_t dev;
+ unsigned int major, minor;
+ char dummy;
struct dm_dev_internal *dd;
struct dm_table *t = ti->table;
BUG_ON(!t);
- dev = dm_get_dev_t(path);
- if (!dev)
- return -ENODEV;
+ if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
+ /* Extract the major/minor numbers */
+ dev = MKDEV(major, minor);
+ if (MAJOR(dev) != major || MINOR(dev) != minor)
+ return -EOVERFLOW;
+ } else {
+ dev = dm_get_dev_t(path);
+ if (!dev)
+ return -ENODEV;
+ }
dd = find_device(&t->devices, dev);
if (!dd) {
@@ -882,10 +891,10 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
}
EXPORT_SYMBOL_GPL(dm_table_set_type);
-static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
+static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- return bdev_dax_supported(dev->bdev, PAGE_SIZE);
+ return !bdev_dax_supported(dev->bdev, PAGE_SIZE);
}
static bool dm_table_supports_dax(struct dm_table *t)
@@ -901,7 +910,7 @@ static bool dm_table_supports_dax(struct dm_table *t)
return false;
if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, device_supports_dax, NULL))
+ ti->type->iterate_devices(ti, device_not_dax_capable, NULL))
return false;
}
@@ -1336,12 +1345,6 @@ void dm_table_event_callback(struct dm_table *t,
void dm_table_event(struct dm_table *t)
{
- /*
- * You can no longer call dm_table_event() from interrupt
- * context, use a bottom half instead.
- */
- BUG_ON(in_interrupt());
-
mutex_lock(&_event_lock);
if (t->event_fn)
t->event_fn(t->event_context);
@@ -1389,6 +1392,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
return &t->targets[(KEYS_PER_NODE * n) + k];
}
+/*
+ * type->iterate_devices() should be called when the sanity check needs to
+ * iterate and check all underlying data devices. iterate_devices() will
+ * iterate all underlying data devices until it encounters a non-zero return
+ * code, returned either by the input iterate_devices_callout_fn or by
+ * iterate_devices() itself internally.
+ *
+ * For some target types (e.g. dm-stripe), one call of iterate_devices() may
+ * iterate multiple underlying devices internally, in which case a non-zero
+ * return code returned by iterate_devices_callout_fn will stop the iteration
+ * in advance.
+ *
+ * Cases requiring _any_ underlying device to support some kind of attribute
+ * should use an iteration structure like dm_table_any_dev_attr(), or call
+ * it directly. @func should handle the semantics of positive examples, e.g.
+ * capable of something.
+ *
+ * Cases requiring _all_ underlying devices to support some kind of attribute
+ * should use an iteration structure like dm_table_supports_nowait() or
+ * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
+ * uses an @anti_func that handles the semantics of counter examples, e.g. not
+ * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
+ */
+static bool dm_table_any_dev_attr(struct dm_table *t,
+ iterate_devices_callout_fn func, void *data)
+{
+ struct dm_target *ti;
+ unsigned int i;
+
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, func, data))
+ return true;
+ }
+
+ return false;
+}
+
static int count_device(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1425,13 +1468,13 @@ bool dm_table_has_no_data_devices(struct dm_table *table)
return true;
}
-static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
enum blk_zoned_model *zoned_model = data;
- return q && blk_queue_zoned_model(q) == *zoned_model;
+ return !q || blk_queue_zoned_model(q) != *zoned_model;
}
static bool dm_table_supports_zoned_model(struct dm_table *t,
@@ -1448,37 +1491,20 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
return false;
if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
+ ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
return false;
}
return true;
}
-static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
unsigned int *zone_sectors = data;
- return q && blk_queue_zone_sectors(q) == *zone_sectors;
-}
-
-static bool dm_table_matches_zone_sectors(struct dm_table *t,
- unsigned int zone_sectors)
-{
- struct dm_target *ti;
- unsigned i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
-
- if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
- return false;
- }
-
- return true;
+ return !q || blk_queue_zone_sectors(q) != *zone_sectors;
}
static int validate_hardware_zoned_model(struct dm_table *table,
@@ -1498,7 +1524,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
if (!zone_sectors || !is_power_of_2(zone_sectors))
return -EINVAL;
- if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
+ if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
DMERR("%s: zone sectors is not consistent across all devices",
dm_device_name(table->md));
return -EINVAL;
@@ -1688,29 +1714,12 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
return false;
}
-static int dm_table_supports_dax_write_cache(struct dm_table *t)
-{
- struct dm_target *ti;
- unsigned i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
-
- if (ti->type->iterate_devices &&
- ti->type->iterate_devices(ti,
- device_dax_write_cache_enabled, NULL))
- return true;
- }
-
- return false;
-}
-
-static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && blk_queue_nonrot(q);
+ return q && !blk_queue_nonrot(q);
}
static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
@@ -1721,43 +1730,26 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
return q && !blk_queue_add_random(q);
}
-static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int queue_no_sg_merge(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
-}
-
-static bool dm_table_all_devices_attribute(struct dm_table *t,
- iterate_devices_callout_fn func)
-{
- struct dm_target *ti;
- unsigned i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
-
- if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, func, NULL))
- return false;
- }
-
- return true;
+ return q && test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}
-static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
+static int device_is_partial_completion(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
char b[BDEVNAME_SIZE];
/* For now, NVMe devices are the only devices of this class */
- return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
+ return (strncmp(bdevname(dev->bdev, b), "nvme", 4) != 0);
}
static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
{
- return dm_table_all_devices_attribute(t, device_no_partial_completion);
+ return !dm_table_any_dev_attr(t, device_is_partial_completion, NULL);
}
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1884,27 +1876,6 @@ static int device_requires_stable_pages(struct dm_target *ti,
return q && bdi_cap_stable_pages_required(q->backing_dev_info);
}
-/*
- * If any underlying device requires stable pages, a table must require
- * them as well. Only targets that support iterate_devices are considered:
- * don't want error, zero, etc to require stable pages.
- */
-static bool dm_table_requires_stable_pages(struct dm_table *t)
-{
- struct dm_target *ti;
- unsigned i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
-
- if (ti->type->iterate_devices &&
- ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
- return true;
- }
-
- return false;
-}
-
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1941,32 +1912,35 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
- if (dm_table_supports_dax_write_cache(t))
+ if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
dax_write_cache(t->md->dax_dev, true);
/* Ensure that all underlying devices are non-rotational. */
- if (dm_table_all_devices_attribute(t, device_is_nonrot))
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- else
+ if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ else
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
if (!dm_table_supports_write_same(t))
q->limits.max_write_same_sectors = 0;
if (!dm_table_supports_write_zeroes(t))
q->limits.max_write_zeroes_sectors = 0;
- if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
- blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
- else
+ if (dm_table_any_dev_attr(t, queue_no_sg_merge, NULL))
blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
dm_table_verify_integrity(t);
/*
* Some devices don't use blk_integrity but still want stable pages
* because they do their own checksumming.
+ * If any underlying device requires stable pages, a table must require
+ * them as well. Only targets that support iterate_devices are considered:
+ * don't want error, zero, etc to require stable pages.
*/
- if (dm_table_requires_stable_pages(t))
+ if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
else
q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
@@ -1977,7 +1951,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
* Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
* have it set.
*/
- if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+ if (blk_queue_add_random(q) &&
+ dm_table_any_dev_attr(t, device_is_not_random, NULL))
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 6a26afcc1fd6..85077f4d257a 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -698,12 +698,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
THIN_MAX_CONCURRENT_LOCKS);
if (IS_ERR(pmd->bm)) {
DMERR("could not create block manager");
- return PTR_ERR(pmd->bm);
+ r = PTR_ERR(pmd->bm);
+ pmd->bm = NULL;
+ return r;
}
r = __open_or_format_metadata(pmd, format_device);
- if (r)
+ if (r) {
dm_block_manager_destroy(pmd->bm);
+ pmd->bm = NULL;
+ }
return r;
}
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 684af08d0747..a433f5824f18 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -65,19 +65,18 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
unsigned *offset, struct dm_buffer **buf)
{
- u64 position, block;
+ u64 position, block, rem;
u8 *res;
position = (index + rsb) * v->fec->roots;
- block = position >> v->data_dev_block_bits;
- *offset = (unsigned)(position - (block << v->data_dev_block_bits));
+ block = div64_u64_rem(position, v->fec->io_size, &rem);
+ *offset = (unsigned)rem;
- res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf);
+ res = dm_bufio_read(v->fec->bufio, block, buf);
if (unlikely(IS_ERR(res))) {
DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
v->data_dev->name, (unsigned long long)rsb,
- (unsigned long long)(v->fec->start + block),
- PTR_ERR(res));
+ (unsigned long long)block, PTR_ERR(res));
*buf = NULL;
}
@@ -159,7 +158,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
/* read the next block when we run out of parity bytes */
offset += v->fec->roots;
- if (offset >= 1 << v->data_dev_block_bits) {
+ if (offset >= v->fec->io_size) {
dm_bufio_release(buf);
par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
@@ -436,7 +435,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
fio->level++;
if (type == DM_VERITY_BLOCK_TYPE_METADATA)
- block += v->data_blocks;
+ block = block - v->hash_start + v->data_blocks;
/*
* For RS(M, N), the continuous FEC data is divided into blocks of N
@@ -552,6 +551,7 @@ void verity_fec_dtr(struct dm_verity *v)
mempool_exit(&f->rs_pool);
mempool_exit(&f->prealloc_pool);
mempool_exit(&f->extra_pool);
+ mempool_exit(&f->output_pool);
kmem_cache_destroy(f->cache);
if (f->data_bufio)
@@ -674,7 +674,7 @@ int verity_fec_ctr(struct dm_verity *v)
{
struct dm_verity_fec *f = v->fec;
struct dm_target *ti = v->ti;
- u64 hash_blocks;
+ u64 hash_blocks, fec_blocks;
int ret;
if (!verity_fec_is_enabled(v)) {
@@ -743,16 +743,23 @@ int verity_fec_ctr(struct dm_verity *v)
return -E2BIG;
}
+ if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
+ f->io_size = 1 << v->data_dev_block_bits;
+ else
+ f->io_size = v->fec->roots << SECTOR_SHIFT;
+
f->bufio = dm_bufio_client_create(f->dev->bdev,
- 1 << v->data_dev_block_bits,
+ f->io_size,
1, 0, NULL, NULL);
if (IS_ERR(f->bufio)) {
ti->error = "Cannot initialize FEC bufio client";
return PTR_ERR(f->bufio);
}
- if (dm_bufio_get_device_size(f->bufio) <
- ((f->start + f->rounds * f->roots) >> v->data_dev_block_bits)) {
+ dm_bufio_set_sector_offset(f->bufio, f->start << (v->data_dev_block_bits - SECTOR_SHIFT));
+
+ fec_blocks = div64_u64(f->rounds * f->roots, v->fec->roots << SECTOR_SHIFT);
+ if (dm_bufio_get_device_size(f->bufio) < fec_blocks) {
ti->error = "FEC device is too small";
return -E2BIG;
}
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 6ad803b2b36c..b9f0bb0f63c4 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -40,6 +40,7 @@ struct dm_verity_fec {
struct dm_dev *dev; /* parity data device */
struct dm_bufio_client *data_bufio; /* for data dev access */
struct dm_bufio_client *bufio; /* for parity data access */
+ size_t io_size; /* IO size for roots */
sector_t start; /* parity data start in blocks */
sector_t blocks; /* number of blocks covered */
sector_t rounds; /* number of interleaving rounds */
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index e3599b43f9eb..fa8c201fca77 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -34,7 +34,7 @@
#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
-#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC)
+#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC)
static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
@@ -534,6 +534,15 @@ static int verity_verify_io(struct dm_verity_io *io)
}
/*
+ * Skip verity work in response to I/O error when system is shutting down.
+ */
+static inline bool verity_is_system_shutting_down(void)
+{
+ return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
+ || system_state == SYSTEM_RESTART;
+}
+
+/*
* End one "io" structure with a given error.
*/
static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
@@ -560,7 +569,8 @@ static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
- if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
+ if (bio->bi_status &&
+ (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
verity_finish_io(io, bio->bi_status);
return;
}
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 4e4a09054f85..a1d4166864d0 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -226,6 +226,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
pfn_t pfn;
int id;
struct page **pages;
+ sector_t offset;
wc->memory_vmapped = false;
@@ -244,9 +245,16 @@ static int persistent_memory_claim(struct dm_writecache *wc)
goto err1;
}
+ offset = get_start_sect(wc->ssd_dev->bdev);
+ if (offset & (PAGE_SIZE / 512 - 1)) {
+ r = -EINVAL;
+ goto err1;
+ }
+ offset >>= PAGE_SHIFT - 9;
+
id = dax_read_lock();
- da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
+ da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
if (da < 0) {
wc->memory_map = NULL;
r = da;
@@ -268,7 +276,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
i = 0;
do {
long daa;
- daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
+ daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
NULL, &pfn);
if (daa <= 0) {
r = daa ? daa : -EINVAL;
@@ -281,6 +289,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
while (daa-- && i < p) {
pages[i++] = pfn_t_to_page(pfn);
pfn.val++;
+ if (!(i & 15))
+ cond_resched();
}
} while (i < p);
wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
@@ -308,7 +318,7 @@ err1:
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
- BUG();
+ return -EOPNOTSUPP;
}
#endif
@@ -811,6 +821,8 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
writecache_wait_for_ios(wc, WRITE);
discarded_something = true;
}
+ if (!writecache_entry_is_committed(wc, e))
+ wc->uncommitted_blocks--;
writecache_free_entry(wc, e);
}
@@ -878,11 +890,30 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
struct wc_entry *e = &wc->entries[b];
e->index = b;
e->write_in_progress = false;
+ cond_resched();
}
return 0;
}
+static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
+{
+ struct dm_io_region region;
+ struct dm_io_request req;
+
+ region.bdev = wc->ssd_dev->bdev;
+ region.sector = wc->start_sector;
+ region.count = n_sectors;
+ req.bi_op = REQ_OP_READ;
+ req.bi_op_flags = REQ_SYNC;
+ req.mem.type = DM_IO_VMA;
+ req.mem.ptr.vma = (char *)wc->memory_map;
+ req.client = wc->dm_io;
+ req.notify.fn = NULL;
+
+ return dm_io(&req, 1, &region, NULL);
+}
+
static void writecache_resume(struct dm_target *ti)
{
struct dm_writecache *wc = ti->private;
@@ -893,8 +924,18 @@ static void writecache_resume(struct dm_target *ti)
wc_lock(wc);
- if (WC_MODE_PMEM(wc))
+ if (WC_MODE_PMEM(wc)) {
persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
+ } else {
+ r = writecache_read_metadata(wc, wc->metadata_sectors);
+ if (r) {
+ size_t sb_entries_offset;
+ writecache_error(wc, r, "unable to read metadata: %d", r);
+ sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
+ memset((char *)wc->memory_map + sb_entries_offset, -1,
+ (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
+ }
+ }
wc->tree = RB_ROOT;
INIT_LIST_HEAD(&wc->lru);
@@ -932,6 +973,7 @@ static void writecache_resume(struct dm_target *ti)
e->original_sector = le64_to_cpu(wme.original_sector);
e->seq_count = le64_to_cpu(wme.seq_count);
}
+ cond_resched();
}
#endif
for (b = 0; b < wc->n_blocks; b++) {
@@ -1764,8 +1806,10 @@ static int init_memory(struct dm_writecache *wc)
pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));
- for (b = 0; b < wc->n_blocks; b++)
+ for (b = 0; b < wc->n_blocks; b++) {
write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
+ cond_resched();
+ }
writecache_flush_all_metadata(wc);
writecache_commit_flushed(wc, false);
@@ -1839,7 +1883,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct wc_memory_superblock s;
static struct dm_arg _args[] = {
- {0, 10, "Invalid number of feature args"},
+ {0, 16, "Invalid number of feature args"},
};
as.argc = argc;
@@ -1974,6 +2018,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Invalid block size";
goto bad;
}
+ if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
+ wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
+ r = -EINVAL;
+ ti->error = "Block size is smaller than device logical block size";
+ goto bad;
+ }
wc->block_size_bits = __ffs(wc->block_size);
wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
@@ -2062,8 +2112,6 @@ invalid_optional:
goto bad;
}
} else {
- struct dm_io_region region;
- struct dm_io_request req;
size_t n_blocks, n_metadata_blocks;
uint64_t n_bitmap_bits;
@@ -2120,19 +2168,9 @@ invalid_optional:
goto bad;
}
- region.bdev = wc->ssd_dev->bdev;
- region.sector = wc->start_sector;
- region.count = wc->metadata_sectors;
- req.bi_op = REQ_OP_READ;
- req.bi_op_flags = REQ_SYNC;
- req.mem.type = DM_IO_VMA;
- req.mem.ptr.vma = (char *)wc->memory_map;
- req.client = wc->dm_io;
- req.notify.fn = NULL;
-
- r = dm_io(&req, 1, &region, NULL);
+ r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
if (r) {
- ti->error = "Unable to read metadata";
+ ti->error = "Unable to read first block of metadata";
goto bad;
}
}
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 086a870087cf..5c2bbdf67f25 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1105,7 +1105,6 @@ static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
set_bit(DMZ_RND, &zone->flags);
- zmd->nr_rnd_zones++;
} else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
set_bit(DMZ_SEQ, &zone->flags);
@@ -1581,7 +1580,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
return dzone;
}
- return ERR_PTR(-EBUSY);
+ return NULL;
}
/*
@@ -1601,7 +1600,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
return zone;
}
- return ERR_PTR(-EBUSY);
+ return NULL;
}
/*
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index 84ac671acd2e..879848aad97a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -348,8 +348,8 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
/* Get a data zone */
dzone = dmz_get_zone_for_reclaim(zmd);
- if (IS_ERR(dzone))
- return PTR_ERR(dzone);
+ if (!dzone)
+ return -EBUSY;
start = jiffies;
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 971a2da43cbf..185397272586 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -789,7 +789,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
/* Set target (no write same support) */
- ti->max_io_len = dev->zone_nr_sectors << 9;
+ ti->max_io_len = dev->zone_nr_sectors;
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_zeroes_bios = 1;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 43643151584a..01bf5bc925d0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
@@ -140,10 +141,21 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
+#define DMF_POST_SUSPENDING 8
#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;
+#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE)
+static int swap_bios = DEFAULT_SWAP_BIOS;
+static int get_swap_bios(void)
+{
+ int latch = READ_ONCE(swap_bios);
+ if (unlikely(latch <= 0))
+ latch = DEFAULT_SWAP_BIOS;
+ return latch;
+}
+
/*
* For mempools pre-allocation at the table loading time.
*/
@@ -460,7 +472,6 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
- __acquires(md->io_barrier)
{
struct dm_target *tgt;
struct dm_table *map;
@@ -494,7 +505,6 @@ retry:
}
static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
- __releases(md->io_barrier)
{
dm_put_live_table(md, srcu_idx);
}
@@ -515,7 +525,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
* subset of the parent bdev; require extra privileges.
*/
if (!capable(CAP_SYS_RAWIO)) {
- DMWARN_LIMIT(
+ DMDEBUG_LIMIT(
"%s: sending ioctl %x to DM device without required privilege.",
current->comm, cmd);
r = -ENOIOCTLCMD;
@@ -935,6 +945,11 @@ void disable_write_zeroes(struct mapped_device *md)
limits->max_write_zeroes_sectors = 0;
}
+static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
+{
+ return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
+}
+
static void clone_endio(struct bio *bio)
{
blk_status_t error = bio->bi_status;
@@ -972,6 +987,11 @@ static void clone_endio(struct bio *bio)
}
}
+ if (unlikely(swap_bios_limit(tio->ti, bio))) {
+ struct mapped_device *md = io->md;
+ up(&md->swap_bios_semaphore);
+ }
+
free_tio(tio);
dec_pending(io, error);
}
@@ -1250,6 +1270,22 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
+static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
+{
+ mutex_lock(&md->swap_bios_lock);
+ while (latch < md->swap_bios) {
+ cond_resched();
+ down(&md->swap_bios_semaphore);
+ md->swap_bios--;
+ }
+ while (latch > md->swap_bios) {
+ cond_resched();
+ up(&md->swap_bios_semaphore);
+ md->swap_bios++;
+ }
+ mutex_unlock(&md->swap_bios_lock);
+}
+
static blk_qc_t __map_bio(struct dm_target_io *tio)
{
int r;
@@ -1270,6 +1306,14 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
atomic_inc(&io->io_count);
sector = clone->bi_iter.bi_sector;
+ if (unlikely(swap_bios_limit(ti, clone))) {
+ struct mapped_device *md = io->md;
+ int latch = get_swap_bios();
+ if (unlikely(latch != md->swap_bios))
+ __set_swap_bios_limit(md, latch);
+ down(&md->swap_bios_semaphore);
+ }
+
r = ti->type->map(ti, clone);
switch (r) {
case DM_MAPIO_SUBMITTED:
@@ -1284,10 +1328,18 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
ret = generic_make_request(clone);
break;
case DM_MAPIO_KILL:
+ if (unlikely(swap_bios_limit(ti, clone))) {
+ struct mapped_device *md = io->md;
+ up(&md->swap_bios_semaphore);
+ }
free_tio(tio);
dec_pending(io, BLK_STS_IOERR);
break;
case DM_MAPIO_REQUEUE:
+ if (unlikely(swap_bios_limit(ti, clone))) {
+ struct mapped_device *md = io->md;
+ up(&md->swap_bios_semaphore);
+ }
free_tio(tio);
dec_pending(io, BLK_STS_DM_REQUEUE);
break;
@@ -1859,6 +1911,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
mutex_destroy(&md->suspend_lock);
mutex_destroy(&md->type_lock);
mutex_destroy(&md->table_devices_lock);
+ mutex_destroy(&md->swap_bios_lock);
dm_mq_cleanup_mapped_device(md);
}
@@ -1933,6 +1986,10 @@ static struct mapped_device *alloc_dev(int minor)
init_completion(&md->kobj_holder.completion);
md->kworker_task = NULL;
+ md->swap_bios = get_swap_bios();
+ sema_init(&md->swap_bios_semaphore, md->swap_bios);
+ mutex_init(&md->swap_bios_lock);
+
md->disk->major = _major;
md->disk->first_minor = minor;
md->disk->fops = &dm_blk_dops;
@@ -2354,6 +2411,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
set_bit(DMF_SUSPENDED, &md->flags);
+ set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
}
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
@@ -2679,7 +2737,9 @@ retry:
if (r)
goto out_unlock;
+ set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
+ clear_bit(DMF_POST_SUSPENDING, &md->flags);
out_unlock:
mutex_unlock(&md->suspend_lock);
@@ -2776,7 +2836,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
DMF_SUSPENDED_INTERNALLY);
+ set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
+ clear_bit(DMF_POST_SUSPENDING, &md->flags);
}
static void __dm_internal_resume(struct mapped_device *md)
@@ -2853,17 +2915,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie)
{
+ int r;
+ unsigned noio_flag;
char udev_cookie[DM_COOKIE_LENGTH];
char *envp[] = { udev_cookie, NULL };
+ noio_flag = memalloc_noio_save();
+
if (!cookie)
- return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+ r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
else {
snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
DM_COOKIE_ENV_VAR_NAME, cookie);
- return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
- action, envp);
+ r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+ action, envp);
}
+
+ memalloc_noio_restore(noio_flag);
+
+ return r;
}
uint32_t dm_next_uevent_seq(struct mapped_device *md)
@@ -2929,6 +2999,11 @@ int dm_suspended_md(struct mapped_device *md)
return test_bit(DMF_SUSPENDED, &md->flags);
}
+static int dm_post_suspending_md(struct mapped_device *md)
+{
+ return test_bit(DMF_POST_SUSPENDING, &md->flags);
+}
+
int dm_suspended_internally_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
@@ -2945,6 +3020,12 @@ int dm_suspended(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_suspended);
+int dm_post_suspending(struct dm_target *ti)
+{
+ return dm_post_suspending_md(dm_table_get_md(ti->table));
+}
+EXPORT_SYMBOL_GPL(dm_post_suspending);
+
int dm_noflush_suspending(struct dm_target *ti)
{
return __noflush_suspending(dm_table_get_md(ti->table));
@@ -3204,6 +3285,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
+module_param(swap_bios, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
+
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index fd8607124bdb..e79d2aa2372f 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1371,7 +1371,7 @@ __acquires(bitmap->lock)
if (bitmap->bp[page].hijacked ||
bitmap->bp[page].map == NULL)
csize = ((sector_t)1) << (bitmap->chunkshift +
- PAGE_COUNTER_SHIFT - 1);
+ PAGE_COUNTER_SHIFT);
else
csize = ((sector_t)1) << bitmap->chunkshift;
*blocks = csize - (offset & (csize - 1));
@@ -1725,6 +1725,8 @@ void md_bitmap_flush(struct mddev *mddev)
md_bitmap_daemon_work(mddev);
bitmap->daemon_lastrun -= sleep;
md_bitmap_daemon_work(mddev);
+ if (mddev->bitmap_info.external)
+ md_super_wait(mddev);
md_bitmap_update_sb(bitmap);
}
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 0b2af6e74fc3..107f36b9155f 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -669,9 +669,27 @@ out:
* Takes the lock on the TOKEN lock resource so no other
* node can communicate while the operation is underway.
*/
-static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
+static int lock_token(struct md_cluster_info *cinfo)
{
- int error, set_bit = 0;
+ int error;
+
+ error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
+ if (error) {
+ pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
+ __func__, __LINE__, error);
+ } else {
+ /* Lock the receive sequence */
+ mutex_lock(&cinfo->recv_mutex);
+ }
+ return error;
+}
+
+/* lock_comm()
+ * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
+ */
+static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
+{
+ int rv, set_bit = 0;
struct mddev *mddev = cinfo->mddev;
/*
@@ -682,34 +700,19 @@ static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
*/
if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
&cinfo->state)) {
- error = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
+ rv = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
&cinfo->state);
- WARN_ON_ONCE(error);
+ WARN_ON_ONCE(rv);
md_wakeup_thread(mddev->thread);
set_bit = 1;
}
- error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
- if (set_bit)
- clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
- if (error)
- pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
- __func__, __LINE__, error);
-
- /* Lock the receive sequence */
- mutex_lock(&cinfo->recv_mutex);
- return error;
-}
-
-/* lock_comm()
- * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
- */
-static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
-{
wait_event(cinfo->wait,
!test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state));
-
- return lock_token(cinfo, mddev_locked);
+ rv = lock_token(cinfo);
+ if (set_bit)
+ clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
+ return rv;
}
static void unlock_comm(struct md_cluster_info *cinfo)
@@ -789,9 +792,11 @@ static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg,
{
int ret;
- lock_comm(cinfo, mddev_locked);
- ret = __sendmsg(cinfo, cmsg);
- unlock_comm(cinfo);
+ ret = lock_comm(cinfo, mddev_locked);
+ if (!ret) {
+ ret = __sendmsg(cinfo, cmsg);
+ unlock_comm(cinfo);
+ }
return ret;
}
@@ -1063,7 +1068,7 @@ static int metadata_update_start(struct mddev *mddev)
return 0;
}
- ret = lock_token(cinfo, 1);
+ ret = lock_token(cinfo);
clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
return ret;
}
@@ -1181,7 +1186,10 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
int raid_slot = -1;
md_update_sb(mddev, 1);
- lock_comm(cinfo, 1);
+ if (lock_comm(cinfo, 1)) {
+ pr_err("%s: lock_comm failed\n", __func__);
+ return;
+ }
memset(&cmsg, 0, sizeof(cmsg));
cmsg.type = cpu_to_le32(METADATA_UPDATED);
@@ -1330,7 +1338,8 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
cmsg.type = cpu_to_le32(NEWDISK);
memcpy(cmsg.uuid, uuid, 16);
cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
- lock_comm(cinfo, 1);
+ if (lock_comm(cinfo, 1))
+ return -EAGAIN;
ret = __sendmsg(cinfo, &cmsg);
if (ret) {
unlock_comm(cinfo);
@@ -1443,6 +1452,7 @@ static void unlock_all_bitmaps(struct mddev *mddev)
}
}
kfree(cinfo->other_bitmap_lockres);
+ cinfo->other_bitmap_lockres = NULL;
}
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 62f214d43e15..fae6a983ceee 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -474,8 +474,10 @@ static void md_submit_flush_data(struct work_struct *ws)
* could wait for this and below md_handle_request could wait for those
* bios because of suspend check
*/
+ spin_lock_irq(&mddev->lock);
mddev->last_flush = mddev->start_flush;
mddev->flush_bio = NULL;
+ spin_unlock_irq(&mddev->lock);
wake_up(&mddev->sb_wait);
if (bio->bi_iter.bi_size == 0) {
@@ -581,8 +583,35 @@ void mddev_init(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(mddev_init);
+static struct mddev *mddev_find_locked(dev_t unit)
+{
+ struct mddev *mddev;
+
+ list_for_each_entry(mddev, &all_mddevs, all_mddevs)
+ if (mddev->unit == unit)
+ return mddev;
+
+ return NULL;
+}
+
static struct mddev *mddev_find(dev_t unit)
{
+ struct mddev *mddev;
+
+ if (MAJOR(unit) != MD_MAJOR)
+ unit &= ~((1 << MdpMinorShift) - 1);
+
+ spin_lock(&all_mddevs_lock);
+ mddev = mddev_find_locked(unit);
+ if (mddev)
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
+
+ return mddev;
+}
+
+static struct mddev *mddev_find_or_alloc(dev_t unit)
+{
struct mddev *mddev, *new = NULL;
if (unit && MAJOR(unit) != MD_MAJOR)
@@ -592,13 +621,13 @@ static struct mddev *mddev_find(dev_t unit)
spin_lock(&all_mddevs_lock);
if (unit) {
- list_for_each_entry(mddev, &all_mddevs, all_mddevs)
- if (mddev->unit == unit) {
- mddev_get(mddev);
- spin_unlock(&all_mddevs_lock);
- kfree(new);
- return mddev;
- }
+ mddev = mddev_find_locked(unit);
+ if (mddev) {
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
+ kfree(new);
+ return mddev;
+ }
if (new) {
list_add(&new->all_mddevs, &all_mddevs);
@@ -624,12 +653,7 @@ static struct mddev *mddev_find(dev_t unit)
return NULL;
}
- is_free = 1;
- list_for_each_entry(mddev, &all_mddevs, all_mddevs)
- if (mddev->unit == dev) {
- is_free = 0;
- break;
- }
+ is_free = !mddev_find_locked(dev);
}
new->unit = dev;
new->md_minor = MINOR(dev);
@@ -1182,6 +1206,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
+ if (mddev->level == 0)
+ mddev->layout = -1;
if (sb->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
@@ -1598,6 +1624,10 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
}
+ if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
+ sb->level != 0)
+ return -EINVAL;
+
if (!refdev) {
ret = 1;
} else {
@@ -1708,6 +1738,10 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
+ if (mddev->level == 0 &&
+ !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
+ mddev->layout = -1;
+
if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
set_bit(MD_HAS_JOURNAL, &mddev->flags);
@@ -5290,7 +5324,7 @@ static int md_alloc(dev_t dev, char *name)
* writing to /sys/module/md_mod/parameters/new_array.
*/
static DEFINE_MUTEX(disks_mutex);
- struct mddev *mddev = mddev_find(dev);
+ struct mddev *mddev = mddev_find_or_alloc(dev);
struct gendisk *disk;
int partitioned;
int shift;
@@ -5874,7 +5908,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
static void mddev_detach(struct mddev *mddev)
{
md_bitmap_wait_behind_writes(mddev);
- if (mddev->pers && mddev->pers->quiesce) {
+ if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
@@ -6143,11 +6177,9 @@ static void autorun_devices(int part)
md_probe(dev, NULL, NULL);
mddev = mddev_find(dev);
- if (!mddev || !mddev->gendisk) {
- if (mddev)
- mddev_put(mddev);
+ if (!mddev)
break;
- }
+
if (mddev_lock(mddev))
pr_warn("md: %s locked, cannot run\n", mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
@@ -6554,8 +6586,10 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
goto busy;
kick_rdev:
- if (mddev_is_clustered(mddev))
- md_cluster_ops->remove_disk(mddev, rdev);
+ if (mddev_is_clustered(mddev)) {
+ if (md_cluster_ops->remove_disk(mddev, rdev))
+ goto busy;
+ }
md_kick_rdev_from_array(rdev);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
@@ -6784,6 +6818,9 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->external = 0;
mddev->layout = info->layout;
+ if (mddev->level == 0)
+ /* Cannot trust RAID0 layout info here */
+ mddev->layout = -1;
mddev->chunk_sectors = info->chunk_size >> 9;
if (mddev->persistent) {
@@ -6882,6 +6919,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
return -EINVAL;
if (mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
mddev->reshape_position != MaxSector)
return -EBUSY;
@@ -7201,8 +7239,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
err = -EBUSY;
goto out;
}
- WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
- set_bit(MD_CLOSING, &mddev->flags);
+ if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
+ mutex_unlock(&mddev->open_mutex);
+ err = -EBUSY;
+ goto out;
+ }
did_set_md_closing = true;
mutex_unlock(&mddev->open_mutex);
sync_blockdev(bdev);
@@ -7438,9 +7479,9 @@ static int md_open(struct block_device *bdev, fmode_t mode)
*/
mddev_put(mddev);
/* Wait until bdev->bd_disk is definitely gone */
- flush_workqueue(md_misc_wq);
- /* Then retry the open from the top */
- return -ERESTARTSYS;
+ if (work_pending(&mddev->del_work))
+ flush_workqueue(md_misc_wq);
+ return -EBUSY;
}
BUG_ON(mddev != bdev->bd_disk->private_data);
@@ -7773,7 +7814,11 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
loff_t l = *pos;
struct mddev *mddev;
- if (l >= 0x10000)
+ if (l == 0x10000) {
+ ++*pos;
+ return (void *)2;
+ }
+ if (l > 0x10000)
return NULL;
if (!l--)
/* header */
@@ -8861,11 +8906,11 @@ void md_check_recovery(struct mddev *mddev)
}
if (mddev_is_clustered(mddev)) {
- struct md_rdev *rdev;
+ struct md_rdev *rdev, *tmp;
/* kick the device if another node issued a
* remove disk.
*/
- rdev_for_each(rdev, mddev) {
+ rdev_for_each_safe(rdev, tmp, mddev) {
if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
rdev->raid_disk < 0)
md_kick_rdev_from_array(rdev);
@@ -9165,7 +9210,7 @@ err_wq:
static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
{
struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
- struct md_rdev *rdev2;
+ struct md_rdev *rdev2, *tmp;
int role, ret;
char b[BDEVNAME_SIZE];
@@ -9182,7 +9227,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
}
/* Check for change of roles in the active devices */
- rdev_for_each(rdev2, mddev) {
+ rdev_for_each_safe(rdev2, tmp, mddev) {
if (test_bit(Faulty, &rdev2->flags))
continue;
@@ -9224,8 +9269,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
}
}
- if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
- update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
+ if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
+ ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
+ if (ret)
+ pr_warn("md: updating array disks failed. %d\n", ret);
+ }
/* Finally set the event to be up to date */
mddev->events = le64_to_cpu(sb->events);
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index a240990a7f33..5673f8eb5f88 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -34,12 +34,12 @@ struct node_header {
__le32 max_entries;
__le32 value_size;
__le32 padding;
-} __packed;
+} __attribute__((packed, aligned(8)));
struct btree_node {
struct node_header header;
__le64 keys[0];
-} __packed;
+} __attribute__((packed, aligned(8)));
/*
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 17aef55ed708..a284762e548e 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -337,6 +337,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
*/
begin = do_div(index_begin, ll->entries_per_block);
end = do_div(end, ll->entries_per_block);
+ if (end == 0)
+ end = ll->entries_per_block;
for (i = index_begin; i < index_end; i++, begin = 0) {
struct dm_block *blk;
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
index 8de63ce39bdd..87e17909ef52 100644
--- a/drivers/md/persistent-data/dm-space-map-common.h
+++ b/drivers/md/persistent-data/dm-space-map-common.h
@@ -33,7 +33,7 @@ struct disk_index_entry {
__le64 blocknr;
__le32 nr_free;
__le32 none_free_before;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
#define MAX_METADATA_BITMAPS 255
@@ -43,7 +43,7 @@ struct disk_metadata_index {
__le64 blocknr;
struct disk_index_entry index[MAX_METADATA_BITMAPS];
-} __packed;
+} __attribute__ ((packed, aligned(8)));
struct ll_disk;
@@ -86,7 +86,7 @@ struct disk_sm_root {
__le64 nr_allocated;
__le64 bitmap_root;
__le64 ref_count_root;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
#define ENTRIES_PER_BYTE 4
@@ -94,7 +94,7 @@ struct disk_bitmap_header {
__le32 csum;
__le32 not_used;
__le64 blocknr;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
enum allocation_event {
SM_NONE,
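The __packed → __attribute__((packed, aligned(8))) changes here and in dm-btree-internal.h keep the on-disk layout byte-for-byte identical while telling the compiler the structures are still 8-byte aligned, so 64-bit members are not accessed with byte-at-a-time code. A userspace sketch with an illustrative header (not the dm structs) showing the effect:

	#include <stdio.h>
	#include <stdint.h>

	struct hdr_packed {
		uint32_t csum;
		uint32_t flags;
		uint64_t blocknr;
	} __attribute__((packed));

	struct hdr_packed_aligned {
		uint32_t csum;
		uint32_t flags;
		uint64_t blocknr;
	} __attribute__((packed, aligned(8)));

	int main(void)
	{
		/* Same size and layout, but the alignment guarantee differs. */
		printf("packed:            size=%zu align=%zu\n",
		       sizeof(struct hdr_packed), _Alignof(struct hdr_packed));
		printf("packed+aligned(8): size=%zu align=%zu\n",
		       sizeof(struct hdr_packed_aligned),
		       _Alignof(struct hdr_packed_aligned));
		return 0;
	}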
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index efa9df2336da..0272102b207e 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -152,6 +152,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
if (conf->nr_strip_zones == 1) {
conf->layout = RAID0_ORIG_LAYOUT;
+ } else if (mddev->layout == RAID0_ORIG_LAYOUT ||
+ mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
+ conf->layout = mddev->layout;
} else if (default_layout == RAID0_ORIG_LAYOUT ||
default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
conf->layout = default_layout;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index abcb4c3a76c1..876d3e1339d1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -445,6 +445,8 @@ static void raid1_end_write_request(struct bio *bio)
if (!test_bit(Faulty, &rdev->flags))
set_bit(R1BIO_WriteError, &r1_bio->state);
else {
+ /* Fail the request */
+ set_bit(R1BIO_Degraded, &r1_bio->state);
/* Finished with this branch */
r1_bio->bios[mirror] = NULL;
to_put = bio;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 02c5e390f89f..8e0f936b3e37 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1138,7 +1138,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
struct md_rdev *err_rdev = NULL;
gfp_t gfp = GFP_NOIO;
- if (r10_bio->devs[slot].rdev) {
+ if (slot >= 0 && r10_bio->devs[slot].rdev) {
/*
* This is an error retry, but we cannot
* safely dereference the rdev in the r10_bio,
@@ -1547,6 +1547,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
r10_bio->mddev = mddev;
r10_bio->sector = bio->bi_iter.bi_sector;
r10_bio->state = 0;
+ r10_bio->read_slot = -1;
memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
if (bio_data_dir(bio) == READ)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 01021382131b..c7bda4b0bced 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2417,8 +2417,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
} else
err = -ENOMEM;
- mutex_unlock(&conf->cache_size_mutex);
-
conf->slab_cache = sc;
conf->active_name = 1-conf->active_name;
@@ -2441,6 +2439,8 @@ static int resize_stripes(struct r5conf *conf, int newsize)
if (!err)
conf->pool_size = newsize;
+ mutex_unlock(&conf->cache_size_mutex);
+
return err;
}
@@ -3596,6 +3596,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
* is missing/faulty, then we need to read everything we can.
*/
if (sh->raid_conf->level != 6 &&
+ sh->raid_conf->rmw_level != PARITY_DISABLE_RMW &&
sh->sector < sh->raid_conf->mddev->recovery_cp)
/* reconstruct-write isn't being forced */
return 0;
@@ -4832,7 +4833,7 @@ static void handle_stripe(struct stripe_head *sh)
* or to load a block that is being partially written.
*/
if (s.to_read || s.non_overwrite
- || (conf->level == 6 && s.to_write && s.failed)
+ || (s.to_write && s.failed)
|| (s.syncing && (s.uptodate + s.compute < disks))
|| s.replacing
|| s.expanding)
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index ba7e976bf6dc..60b20ae02b05 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -1668,6 +1668,10 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
unsigned j;
log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
+ if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
+ dprintk(1, "unknown logical address type\n");
+ return -EINVAL;
+ }
if (type_mask & (1 << log_addrs->log_addr_type[i])) {
dprintk(1, "duplicate logical address type\n");
return -EINVAL;
@@ -1688,10 +1692,6 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
dprintk(1, "invalid primary device type\n");
return -EINVAL;
}
- if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
- dprintk(1, "unknown logical address type\n");
- return -EINVAL;
- }
for (j = 0; j < feature_sz; j++) {
if ((features[j] & 0x80) == 0) {
if (op_is_dev_features)
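Moving the log_addr_type range check ahead of its first use matters because the value (a u8 that can be anything up to 255 from userspace) is used as a shift count into type_mask before the later validation would have run, and a shift count at or beyond the width of the mask is undefined behaviour. A tiny self-contained sketch of the ordering; the bound and values are illustrative:

	#include <stdio.h>

	int main(void)
	{
		unsigned int type_mask = 0;
		unsigned int type = 9;             /* pretend this came from userspace */
		const unsigned int max_type = 7;   /* illustrative upper bound */

		if (type > max_type) {             /* validate before using it as a shift */
			fprintf(stderr, "unknown type\n");
			return 1;
		}
		if (type_mask & (1u << type))
			return 1;                  /* duplicate */
		type_mask |= 1u << type;
		return 0;
	}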
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 4961573850d5..b2b3f779592f 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -147,7 +147,13 @@ static long cec_adap_g_log_addrs(struct cec_adapter *adap,
struct cec_log_addrs log_addrs;
mutex_lock(&adap->lock);
- log_addrs = adap->log_addrs;
+ /*
+ * We use memcpy here instead of assignment since there is a
+ * hole at the end of struct cec_log_addrs that an assignment
+ * might ignore. So when we do copy_to_user() we could leak
+ * one byte of memory.
+ */
+ memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs));
if (!adap->is_configured)
memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
sizeof(log_addrs.log_addr));
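The memcpy() above copies the padding bytes of the source object as well, whereas a struct assignment is free to skip them and leave stack garbage in the destination that copy_to_user() would then expose. A minimal sketch of the idea; struct reply and fill_reply() are illustrative, not the CEC API:

	#include <string.h>
	#include <stdint.h>

	struct reply {
		uint64_t a;
		uint8_t  b;
		/* 7 bytes of padding typically follow 'b' */
	};

	static void fill_reply(struct reply *out, const struct reply *src)
	{
		/*
		 * '*out = *src' may copy only the named members, leaving the
		 * padding in 'out' uninitialised; memcpy() of the whole object
		 * copies the padding from 'src' too, so no uninitialised bytes
		 * can later be handed to userspace.
		 */
		memcpy(out, src, sizeof(*out));
	}

	int main(void)
	{
		struct reply src = { .a = 1, .b = 2 }, dst;

		fill_reply(&dst, &src);
		return (int)dst.b - 2;   /* 0 on success */
	}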
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index 43cfd1dbda01..afca47b97c2a 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -1180,12 +1180,15 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
rc = dvb_create_media_graph(&client->adapter, true);
if (rc < 0) {
pr_err("dvb_create_media_graph failed %d\n", rc);
- goto client_error;
+ goto media_graph_error;
}
pr_info("DVB interface registered.\n");
return 0;
+media_graph_error:
+ smsdvb_debugfs_release(client);
+
client_error:
dvb_unregister_frontend(&client->frontend);
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 04dc2f4bc7aa..d8f19a4d214a 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -241,6 +241,7 @@ static void dvb_media_device_free(struct dvb_device *dvbdev)
if (dvbdev->adapter->conn) {
media_device_unregister_entity(dvbdev->adapter->conn);
+ kfree(dvbdev->adapter->conn);
dvbdev->adapter->conn = NULL;
kfree(dvbdev->adapter->conn_pads);
dvbdev->adapter->conn_pads = NULL;
diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c
index 8d31cf3f4f07..3a577788041d 100644
--- a/drivers/media/dvb-frontends/sp8870.c
+++ b/drivers/media/dvb-frontends/sp8870.c
@@ -293,7 +293,9 @@ static int sp8870_set_frontend_parameters(struct dvb_frontend *fe)
sp8870_writereg(state, 0xc05, reg0xc05);
// read status reg in order to clear pending irqs
- sp8870_readreg(state, 0x200);
+ err = sp8870_readreg(state, 0x200);
+ if (err < 0)
+ return err;
// system controller start
sp8870_microcontroller_start(state);
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 097c42d3f8c2..df0c7243eafe 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -483,10 +483,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status)
goto error;
if (dev->delivery_system == SYS_DVBS) {
- dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 |
- buf[2] << 8 | buf[3] << 0;
- dev->post_bit_error += buf[0] << 24 | buf[1] << 16 |
- buf[2] << 8 | buf[3] << 0;
+ u32 bit_error = buf[0] << 24 | buf[1] << 16 |
+ buf[2] << 8 | buf[3] << 0;
+
+ dev->dvbv3_ber = bit_error;
+ dev->post_bit_error += bit_error;
c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
dev->block_error += buf[4] << 8 | buf[5] << 0;
diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c
index 92f4112d2e37..2ac9d24d3f0c 100644
--- a/drivers/media/firewire/firedtv-fw.c
+++ b/drivers/media/firewire/firedtv-fw.c
@@ -271,6 +271,10 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
name_len = fw_csr_string(unit->directory, CSR_MODEL,
name, sizeof(name));
+ if (name_len < 0) {
+ err = name_len;
+ goto fail_free;
+ }
for (i = ARRAY_SIZE(model_names); --i; )
if (strlen(model_names[i]) <= name_len &&
strncmp(name, model_names[i], name_len) == 0)
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 6869bb593a68..4052abeead50 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -1965,7 +1965,7 @@ static int adv7511_remove(struct i2c_client *client)
adv7511_set_isr(sd, false);
adv7511_init_setup(sd);
- cancel_delayed_work(&state->edid_handler);
+ cancel_delayed_work_sync(&state->edid_handler);
i2c_unregister_device(state->i2c_edid);
if (state->i2c_cec)
i2c_unregister_device(state->i2c_cec);
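This hunk and the adv7604/adv7842/tc358743 ones below switch to cancel_delayed_work_sync(): the plain cancel only removes a work item that has not started yet, so a handler that is already executing could still touch state after remove() tears it down, while the _sync variant also waits for a running instance to finish. A sketch of the remove-path pattern with illustrative names:

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct my_state {
		struct delayed_work hotplug_work;
		/* ... resources the work handler dereferences ... */
	};

	static void my_remove(struct my_state *state)
	{
		/* Waits for a running handler, so the kfree() below cannot race. */
		cancel_delayed_work_sync(&state->hotplug_work);
		kfree(state);
	}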
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index a4b0a89c7e7e..04577d409e63 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -3560,7 +3560,7 @@ static int adv76xx_remove(struct i2c_client *client)
io_write(sd, 0x6e, 0);
io_write(sd, 0x73, 0);
- cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
adv76xx_unregister_clients(to_state(sd));
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 58662ba92d4f..d0ed20652ddb 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3585,7 +3585,7 @@ static int adv7842_remove(struct i2c_client *client)
struct adv7842_state *state = to_state(sd);
adv7842_irq_enable(sd, false);
- cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
adv7842_unregister_clients(sd);
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 8cc3bdb7f608..0fe8b869245b 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -1239,6 +1239,8 @@ static int imx274_s_frame_interval(struct v4l2_subdev *sd,
ret = imx274_set_frame_interval(imx274, fi->interval);
if (!ret) {
+ fi->interval = imx274->frame_interval;
+
/*
* exposure time range is decided by frame interval
* need to update it after frame interval changes
@@ -1760,9 +1762,9 @@ static int imx274_set_frame_interval(struct stimx274 *priv,
__func__, frame_interval.numerator,
frame_interval.denominator);
- if (frame_interval.numerator == 0) {
- err = -EINVAL;
- goto fail;
+ if (frame_interval.numerator == 0 || frame_interval.denominator == 0) {
+ frame_interval.denominator = IMX274_DEF_FRAME_RATE;
+ frame_interval.numerator = 1;
}
req_frame_rate = (u32)(frame_interval.denominator
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 12e79f9e32d5..d9a964430609 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -768,7 +768,8 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
if (ret) {
- info->set_power(&client->dev, 0);
+ if (info->set_power)
+ info->set_power(&client->dev, 0);
return ret;
}
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index 008a082cb8ad..dddc5ef50dd4 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -511,7 +511,7 @@ static void max2175_set_bbfilter(struct max2175 *ctx)
}
}
-static bool max2175_set_csm_mode(struct max2175 *ctx,
+static int max2175_set_csm_mode(struct max2175 *ctx,
enum max2175_csm_mode new_mode)
{
int ret = max2175_poll_csm_ready(ctx);
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 2023df14f828..aa9c0b7ee7a2 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -2829,8 +2829,8 @@ static int ov5640_probe(struct i2c_client *client,
free_ctrls:
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
entity_cleanup:
- mutex_destroy(&sensor->lock);
media_entity_cleanup(&sensor->sd.entity);
+ mutex_destroy(&sensor->lock);
return ret;
}
@@ -2840,9 +2840,9 @@ static int ov5640_remove(struct i2c_client *client)
struct ov5640_dev *sensor = to_ov5640_dev(sd);
v4l2_async_unregister_subdev(&sensor->sd);
- mutex_destroy(&sensor->lock);
media_entity_cleanup(&sensor->sd.entity);
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ mutex_destroy(&sensor->lock);
return 0;
}
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 53dd30d96e69..62dfdcb6e0bd 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -2081,7 +2081,8 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
/* By default, V4L2_CID_PIXEL_RATE is read only */
ov5670->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops,
- V4L2_CID_PIXEL_RATE, 0,
+ V4L2_CID_PIXEL_RATE,
+ link_freq_configs[0].pixel_rate,
link_freq_configs[0].pixel_rate,
1,
link_freq_configs[0].pixel_rate);
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 5d107c53364d..be242bb8fefc 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -974,16 +974,9 @@ unlock_and_return:
return ret;
}
-/* Calculate the delay in us by clock rate and clock cycles */
-static inline u32 ov5695_cal_delay(u32 cycles)
-{
- return DIV_ROUND_UP(cycles, OV5695_XVCLK_FREQ / 1000 / 1000);
-}
-
static int __ov5695_power_on(struct ov5695 *ov5695)
{
- int ret;
- u32 delay_us;
+ int i, ret;
struct device *dev = &ov5695->client->dev;
ret = clk_prepare_enable(ov5695->xvclk);
@@ -994,21 +987,28 @@ static int __ov5695_power_on(struct ov5695 *ov5695)
gpiod_set_value_cansleep(ov5695->reset_gpio, 1);
- ret = regulator_bulk_enable(OV5695_NUM_SUPPLIES, ov5695->supplies);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulators\n");
- goto disable_clk;
+ /*
+ * The hardware requires the regulators to be powered on in order,
+ * so enable them one by one.
+ */
+ for (i = 0; i < OV5695_NUM_SUPPLIES; i++) {
+ ret = regulator_enable(ov5695->supplies[i].consumer);
+ if (ret) {
+ dev_err(dev, "Failed to enable %s: %d\n",
+ ov5695->supplies[i].supply, ret);
+ goto disable_reg_clk;
+ }
}
gpiod_set_value_cansleep(ov5695->reset_gpio, 0);
- /* 8192 cycles prior to first SCCB transaction */
- delay_us = ov5695_cal_delay(8192);
- usleep_range(delay_us, delay_us * 2);
+ usleep_range(1000, 1200);
return 0;
-disable_clk:
+disable_reg_clk:
+ for (--i; i >= 0; i--)
+ regulator_disable(ov5695->supplies[i].consumer);
clk_disable_unprepare(ov5695->xvclk);
return ret;
@@ -1016,9 +1016,22 @@ disable_clk:
static void __ov5695_power_off(struct ov5695 *ov5695)
{
+ struct device *dev = &ov5695->client->dev;
+ int i, ret;
+
clk_disable_unprepare(ov5695->xvclk);
gpiod_set_value_cansleep(ov5695->reset_gpio, 1);
- regulator_bulk_disable(OV5695_NUM_SUPPLIES, ov5695->supplies);
+
+ /*
+ * The hardware requires the regulators to be powered off in order,
+ * so disable them one by one.
+ */
+ for (i = OV5695_NUM_SUPPLIES - 1; i >= 0; i--) {
+ ret = regulator_disable(ov5695->supplies[i].consumer);
+ if (ret)
+ dev_err(dev, "Failed to disable %s: %d\n",
+ ov5695->supplies[i].supply, ret);
+ }
}
static int __maybe_unused ov5695_runtime_resume(struct device *dev)
@@ -1288,7 +1301,7 @@ static int ov5695_probe(struct i2c_client *client,
if (clk_get_rate(ov5695->xvclk) != OV5695_XVCLK_FREQ)
dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n");
- ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ov5695->reset_gpio)) {
dev_err(dev, "Failed to get reset-gpios\n");
return -EINVAL;
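The loop above replaces regulator_bulk_enable() so the supplies come up strictly in array order, with the already-enabled ones rolled back on failure. A sketch of that pattern against the regulator consumer API; the helper name is illustrative:

	#include <linux/regulator/consumer.h>

	static int power_on_in_order(struct regulator_bulk_data *supplies, int num)
	{
		int i, ret;

		for (i = 0; i < num; i++) {
			ret = regulator_enable(supplies[i].consumer);
			if (ret)
				goto unwind;
		}
		return 0;

	unwind:
		/* Disable the supplies already enabled, in reverse order. */
		while (--i >= 0)
			regulator_disable(supplies[i].consumer);
		return ret;
	}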
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 4731e1c72f96..0a434bdce3b3 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2337,11 +2337,12 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
if (rval < 0) {
if (rval != -EBUSY && rval != -EAGAIN)
pm_runtime_set_active(&client->dev);
- pm_runtime_put(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
return -ENODEV;
}
if (smiapp_read_nvm(sensor, sensor->nvm)) {
+ pm_runtime_put(&client->dev);
dev_err(&client->dev, "nvm read failed\n");
return -ENODEV;
}
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index e4c0a27b636a..041b16965b96 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -919,8 +919,8 @@ static const struct cec_adap_ops tc358743_cec_adap_ops = {
.adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable,
};
-static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
- bool *handled)
+static void tc358743_cec_handler(struct v4l2_subdev *sd, u16 intstatus,
+ bool *handled)
{
struct tc358743_state *state = to_state(sd);
unsigned int cec_rxint, cec_txint;
@@ -953,7 +953,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
cec_transmit_attempt_done(state->cec_adap,
CEC_TX_STATUS_ERROR);
}
- *handled = true;
+ if (handled)
+ *handled = true;
}
if ((intstatus & MASK_CEC_RINT) &&
(cec_rxint & MASK_CECRIEND)) {
@@ -968,7 +969,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
msg.msg[i] = v & 0xff;
}
cec_received_msg(state->cec_adap, &msg);
- *handled = true;
+ if (handled)
+ *handled = true;
}
i2c_wr16(sd, INTSTATUS,
intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
@@ -1432,7 +1434,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
#ifdef CONFIG_VIDEO_TC358743_CEC
if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) {
- tc358743_cec_isr(sd, intstatus, handled);
+ tc358743_cec_handler(sd, intstatus, handled);
i2c_wr16(sd, INTSTATUS,
intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT);
@@ -1461,7 +1463,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
{
struct tc358743_state *state = dev_id;
- bool handled;
+ bool handled = false;
tc358743_isr(&state->sd, 0, &handled);
@@ -2190,7 +2192,7 @@ static int tc358743_remove(struct i2c_client *client)
del_timer_sync(&state->timer);
flush_work(&state->work_i2c_poll);
}
- cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
cec_unregister_adapter(state->cec_adap);
v4l2_async_unregister_subdev(sd);
v4l2_device_unregister_subdev(sd);
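Two related fixes above: the CEC handler now tolerates a NULL 'handled' pointer, and the threaded IRQ handler initialises its local flag, since the ISR only ever sets it to true. A tiny sketch of the out-parameter convention; the function names are illustrative:

	#include <stdbool.h>
	#include <stddef.h>

	static void service_event(bool *handled)
	{
		/* ... handle the event ... */
		if (handled)
			*handled = true;   /* caller may not care (NULL) */
	}

	int main(void)
	{
		bool handled = false;      /* must start false: only ever set true */

		service_event(&handled);   /* interrupt path */
		service_event(NULL);       /* polling path */
		return handled ? 0 : 1;
	}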
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index f27d294dcbef..dd50acc085d8 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -105,7 +105,7 @@ static int amg88xx_xfer(struct video_i2c_data *data, char *buf)
return (ret == 2) ? 0 : -EIO;
}
-#if IS_ENABLED(CONFIG_HWMON)
+#if IS_REACHABLE(CONFIG_HWMON)
static const u32 amg88xx_temp_config[] = {
HWMON_T_INPUT,
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index ed518b1f82e4..d04ed438a45d 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -568,6 +568,38 @@ static void media_device_release(struct media_devnode *devnode)
dev_dbg(devnode->parent, "Media device released\n");
}
+static void __media_device_unregister_entity(struct media_entity *entity)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_link *link, *tmp;
+ struct media_interface *intf;
+ unsigned int i;
+
+ ida_free(&mdev->entity_internal_idx, entity->internal_idx);
+
+ /* Remove all interface links pointing to this entity */
+ list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
+ list_for_each_entry_safe(link, tmp, &intf->links, list) {
+ if (link->entity == entity)
+ __media_remove_intf_link(link);
+ }
+ }
+
+ /* Remove all data links that belong to this entity */
+ __media_entity_remove_links(entity);
+
+ /* Remove all pads that belong to this entity */
+ for (i = 0; i < entity->num_pads; i++)
+ media_gobj_destroy(&entity->pads[i].graph_obj);
+
+ /* Remove the entity */
+ media_gobj_destroy(&entity->graph_obj);
+
+ /* invoke entity_notify callbacks to handle entity removal?? */
+
+ entity->graph_obj.mdev = NULL;
+}
+
/**
* media_device_register_entity - Register an entity with a media device
* @mdev: The media device
@@ -625,6 +657,7 @@ int __must_check media_device_register_entity(struct media_device *mdev,
*/
ret = media_graph_walk_init(&new, mdev);
if (ret) {
+ __media_device_unregister_entity(entity);
mutex_unlock(&mdev->graph_mutex);
return ret;
}
@@ -637,38 +670,6 @@ int __must_check media_device_register_entity(struct media_device *mdev,
}
EXPORT_SYMBOL_GPL(media_device_register_entity);
-static void __media_device_unregister_entity(struct media_entity *entity)
-{
- struct media_device *mdev = entity->graph_obj.mdev;
- struct media_link *link, *tmp;
- struct media_interface *intf;
- unsigned int i;
-
- ida_free(&mdev->entity_internal_idx, entity->internal_idx);
-
- /* Remove all interface links pointing to this entity */
- list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
- list_for_each_entry_safe(link, tmp, &intf->links, list) {
- if (link->entity == entity)
- __media_remove_intf_link(link);
- }
- }
-
- /* Remove all data links that belong to this entity */
- __media_entity_remove_links(entity);
-
- /* Remove all pads that belong to this entity */
- for (i = 0; i < entity->num_pads; i++)
- media_gobj_destroy(&entity->pads[i].graph_obj);
-
- /* Remove the entity */
- media_gobj_destroy(&entity->graph_obj);
-
- /* invoke entity_notify callbacks to handle entity removal?? */
-
- entity->graph_obj.mdev = NULL;
-}
-
void media_device_unregister_entity(struct media_entity *entity)
{
struct media_device *mdev = entity->graph_obj.mdev;
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index cf05e11da01b..4c042ba6de91 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -4055,11 +4055,13 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
btv->id = dev->device;
if (pci_enable_device(dev)) {
pr_warn("%d: Can't enable device\n", btv->c.nr);
- return -EIO;
+ result = -EIO;
+ goto free_mem;
}
if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
pr_warn("%d: No suitable DMA available\n", btv->c.nr);
- return -EIO;
+ result = -EIO;
+ goto free_mem;
}
if (!request_mem_region(pci_resource_start(dev,0),
pci_resource_len(dev,0),
@@ -4067,7 +4069,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
pr_warn("%d: can't request iomem (0x%llx)\n",
btv->c.nr,
(unsigned long long)pci_resource_start(dev, 0));
- return -EBUSY;
+ result = -EBUSY;
+ goto free_mem;
}
pci_set_master(dev);
pci_set_command(dev);
@@ -4253,6 +4256,10 @@ fail0:
release_mem_region(pci_resource_start(btv->c.pci,0),
pci_resource_len(btv->c.pci,0));
pci_disable_device(btv->c.pci);
+
+free_mem:
+ bttvs[btv->c.nr] = NULL;
+ kfree(btv);
return result;
}
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index fd5c52b21436..a1d738969d7b 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2084,6 +2084,10 @@ static struct {
* 0x1451 is PCI ID for the IOMMU found on Ryzen
*/
{ PCI_VENDOR_ID_AMD, 0x1451 },
+ /* According to sudo lspci -nn,
+ * 0x1423 is the PCI ID for the IOMMU found on Kaveri
+ */
+ { PCI_VENDOR_ID_AMD, 0x1423 },
};
static bool cx23885_does_need_dma_reset(void)
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index 00329f668b59..5177479d13d3 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -1178,8 +1178,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
return -ENOMEM;
spin_lock_init(&state->rx_kfifo_lock);
- if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL))
+ if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE,
+ GFP_KERNEL)) {
+ kfree(state);
return -ENOMEM;
+ }
state->dev = dev;
sd = &state->sd;
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index 2f0171134f7e..e04fe9f17b7a 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -986,8 +986,10 @@ int cx25821_riscmem_alloc(struct pci_dev *pci,
__le32 *cpu;
dma_addr_t dma = 0;
- if (NULL != risc->cpu && risc->size < size)
+ if (risc->cpu && risc->size < size) {
pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
+ risc->cpu = NULL;
+ }
if (NULL == risc->cpu) {
cpu = pci_zalloc_consistent(pci, size, &dma);
if (NULL == cpu)
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index ca1a4d8e972e..cdf4d07343a7 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -801,6 +801,7 @@ static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
atomic_dec(&q->bufs_queued);
vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
state);
+ q->bufs[i] = NULL;
}
}
}
@@ -1245,29 +1246,15 @@ static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *fmt)
{
struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
- struct v4l2_subdev_format format;
- int ret;
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
- return 0;
- }
- if (fmt->pad == CIO2_PAD_SINK) {
- format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
- &format);
+ mutex_lock(&q->subdev_lock);
- if (ret)
- return ret;
- /* update colorspace etc */
- q->subdev_fmt.colorspace = format.format.colorspace;
- q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc;
- q->subdev_fmt.quantization = format.format.quantization;
- q->subdev_fmt.xfer_func = format.format.xfer_func;
- }
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ else
+ fmt->format = q->subdev_fmt;
- fmt->format = q->subdev_fmt;
+ mutex_unlock(&q->subdev_lock);
return 0;
}
@@ -1284,6 +1271,9 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *fmt)
{
struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
+ struct v4l2_mbus_framefmt *mbus;
+ u32 mbus_code = fmt->format.code;
+ unsigned int i;
/*
* Only allow setting sink pad format;
@@ -1292,16 +1282,29 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
if (fmt->pad == CIO2_PAD_SOURCE)
return cio2_subdev_get_fmt(sd, cfg, fmt);
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
- } else {
- /* It's the sink, allow changing frame size */
- q->subdev_fmt.width = fmt->format.width;
- q->subdev_fmt.height = fmt->format.height;
- q->subdev_fmt.code = fmt->format.code;
- fmt->format = q->subdev_fmt;
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ mbus = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ else
+ mbus = &q->subdev_fmt;
+
+ fmt->format.code = formats[0].mbus_code;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].mbus_code == mbus_code) {
+ fmt->format.code = mbus_code;
+ break;
+ }
}
+ fmt->format.width = min_t(u32, fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
+ fmt->format.height = min_t(u32, fmt->format.height,
+ CIO2_IMAGE_MAX_LENGTH);
+ fmt->format.field = V4L2_FIELD_NONE;
+
+ mutex_lock(&q->subdev_lock);
+ *mbus = fmt->format;
+ mutex_unlock(&q->subdev_lock);
+
return 0;
}
@@ -1549,6 +1552,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
/* Initialize miscellaneous variables */
mutex_init(&q->lock);
+ mutex_init(&q->subdev_lock);
/* Initialize formats to default values */
fmt = &q->subdev_fmt;
@@ -1666,6 +1670,7 @@ fail_vdev_media_entity:
fail_subdev_media_entity:
cio2_fbpt_exit(q, &cio2->pci_dev->dev);
fail_fbpt:
+ mutex_destroy(&q->subdev_lock);
mutex_destroy(&q->lock);
return r;
@@ -1679,6 +1684,7 @@ static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
v4l2_device_unregister_subdev(&q->subdev);
media_entity_cleanup(&q->subdev.entity);
cio2_fbpt_exit(q, &cio2->pci_dev->dev);
+ mutex_destroy(&q->subdev_lock);
mutex_destroy(&q->lock);
}
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
index 240635be7a31..b73c016d8a1b 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
@@ -334,6 +334,7 @@ struct cio2_queue {
/* Subdev, /dev/v4l-subdevX */
struct v4l2_subdev subdev;
+ struct mutex subdev_lock; /* Serialise access to subdev_fmt field */
struct media_pad subdev_pads[CIO2_PADS];
struct v4l2_mbus_framefmt subdev_fmt;
atomic_t frame_sequence;
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
index f33c0de3e849..019bbc18cede 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
@@ -184,7 +184,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev)
struct spi_master *master;
struct netup_spi *nspi;
- master = spi_alloc_master(&ndev->pci_dev->dev,
+ master = devm_spi_alloc_master(&ndev->pci_dev->dev,
sizeof(struct netup_spi));
if (!master) {
dev_err(&ndev->pci_dev->dev,
@@ -217,6 +217,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev)
ndev->pci_slot,
ndev->pci_func);
if (!spi_new_device(master, &netup_spi_board)) {
+ spi_unregister_master(master);
ndev->spi = NULL;
dev_err(&ndev->pci_dev->dev,
"%s(): unable to create SPI device\n", __func__);
@@ -235,13 +236,13 @@ void netup_spi_release(struct netup_unidvb_dev *ndev)
if (!spi)
return;
+ spi_unregister_master(spi->master);
spin_lock_irqsave(&spi->lock, flags);
reg = readw(&spi->regs->control_stat);
writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat);
reg = readw(&spi->regs->control_stat);
writew(reg & ~NETUP_SPI_CTRL_IMASK, &spi->regs->control_stat);
spin_unlock_irqrestore(&spi->lock, flags);
- spi_unregister_master(spi->master);
ndev->spi = NULL;
}
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 66acfd35ffc6..8680eb08b654 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -293,8 +293,11 @@ static int empress_init(struct saa7134_dev *dev)
q->lock = &dev->lock;
q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
- if (err)
+ if (err) {
+ video_device_release(dev->empress_dev);
+ dev->empress_dev = NULL;
return err;
+ }
dev->empress_dev->queue = q;
video_set_drvdata(dev->empress_dev, dev);
diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c
index 68d400e1e240..8c3da6f7a60f 100644
--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c
@@ -693,7 +693,8 @@ int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value)
{
int err;
- audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", reg << 2, value);
+ audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n",
+ (reg << 2) & 0xffffffff, value);
err = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR);
if (err < 0)
return err;
diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c
index 6b5582b7c595..6e25654da256 100644
--- a/drivers/media/pci/saa7146/mxb.c
+++ b/drivers/media/pci/saa7146/mxb.c
@@ -653,16 +653,17 @@ static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *
struct mxb *mxb = (struct mxb *)dev->ext_priv;
DEB_D("VIDIOC_S_AUDIO %d\n", a->index);
- if (mxb_inputs[mxb->cur_input].audioset & (1 << a->index)) {
- if (mxb->cur_audinput != a->index) {
- mxb->cur_audinput = a->index;
- tea6420_route(mxb, a->index);
- if (mxb->cur_audinput == 0)
- mxb_update_audmode(mxb);
- }
- return 0;
+ if (a->index >= 32 ||
+ !(mxb_inputs[mxb->cur_input].audioset & (1 << a->index)))
+ return -EINVAL;
+
+ if (mxb->cur_audinput != a->index) {
+ mxb->cur_audinput = a->index;
+ tea6420_route(mxb, a->index);
+ if (mxb->cur_audinput == 0)
+ mxb_update_audmode(mxb);
}
- return -EINVAL;
+ return 0;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index 32136ebe4f61..962f8eb73b05 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -1024,7 +1024,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
__func__, result);
result = -ENOMEM;
- goto failed;
+ goto fail_pci;
}
/* Establish encoder defaults here */
@@ -1078,7 +1078,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
100000, ENCODER_DEF_BITRATE);
if (hdl->error) {
result = hdl->error;
- goto failed;
+ goto fail_hdl;
}
port->std = V4L2_STD_NTSC_M;
@@ -1096,7 +1096,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
printk(KERN_INFO "%s: can't allocate mpeg device\n",
dev->name);
result = -ENOMEM;
- goto failed;
+ goto fail_hdl;
}
port->v4l_device->ctrl_handler = hdl;
@@ -1107,10 +1107,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
if (result < 0) {
printk(KERN_INFO "%s: can't register mpeg device\n",
dev->name);
- /* TODO: We're going to leak here if we don't dealloc
- The buffers above. The unreg function can't deal wit it.
- */
- goto failed;
+ goto fail_reg;
}
printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
@@ -1132,9 +1129,14 @@ int saa7164_encoder_register(struct saa7164_port *port)
saa7164_api_set_encoder(port);
saa7164_api_get_encoder(port);
+ return 0;
- result = 0;
-failed:
+fail_reg:
+ video_device_release(port->v4l_device);
+ port->v4l_device = NULL;
+fail_hdl:
+ v4l2_ctrl_handler_free(hdl);
+fail_pci:
return result;
}
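The single "failed:" label above is split so each failure point frees exactly what was set up before it. A self-contained userspace sketch of the staged-unwind shape; do_register() and the malloc()ed buffers are stand-ins for the ctrl handler and video device, not the saa7164 API:

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical final step; make it return a negative value to
	 * exercise the unwind path. */
	static int do_register(void) { return 0; }

	static int staged_register(void)
	{
		char *cfg, *hdl, *vdev;
		int result;

		cfg = malloc(16);
		if (!cfg) {
			result = -1;
			goto fail_cfg;
		}
		hdl = malloc(16);
		if (!hdl) {
			result = -1;
			goto fail_hdl;
		}
		vdev = malloc(16);
		if (!vdev) {
			result = -1;
			goto fail_vdev;
		}
		result = do_register();
		if (result < 0)
			goto fail_reg;

		printf("registered\n");
		/* ownership is handed off in the real driver; freed here for the demo */
		free(vdev);
		free(hdl);
		free(cfg);
		return 0;

	fail_reg:
		free(vdev);
	fail_vdev:
		free(hdl);
	fail_hdl:
		free(cfg);
	fail_cfg:
		return result;
	}

	int main(void)
	{
		return staged_register() ? 1 : 0;
	}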
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index 2ac33b5cc454..f06e6d35d846 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -410,7 +410,7 @@ int solo_g723_init(struct solo_dev *solo_dev)
ret = snd_ctl_add(card, snd_ctl_new1(&kctl, solo_dev));
if (ret < 0)
- return ret;
+ goto snd_error;
ret = solo_snd_pcm_init(solo_dev);
if (ret < 0)
diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
index 4407b9f881e4..bd690613fe68 100644
--- a/drivers/media/pci/sta2x11/Kconfig
+++ b/drivers/media/pci/sta2x11/Kconfig
@@ -1,6 +1,7 @@
config STA2X11_VIP
tristate "STA2X11 VIP Video For Linux"
depends on STA2X11 || COMPILE_TEST
+ select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
select VIDEOBUF2_DMA_CONTIG
depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index d6816effb878..d02b5fd940c1 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -424,14 +424,15 @@ static void debiirq(unsigned long cookie)
case DATA_CI_GET:
{
u8 *data = av7110->debi_virt;
+ u8 data_0 = data[0];
- if ((data[0] < 2) && data[2] == 0xff) {
+ if (data_0 < 2 && data[2] == 0xff) {
int flags = 0;
if (data[5] > 0)
flags |= CA_CI_MODULE_PRESENT;
if (data[5] > 5)
flags |= CA_CI_MODULE_READY;
- av7110->ci_slot[data[0]].flags = flags;
+ av7110->ci_slot[data_0].flags = flags;
} else
ci_get_data(&av7110->ci_rbuffer,
av7110->debi_virt,
diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
index b3dc45b91101..9b545c743168 100644
--- a/drivers/media/pci/ttpci/budget-core.c
+++ b/drivers/media/pci/ttpci/budget-core.c
@@ -383,20 +383,25 @@ static int budget_register(struct budget *budget)
ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);
if (ret < 0)
- return ret;
+ goto err_release_dmx;
budget->mem_frontend.source = DMX_MEMORY_FE;
ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
if (ret < 0)
- return ret;
+ goto err_release_dmx;
ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
if (ret < 0)
- return ret;
+ goto err_release_dmx;
dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);
return 0;
+
+err_release_dmx:
+ dvb_dmxdev_release(&budget->dmxdev);
+ dvb_dmx_release(&budget->demux);
+ return ret;
}
static void budget_unregister(struct budget *budget)
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 6c40e60ac993..b0f8d1532b70 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -776,6 +776,9 @@ static int tw5864_enum_frameintervals(struct file *file, void *priv,
fintv->type = V4L2_FRMIVAL_TYPE_STEPWISE;
ret = tw5864_frameinterval_get(input, &frameinterval);
+ if (ret)
+ return ret;
+
fintv->stepwise.step = frameinterval;
fintv->stepwise.min = frameinterval;
fintv->stepwise.max = frameinterval;
@@ -794,6 +797,9 @@ static int tw5864_g_parm(struct file *file, void *priv,
cp->capability = V4L2_CAP_TIMEPERFRAME;
ret = tw5864_frameinterval_get(input, &cp->timeperframe);
+ if (ret)
+ return ret;
+
cp->timeperframe.numerator *= input->frame_interval;
cp->capturemode = 0;
cp->readbuffers = 2;
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index 89a86c19579b..50fc71d0cb9f 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -514,19 +514,31 @@ static void vpss_exit(void)
static int __init vpss_init(void)
{
+ int ret;
+
if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
return -EBUSY;
oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
if (unlikely(!oper_cfg.vpss_regs_base2)) {
- release_mem_region(VPSS_CLK_CTRL, 4);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_ioremap;
}
writel(VPSS_CLK_CTRL_VENCCLKEN |
- VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
+ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
+
+ ret = platform_driver_register(&vpss_driver);
+ if (ret)
+ goto err_pd_register;
+
+ return 0;
- return platform_driver_register(&vpss_driver);
+err_pd_register:
+ iounmap(oper_cfg.vpss_regs_base2);
+err_ioremap:
+ release_mem_region(VPSS_CLK_CTRL, 4);
+ return ret;
}
subsys_initcall(vpss_init);
module_exit(vpss_exit);
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index 9a48c0f69320..1dbebdc1c2f8 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -311,8 +311,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
if (on) {
ret = pm_runtime_get_sync(&is->pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(&is->pdev->dev);
return ret;
+ }
set_bit(IS_ST_PWR_ON, &is->state);
ret = fimc_is_start_firmware(is);
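This is the first of many hunks in this batch (mipi-csis, camss-csiphy, s5p-mfc, rcar-fcp, rcar-vin, rga, hva, vpe, ...) fixing the same leak: pm_runtime_get_sync() increments the device usage counter even when it returns an error, so the error path must drop the reference again, with pm_runtime_put() or, where no idle attempt is wanted, pm_runtime_put_noidle(). A sketch of the balanced pattern; the function is illustrative:

	#include <linux/pm_runtime.h>

	static int resume_and_program(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			/* get_sync() bumped the counter even on failure: balance it. */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		/* ... program the hardware ... */

		pm_runtime_put(dev);
		return 0;
	}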
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index 70d5f5586a5d..10fe7d2e8790 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -480,7 +480,7 @@ static int fimc_lite_open(struct file *file)
set_bit(ST_FLITE_IN_USE, &fimc->state);
ret = pm_runtime_get_sync(&fimc->pdev->dev);
if (ret < 0)
- goto unlock;
+ goto err_pm;
ret = v4l2_fh_open(file);
if (ret < 0)
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index b5993532831d..3261dc72cc61 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -481,8 +481,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
return -ENXIO;
ret = pm_runtime_get_sync(fmd->pmf);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(fmd->pmf);
return ret;
+ }
fmd->num_sensors = 0;
@@ -1257,6 +1259,7 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd)
if (IS_ERR(pctl->state_default))
return PTR_ERR(pctl->state_default);
+ /* PINCTRL_STATE_IDLE is optional */
pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
PINCTRL_STATE_IDLE);
return 0;
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index b4e28a299e26..efab3ebc6756 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -513,8 +513,10 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
if (enable) {
s5pcsis_clear_counters(state);
ret = pm_runtime_get_sync(&state->pdev->dev);
- if (ret && ret != 1)
+ if (ret && ret != 1) {
+ pm_runtime_put_noidle(&state->pdev->dev);
return ret;
+ }
}
mutex_lock(&state->lock);
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
index 11429633b2fb..f0bca30a0a80 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -579,6 +579,13 @@ static int mtk_jpeg_queue_setup(struct vb2_queue *q,
if (!q_data)
return -EINVAL;
+ if (*num_planes) {
+ for (i = 0; i < *num_planes; i++)
+ if (sizes[i] < q_data->sizeimage[i])
+ return -EINVAL;
+ return 0;
+ }
+
*num_planes = q_data->fmt->colplanes;
for (i = 0; i < q_data->fmt->colplanes; i++) {
sizes[i] = q_data->sizeimage[i];
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
index 79ca03ac449c..3f64119e8c08 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
@@ -103,6 +103,7 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev)
{
pm_runtime_disable(dev->pm.dev);
+ put_device(dev->pm.larbvdec);
}
void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 419e1cb10dc6..f4be4c672d40 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -929,8 +929,11 @@ static int emmaprp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcdev);
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ if (irq < 0) {
+ ret = irq;
+ goto rel_vdev;
+ }
+
ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0,
dev_name(&pdev->dev), pcdev);
if (ret)
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index addd03b51748..00e52f0b8251 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2265,8 +2265,10 @@ static int isp_probe(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
isp->mmio_base[map_idx] =
devm_ioremap_resource(isp->dev, mem);
- if (IS_ERR(isp->mmio_base[map_idx]))
- return PTR_ERR(isp->mmio_base[map_idx]);
+ if (IS_ERR(isp->mmio_base[map_idx])) {
+ ret = PTR_ERR(isp->mmio_base[map_idx]);
+ goto error;
+ }
}
ret = isp_get_clocks(isp);
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 591c6de498f8..20857ae42a77 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -2290,7 +2290,7 @@ static int preview_init_entities(struct isp_prev_device *prev)
me->ops = &preview_media_ops;
ret = media_entity_pads_init(me, PREV_PADS_NUM, pads);
if (ret < 0)
- return ret;
+ goto error_handler_free;
preview_init_formats(sd, NULL);
@@ -2323,6 +2323,8 @@ error_video_out:
omap3isp_video_cleanup(&prev->video_in);
error_video_in:
media_entity_cleanup(&prev->subdev.entity);
+error_handler_free:
+ v4l2_ctrl_handler_free(&prev->ctrls);
return ret;
}
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 406ac673ad84..0281b8e53fef 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -1452,6 +1452,9 @@ static int pxac_vb2_prepare(struct vb2_buffer *vb)
struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
int ret = 0;
+#ifdef DEBUG
+ int i;
+#endif
switch (pcdev->channels) {
case 1:
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 008afb85023b..3c5b9082ad72 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -176,8 +176,10 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
return ret;
+ }
ret = csiphy_set_clock_rates(csiphy);
if (ret < 0) {
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index c9bb0d023db4..e81ebeb0506e 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -901,6 +901,7 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
video->nformats = ARRAY_SIZE(formats_rdi_8x96);
}
} else {
+ ret = -EINVAL;
goto error_video_register;
}
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 60069869596c..168f5af6abcc 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -321,8 +321,10 @@ static int venus_probe(struct platform_device *pdev)
goto err_dev_unregister;
ret = pm_runtime_put_sync(dev);
- if (ret)
+ if (ret) {
+ pm_runtime_get_noresume(dev);
goto err_dev_unregister;
+ }
return 0;
@@ -333,6 +335,7 @@ err_core_deinit:
err_venus_shutdown:
venus_shutdown(dev);
err_runtime_disable:
+ pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
hfi_destroy(core);
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index 2293d936e49c..7f515a4b9bd1 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -181,6 +181,7 @@ static void parse_codecs(struct venus_core *core, void *data)
if (IS_V1(core)) {
core->dec_codecs &= ~HFI_VIDEO_CODEC_HEVC;
core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK;
+ core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC;
}
}
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index 43c78620c9d8..05c712e00a2a 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -8,6 +8,7 @@
*/
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -21,6 +22,7 @@
struct rcar_fcp_device {
struct list_head list;
struct device *dev;
+ struct device_dma_parameters dma_parms;
};
static LIST_HEAD(fcp_devices);
@@ -101,8 +103,10 @@ int rcar_fcp_enable(struct rcar_fcp_device *fcp)
return 0;
ret = pm_runtime_get_sync(fcp->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(fcp->dev);
return ret;
+ }
return 0;
}
@@ -136,6 +140,9 @@ static int rcar_fcp_probe(struct platform_device *pdev)
fcp->dev = &pdev->dev;
+ fcp->dev->dma_parms = &fcp->dma_parms;
+ dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32));
+
pm_runtime_enable(&pdev->dev);
mutex_lock(&fcp_lock);
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 92323310f735..70a8cc433a03 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -1323,8 +1323,10 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
int ret;
ret = pm_runtime_get_sync(vin->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(vin->dev);
return ret;
+ }
/* Make register writes take effect immediately. */
vnmc = rvin_read(vin, VNMC_REG);
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index b677d014e7ba..81413ab52475 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -912,7 +912,6 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct rcar_drif_sdr *sdr = video_drvdata(file);
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
f->fmt.sdr.buffersize = sdr->fmt->buffersize;
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 5a30f1d84fe1..2bd5898a6204 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2368,7 +2368,7 @@ static int fdp1_probe(struct platform_device *pdev)
dprintk(fdp1, "FDP1 Version R-Car H3\n");
break;
case FD1_IP_M3N:
- dprintk(fdp1, "FDP1 Version R-Car M3N\n");
+ dprintk(fdp1, "FDP1 Version R-Car M3-N\n");
break;
case FD1_IP_E3:
dprintk(fdp1, "FDP1 Version R-Car E3\n");
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
index 356821c2dacf..0932f1445dea 100644
--- a/drivers/media/platform/rockchip/rga/rga-buf.c
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -89,6 +89,7 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
ret = pm_runtime_get_sync(rga->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(rga->dev);
rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
return ret;
}
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
index 96d1b1b3fe8e..681de42f12e9 100644
--- a/drivers/media/platform/rockchip/rga/rga-hw.c
+++ b/drivers/media/platform/rockchip/rga/rga-hw.c
@@ -208,22 +208,25 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
dst_info.data.format = ctx->out.fmt->hw_format;
dst_info.data.swap = ctx->out.fmt->color_swap;
- if (ctx->in.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
- if (ctx->out.fmt->hw_format < RGA_COLOR_FMT_YUV422SP) {
- switch (ctx->in.colorspace) {
- case V4L2_COLORSPACE_REC709:
- src_info.data.csc_mode =
- RGA_SRC_CSC_MODE_BT709_R0;
- break;
- default:
- src_info.data.csc_mode =
- RGA_SRC_CSC_MODE_BT601_R0;
- break;
- }
+ /*
+ * CSC mode must only be set when the colorspace families differ between
+ * input and output. It must remain unset (zeroed) if both are the same.
+ */
+
+ if (RGA_COLOR_FMT_IS_YUV(ctx->in.fmt->hw_format) &&
+ RGA_COLOR_FMT_IS_RGB(ctx->out.fmt->hw_format)) {
+ switch (ctx->in.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
+ break;
+ default:
+ src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT601_R0;
+ break;
}
}
- if (ctx->out.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
+ if (RGA_COLOR_FMT_IS_RGB(ctx->in.fmt->hw_format) &&
+ RGA_COLOR_FMT_IS_YUV(ctx->out.fmt->hw_format)) {
switch (ctx->out.colorspace) {
case V4L2_COLORSPACE_REC709:
dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h
index ca3c204abe42..3e4b70eb9ced 100644
--- a/drivers/media/platform/rockchip/rga/rga-hw.h
+++ b/drivers/media/platform/rockchip/rga/rga-hw.h
@@ -103,6 +103,11 @@
#define RGA_COLOR_FMT_CP_8BPP 15
#define RGA_COLOR_FMT_MASK 15
+#define RGA_COLOR_FMT_IS_YUV(fmt) \
+ (((fmt) >= RGA_COLOR_FMT_YUV422SP) && ((fmt) < RGA_COLOR_FMT_CP_1BPP))
+#define RGA_COLOR_FMT_IS_RGB(fmt) \
+ ((fmt) < RGA_COLOR_FMT_YUV422SP)
+
#define RGA_COLOR_NONE_SWAP 0
#define RGA_COLOR_RB_SWAP 1
#define RGA_COLOR_ALPHA_SWAP 2
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index 79bc0ef6bb41..8d8ed72bd0aa 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -476,7 +476,7 @@ static int s3c_camif_probe(struct platform_device *pdev)
ret = camif_media_dev_init(camif);
if (ret < 0)
- goto err_alloc;
+ goto err_pm;
ret = camif_register_sensor(camif);
if (ret < 0)
@@ -510,10 +510,9 @@ err_sens:
media_device_unregister(&camif->media_dev);
media_device_cleanup(&camif->media_dev);
camif_unregister_media_entities(camif);
-err_alloc:
+err_pm:
pm_runtime_put(dev);
pm_runtime_disable(dev);
-err_pm:
camif_clk_put(camif);
err_clk:
s3c_camif_unregister_subdev(camif);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 5e080f32b0e8..95abf2bd7eba 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -83,8 +83,10 @@ int s5p_mfc_power_on(void)
int i, ret = 0;
ret = pm_runtime_get_sync(pm->device);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(pm->device);
return ret;
+ }
/* clock control */
for (i = 0; i < pm->num_clocks; i++) {
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 40c4eef71c34..00f6e3f06dac 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1371,7 +1371,7 @@ static int bdisp_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "failed to set PM\n");
- goto err_dbg;
+ goto err_pm;
}
/* Filters */
@@ -1399,7 +1399,6 @@ err_filter:
bdisp_hw_free_filters(bdisp->dev);
err_pm:
pm_runtime_put(dev);
-err_dbg:
bdisp_debugfs_remove(bdisp);
err_v4l2:
v4l2_device_unregister(&bdisp->v4l2_dev);
diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
index 0b42acd4e3a6..53dc6da2b09e 100644
--- a/drivers/media/platform/sti/delta/delta-v4l2.c
+++ b/drivers/media/platform/sti/delta/delta-v4l2.c
@@ -954,8 +954,10 @@ static void delta_run_work(struct work_struct *work)
/* enable the hardware */
if (!dec->pm) {
ret = delta_get_sync(ctx);
- if (ret)
+ if (ret) {
+ delta_put_autosuspend(ctx);
goto err;
+ }
}
/* decode this access unit */
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
index 7917fd2c4bd4..d826c011c095 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -272,6 +272,7 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
if (pm_runtime_get_sync(dev) < 0) {
dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
+ pm_runtime_put_noidle(dev);
mutex_unlock(&hva->protect_mutex);
return -EFAULT;
}
@@ -392,7 +393,7 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
- goto err_clk;
+ goto err_pm;
}
/* check IP hardware version */
@@ -557,6 +558,7 @@ void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
if (pm_runtime_get_sync(dev) < 0) {
seq_puts(s, "Cannot wake up IP\n");
+ pm_runtime_put_noidle(dev);
mutex_unlock(&hva->protect_mutex);
return;
}
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 18d0b5641789..ee1a21179767 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -587,7 +587,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ret < 0) {
dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
__func__, ret);
- goto err_release_buffers;
+ goto err_pm_put;
}
/* Enable stream on the sub device */
@@ -682,8 +682,6 @@ err_subdev_streamoff:
err_pm_put:
pm_runtime_put(dcmi->dev);
-
-err_release_buffers:
spin_lock_irq(&dcmi->irqlock);
/*
* Return all buffers to vb2 in QUEUED state.
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index d1febe5baa6d..d945323fc437 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -541,16 +541,16 @@ static void enable_irqs(struct cal_ctx *ctx)
static void disable_irqs(struct cal_ctx *ctx)
{
+ u32 val;
+
/* Disable IRQ_WDMA_END 0/1 */
- reg_write_field(ctx->dev,
- CAL_HL_IRQENABLE_CLR(2),
- CAL_HL_IRQ_CLEAR,
- CAL_HL_IRQ_MASK(ctx->csi2_port));
+ val = 0;
+ set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
+ reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(2), val);
/* Disable IRQ_WDMA_START 0/1 */
- reg_write_field(ctx->dev,
- CAL_HL_IRQENABLE_CLR(3),
- CAL_HL_IRQ_CLEAR,
- CAL_HL_IRQ_MASK(ctx->csi2_port));
+ val = 0;
+ set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
+ reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(3), val);
/* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
}
@@ -684,12 +684,13 @@ static void pix_proc_config(struct cal_ctx *ctx)
}
static void cal_wr_dma_config(struct cal_ctx *ctx,
- unsigned int width)
+ unsigned int width, unsigned int height)
{
u32 val;
val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
+ set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK);
set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
CAL_WR_DMA_CTRL_DTAG_MASK);
set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
@@ -1315,7 +1316,8 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
csi2_lane_config(ctx);
csi2_ctx_config(ctx);
pix_proc_config(ctx);
- cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
+ cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline,
+ ctx->v_fmt.fmt.pix.height);
cal_wr_dma_addr(ctx, addr);
csi2_ppi_enable(ctx);
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index a285b9db7ee8..70a8371b7e9a 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -2451,6 +2451,8 @@ static int vpe_runtime_get(struct platform_device *pdev)
r = pm_runtime_get_sync(&pdev->dev);
WARN_ON(r < 0);
+ if (r)
+ pm_runtime_put_noidle(&pdev->dev);
return r < 0 ? r : 0;
}
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 31db363602e5..b603ca412387 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -174,13 +174,13 @@ static const u8 vivid_hdmi_edid[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b,
- 0x02, 0x03, 0x3f, 0xf0, 0x51, 0x61, 0x60, 0x5f,
+ 0x02, 0x03, 0x3f, 0xf1, 0x51, 0x61, 0x60, 0x5f,
0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00,
0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
- 0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
+ 0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3,
0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30,
0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00,
@@ -189,7 +189,7 @@ static const u8 vivid_hdmi_edid[256] = {
0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82,
};
static int vidioc_querycap(struct file *file, void *priv,
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 0f909500a0b8..ecd9e36ef3f6 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -998,7 +998,7 @@ int vivid_vid_out_s_fbuf(struct file *file, void *fh,
return -EINVAL;
}
dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags);
- dev->fbuf_out_flags = a->flags & (chroma_flags | alpha_flags);
+ dev->fbuf_out_flags |= a->flags & (chroma_flags | alpha_flags);
return 0;
}
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index a5634ca85a31..a07caf981e15 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -431,6 +431,8 @@ vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
if (!pool)
return NULL;
+ pool->vsp1 = vsp1;
+
spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->free);
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index 8d86f618ec77..8824c4ce67f1 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -243,7 +243,7 @@ static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1,
brx = &vsp1->bru->entity;
else if (pipe->brx && !drm_pipe->force_brx_release)
brx = pipe->brx;
- else if (!vsp1->bru->entity.pipe)
+ else if (vsp1_feature(vsp1, VSP1_HAS_BRU) && !vsp1->bru->entity.pipe)
brx = &vsp1->bru->entity;
else
brx = &vsp1->brs->entity;
@@ -460,9 +460,9 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
* make sure it is present in the pipeline's list of entities if it
* wasn't already.
*/
- if (!use_uif) {
+ if (drm_pipe->uif && !use_uif) {
drm_pipe->uif->pipe = NULL;
- } else if (!drm_pipe->uif->pipe) {
+ } else if (drm_pipe->uif && !drm_pipe->uif->pipe) {
drm_pipe->uif->pipe = pipe;
list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities);
}
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index b6619c9c18bb..022f84569d0e 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -562,7 +562,12 @@ int vsp1_device_get(struct vsp1_device *vsp1)
int ret;
ret = pm_runtime_get_sync(vsp1->dev);
- return ret < 0 ? ret : 0;
+ if (ret < 0) {
+ pm_runtime_put_noidle(vsp1->dev);
+ return ret;
+ }
+
+ return 0;
}
/*
@@ -845,12 +850,12 @@ static int vsp1_probe(struct platform_device *pdev)
/* Configure device parameters based on the version register. */
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = vsp1_device_get(vsp1);
if (ret < 0)
goto done;
vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
- pm_runtime_put_sync(&pdev->dev);
+ vsp1_device_put(vsp1);
for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
@@ -877,8 +882,10 @@ static int vsp1_probe(struct platform_device *pdev)
}
done:
- if (ret)
+ if (ret) {
pm_runtime_disable(&pdev->dev);
+ rcar_fcp_put(vsp1->fcp);
+ }
return ret;
}
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 8e82610ffaad..01c82da8e9aa 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -845,6 +845,10 @@ static int ati_remote_probe(struct usb_interface *interface,
err("%s: endpoint_in message size==0? \n", __func__);
return -ENODEV;
}
+ if (!usb_endpoint_is_int_out(endpoint_out)) {
+ err("%s: Unexpected endpoint_out\n", __func__);
+ return -ENODEV;
+ }
ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL);
rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c
index cd476cab9782..4e70b67ccd18 100644
--- a/drivers/media/rc/gpio-ir-tx.c
+++ b/drivers/media/rc/gpio-ir-tx.c
@@ -87,13 +87,8 @@ static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
// space
edge = ktime_add_us(edge, txbuf[i]);
delta = ktime_us_delta(edge, ktime_get());
- if (delta > 10) {
- spin_unlock_irqrestore(&gpio_ir->lock, flags);
- usleep_range(delta, delta + 10);
- spin_lock_irqsave(&gpio_ir->lock, flags);
- } else if (delta > 0) {
+ if (delta > 0)
udelay(delta);
- }
} else {
// pulse
ktime_t last = ktime_add_us(edge, txbuf[i]);
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index de77d22c30a7..18f3718315a8 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -285,8 +285,14 @@ static irqreturn_t ite_cir_isr(int irq, void *data)
/* read the interrupt flags */
iflags = dev->params.get_irq_causes(dev);
+ /* Check for RX overflow */
+ if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) {
+ dev_warn(&dev->rdev->dev, "receive overflow\n");
+ ir_raw_event_reset(dev->rdev);
+ }
+
/* check for the receive interrupt */
- if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) {
+ if (iflags & ITE_IRQ_RX_FIFO) {
/* read the FIFO bytes */
rx_bytes =
dev->params.get_rx_bytes(dev, rx_buf,
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index f1dfb8409432..845583e2af4d 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -685,11 +685,18 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
data[0], data[1]);
break;
case MCE_RSP_EQIRCFS:
+ if (!data[0] && !data[1]) {
+ dev_dbg(dev, "%s: no carrier", inout);
+ break;
+ }
+ // prescaler should make sense
+ if (data[0] > 8)
+ break;
period = DIV_ROUND_CLOSEST((1U << data[0] * 2) *
(data[1] + 1), 10);
if (!period)
break;
- carrier = (1000 * 1000) / period;
+ carrier = USEC_PER_SEC / period;
dev_dbg(dev, "%s carrier of %u Hz (period %uus)",
inout, carrier, period);
break;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index c30affbd43a9..6ea6038b9d36 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1245,6 +1245,10 @@ static ssize_t store_protocols(struct device *device,
}
mutex_lock(&dev->lock);
+ if (!dev->registered) {
+ mutex_unlock(&dev->lock);
+ return -ENODEV;
+ }
old_protocols = *current_protocols;
new_protocols = old_protocols;
@@ -1383,6 +1387,10 @@ static ssize_t store_filter(struct device *device,
return -EINVAL;
mutex_lock(&dev->lock);
+ if (!dev->registered) {
+ mutex_unlock(&dev->lock);
+ return -ENODEV;
+ }
new_filter = *filter;
if (fattr->mask)
@@ -1497,6 +1505,10 @@ static ssize_t store_wakeup_protocols(struct device *device,
int i;
mutex_lock(&dev->lock);
+ if (!dev->registered) {
+ mutex_unlock(&dev->lock);
+ return -ENODEV;
+ }
allowed = dev->allowed_wakeup_protocols;
@@ -1556,25 +1568,25 @@ static void rc_dev_release(struct device *device)
kfree(dev);
}
-#define ADD_HOTPLUG_VAR(fmt, val...) \
- do { \
- int err = add_uevent_var(env, fmt, val); \
- if (err) \
- return err; \
- } while (0)
-
static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
{
struct rc_dev *dev = to_rc_dev(device);
+ int ret = 0;
- if (dev->rc_map.name)
- ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
- if (dev->driver_name)
- ADD_HOTPLUG_VAR("DRV_NAME=%s", dev->driver_name);
- if (dev->device_name)
- ADD_HOTPLUG_VAR("DEV_NAME=%s", dev->device_name);
+ mutex_lock(&dev->lock);
- return 0;
+ if (!dev->registered)
+ ret = -ENODEV;
+ if (ret == 0 && dev->rc_map.name)
+ ret = add_uevent_var(env, "NAME=%s", dev->rc_map.name);
+ if (ret == 0 && dev->driver_name)
+ ret = add_uevent_var(env, "DRV_NAME=%s", dev->driver_name);
+ if (ret == 0 && dev->device_name)
+ ret = add_uevent_var(env, "DEV_NAME=%s", dev->device_name);
+
+ mutex_unlock(&dev->lock);
+
+ return ret;
}
/*
@@ -1863,6 +1875,8 @@ int rc_register_device(struct rc_dev *dev)
goto out_raw;
}
+ dev->registered = true;
+
rc = device_add(&dev->dev);
if (rc)
goto out_rx_free;
@@ -1872,8 +1886,6 @@ int rc_register_device(struct rc_dev *dev)
dev->device_name ?: "Unspecified device", path ?: "N/A");
kfree(path);
- dev->registered = true;
-
/*
* once the the input device is registered in rc_setup_rx_device,
* userspace can open the input device and rc_open() will be called
@@ -1958,14 +1970,14 @@ void rc_unregister_device(struct rc_dev *dev)
del_timer_sync(&dev->timer_keyup);
del_timer_sync(&dev->timer_repeat);
- rc_free_rx_device(dev);
-
mutex_lock(&dev->lock);
if (dev->users && dev->close)
dev->close(dev);
dev->registered = false;
mutex_unlock(&dev->lock);
+ rc_free_rx_device(dev);
+
/*
* lirc device should be freed with dev->registered = false, so
* that userspace polling will get notified.
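
The rc-main.c changes above route every uevent variable through add_uevent_var() under dev->lock and bail out once the device is unregistered. A compact sketch of that shape, with a hypothetical example_dev standing in for struct rc_dev:

#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/mutex.h>

struct example_dev {
        struct mutex lock;
        bool registered;
        const char *name;
};

/* Emit uevent variables only while the device is still registered. */
static int example_uevent(struct example_dev *edev,
                          struct kobj_uevent_env *env)
{
        int ret = 0;

        mutex_lock(&edev->lock);

        if (!edev->registered)
                ret = -ENODEV;
        if (ret == 0 && edev->name)
                ret = add_uevent_var(env, "NAME=%s", edev->name);

        mutex_unlock(&edev->lock);

        return ret;
}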
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index f500cea228a9..0114e81fa6fa 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -129,6 +129,8 @@ static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id)
} else if (status & REG_RXINT_RPEI_EN) {
ir_raw_event_set_idle(ir->rc, true);
ir_raw_event_handle(ir->rc);
+ } else {
+ ir_raw_event_handle(ir->rc);
}
spin_unlock(&ir->ir_lock);
diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
index 3df2f23a40be..3fe13de48777 100644
--- a/drivers/media/tuners/m88rs6000t.c
+++ b/drivers/media/tuners/m88rs6000t.c
@@ -534,7 +534,7 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
PGA2_cri = PGA2_GC >> 2;
PGA2_crf = PGA2_GC & 0x03;
- for (i = 0; i <= RF_GC; i++)
+ for (i = 0; i <= RF_GC && i < ARRAY_SIZE(RFGS); i++)
RFG += RFGS[i];
if (RF_GC == 0)
@@ -546,12 +546,12 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
if (RF_GC == 3)
RFG += 100;
- for (i = 0; i <= IF_GC; i++)
+ for (i = 0; i <= IF_GC && i < ARRAY_SIZE(IFGS); i++)
IFG += IFGS[i];
TIAG = TIA_GC * TIA_GS;
- for (i = 0; i <= BB_GC; i++)
+ for (i = 0; i <= BB_GC && i < ARRAY_SIZE(BBGS); i++)
BBG += BBGS[i];
PGA2G = PGA2_cri * PGA2_cri_GS + PGA2_crf * PGA2_crf_GS;
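
The m88rs6000t.c hunks clamp each gain-table walk to the table size so a bogus register value cannot run off the end of the array. The same guard in isolation, using a made-up gain table:

#include <linux/kernel.h>

static const int example_gain_steps[] = { 10, 20, 30, 40 };

/* Sum gain steps 0..gc, never reading past the table. */
static int example_sum_gain(unsigned int gc)
{
        unsigned int i;
        int gain = 0;

        for (i = 0; i <= gc && i < ARRAY_SIZE(example_gain_steps); i++)
                gain += example_gain_steps[i];

        return gain;
}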
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index 83ca5dc047ea..baa9950783b6 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -343,8 +343,10 @@ static int qm1d1c0042_init(struct dvb_frontend *fe)
if (val == reg_initval[reg_index][0x00])
break;
}
- if (reg_index >= QM1D1C0042_NUM_REG_ROWS)
+ if (reg_index >= QM1D1C0042_NUM_REG_ROWS) {
+ ret = -EINVAL;
goto failed;
+ }
memcpy(state->regs, reg_initval[reg_index], QM1D1C0042_NUM_REGS);
usleep_range(2000, 3000);
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index a08d8fe2bb1b..13770b038048 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -84,24 +84,23 @@ static int si2157_init(struct dvb_frontend *fe)
struct si2157_cmd cmd;
const struct firmware *fw;
const char *fw_name;
- unsigned int uitmp, chip_id;
+ unsigned int chip_id, xtal_trim;
dev_dbg(&client->dev, "\n");
- /* Returned IF frequency is garbage when firmware is not running */
- memcpy(cmd.args, "\x15\x00\x06\x07", 4);
+ /* Try to get Xtal trim property, to verify tuner still running */
+ memcpy(cmd.args, "\x15\x00\x04\x02", 4);
cmd.wlen = 4;
cmd.rlen = 4;
ret = si2157_cmd_execute(client, &cmd);
- if (ret)
- goto err;
- uitmp = cmd.args[2] << 0 | cmd.args[3] << 8;
- dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp);
+ xtal_trim = cmd.args[2] | (cmd.args[3] << 8);
- if (uitmp == dev->if_frequency / 1000)
+ if (ret == 0 && xtal_trim < 16)
goto warm;
+ dev->if_frequency = 0; /* we no longer know current tuner state */
+
/* power up */
if (dev->chiptype == SI2157_CHIPTYPE_SI2146) {
memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9);
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
index 29c1473f2e9f..81e24cf0c8b8 100644
--- a/drivers/media/tuners/tuner-simple.c
+++ b/drivers/media/tuners/tuner-simple.c
@@ -499,7 +499,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
case TUNER_TENA_9533_DI:
case TUNER_YMEC_TVF_5533MF:
tuner_dbg("This tuner doesn't have FM. Most cards have a TEA5767 for FM\n");
- return 0;
+ return -EINVAL;
case TUNER_PHILIPS_FM1216ME_MK3:
case TUNER_PHILIPS_FM1236_MK3:
case TUNER_PHILIPS_FMD1216ME_MK3:
@@ -701,7 +701,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
TUNER_RATIO_SELECT_50; /* 50 kHz step */
/* Bandswitch byte */
- simple_radio_bandswitch(fe, &buffer[0]);
+ if (simple_radio_bandswitch(fe, &buffer[0]))
+ return 0;
/* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps
freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) =
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 0750a975bcb8..316edb2dd6c4 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -436,7 +436,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
- lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa),
+ lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c
index 408920577716..94f59c7765dc 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mb.c
@@ -84,7 +84,7 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap)
if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) {
err("tuner i2c write failed.");
- ret = -EREMOTEIO;
+ return -EREMOTEIO;
}
if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl)
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 39ac22486bcd..4b1445d806e5 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -82,11 +82,17 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
}
}
- if ((ret = dvb_usb_adapter_stream_init(adap)) ||
- (ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs)) ||
- (ret = dvb_usb_adapter_frontend_init(adap))) {
+ ret = dvb_usb_adapter_stream_init(adap);
+ if (ret)
return ret;
- }
+
+ ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs);
+ if (ret)
+ goto dvb_init_err;
+
+ ret = dvb_usb_adapter_frontend_init(adap);
+ if (ret)
+ goto frontend_init_err;
/* use exclusive FE lock if there is multiple shared FEs */
if (adap->fe_adap[1].fe)
@@ -106,6 +112,12 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
}
return 0;
+
+frontend_init_err:
+ dvb_usb_adapter_dvb_exit(adap);
+dvb_init_err:
+ dvb_usb_adapter_stream_exit(adap);
+ return ret;
}
static int dvb_usb_adapter_exit(struct dvb_usb_device *d)
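
dvb_usb_adapter_init() above replaces the chained-assignment error check with one call per step and explicit unwind labels, so a failing step only tears down what was already initialized. A self-contained sketch of that shape; the init/exit helpers are stand-ins, not dvb-usb symbols:

#include <linux/types.h>

struct example_adapter {
        bool stream_ready;
        bool dvb_ready;
};

static int example_stream_init(struct example_adapter *a)
{
        a->stream_ready = true;
        return 0;
}

static void example_stream_exit(struct example_adapter *a)
{
        a->stream_ready = false;
}

static int example_dvb_init(struct example_adapter *a)
{
        a->dvb_ready = true;
        return 0;
}

/* Initialize step by step; unwind in reverse order on failure. */
static int example_adapter_init(struct example_adapter *a)
{
        int ret;

        ret = example_stream_init(a);
        if (ret)
                return ret;

        ret = example_dvb_init(a);
        if (ret)
                goto stream_err;

        return 0;

stream_err:
        example_stream_exit(a);
        return ret;
}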
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 317ed6a82d19..89702eef66aa 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -475,7 +475,7 @@ extern int __must_check
dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
/* commonly used remote control parsing */
-extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
+extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[5], u32 *, int *);
/* commonly used firmware download types and function */
struct hexline {
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 13e96b0aeb0f..d97eab01cb8c 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -185,7 +185,7 @@ out_rel_fw:
static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- u8 status, buf;
+ u8 status = 0, buf;
int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);
if (onoff) {
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 69445c8e38e2..d0f95a5cb4d2 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -955,14 +955,10 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
usb_bufs->buf[i] = kzalloc(sb_size, GFP_KERNEL);
if (!usb_bufs->buf[i]) {
- em28xx_uninit_usb_xfer(dev, mode);
-
for (i--; i >= 0; i--)
kfree(usb_bufs->buf[i]);
- kfree(usb_bufs->buf);
- usb_bufs->buf = NULL;
-
+ em28xx_uninit_usb_xfer(dev, mode);
return -ENOMEM;
}
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index a73faf12f7e4..e1946237ac8c 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -1924,6 +1924,7 @@ ret:
return result;
out_free:
+ em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
kfree(dvb);
dev->dvb = NULL;
goto ret;
diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
index 19c6a0354ce0..b84a6f654861 100644
--- a/drivers/media/usb/go7007/go7007-usb.c
+++ b/drivers/media/usb/go7007/go7007-usb.c
@@ -1052,6 +1052,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
struct go7007_usb *usb;
const struct go7007_usb_board *board;
struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct usb_host_endpoint *ep;
unsigned num_i2c_devs;
char *name;
int video_pipe, i, v_urb_len;
@@ -1148,7 +1149,8 @@ static int go7007_usb_probe(struct usb_interface *intf,
if (usb->intr_urb->transfer_buffer == NULL)
goto allocfail;
- if (go->board_id == GO7007_BOARDID_SENSORAY_2250)
+ ep = usb->usbdev->ep_in[4];
+ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
usb_fill_bulk_urb(usb->intr_urb, usb->usbdev,
usb_rcvbulkpipe(usb->usbdev, 4),
usb->intr_urb->transfer_buffer, 2*sizeof(u16),
diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c
index 137fc253b122..96c37a131deb 100644
--- a/drivers/media/usb/go7007/snd-go7007.c
+++ b/drivers/media/usb/go7007/snd-go7007.c
@@ -244,22 +244,18 @@ int go7007_snd_init(struct go7007 *go)
gosnd->capturing = 0;
ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0,
&gosnd->card);
- if (ret < 0) {
- kfree(gosnd);
- return ret;
- }
+ if (ret < 0)
+ goto free_snd;
+
ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go,
&go7007_snd_device_ops);
- if (ret < 0) {
- kfree(gosnd);
- return ret;
- }
+ if (ret < 0)
+ goto free_card;
+
ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm);
- if (ret < 0) {
- snd_card_free(gosnd->card);
- kfree(gosnd);
- return ret;
- }
+ if (ret < 0)
+ goto free_card;
+
strlcpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver));
strlcpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->driver));
strlcpy(gosnd->card->longname, gosnd->card->shortname,
@@ -270,11 +266,8 @@ int go7007_snd_init(struct go7007 *go)
&go7007_snd_capture_ops);
ret = snd_card_register(gosnd->card);
- if (ret < 0) {
- snd_card_free(gosnd->card);
- kfree(gosnd);
- return ret;
- }
+ if (ret < 0)
+ goto free_card;
gosnd->substream = NULL;
go->snd_context = gosnd;
@@ -282,6 +275,12 @@ int go7007_snd_init(struct go7007 *go)
++dev;
return 0;
+
+free_card:
+ snd_card_free(gosnd->card);
+free_snd:
+ kfree(gosnd);
+ return ret;
}
EXPORT_SYMBOL(go7007_snd_init);
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index c9a2b29a60a5..f0562b8eef56 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1585,6 +1585,9 @@ out:
input_unregister_device(gspca_dev->input_dev);
#endif
v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
+ v4l2_device_unregister(&gspca_dev->v4l2_dev);
+ if (sd_desc->probe_error)
+ sd_desc->probe_error(gspca_dev);
kfree(gspca_dev->usb_buf);
kfree(gspca_dev);
return ret;
diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
index b0ced2e14006..a6554d5e9e1a 100644
--- a/drivers/media/usb/gspca/gspca.h
+++ b/drivers/media/usb/gspca/gspca.h
@@ -105,6 +105,7 @@ struct sd_desc {
cam_cf_op config; /* called on probe */
cam_op init; /* called on probe and resume */
cam_op init_controls; /* called on probe */
+ cam_v_op probe_error; /* called if probe failed, do cleanup here */
cam_op start; /* called on stream on after URBs creation */
cam_pkt_op pkt_scan;
/* optional operations */
diff --git a/drivers/media/usb/gspca/m5602/m5602_po1030.c b/drivers/media/usb/gspca/m5602/m5602_po1030.c
index 37d2891e5f5b..81d8eb72ac41 100644
--- a/drivers/media/usb/gspca/m5602/m5602_po1030.c
+++ b/drivers/media/usb/gspca/m5602/m5602_po1030.c
@@ -159,6 +159,7 @@ static const struct v4l2_ctrl_config po1030_greenbal_cfg = {
int po1030_probe(struct sd *sd)
{
u8 dev_id_h = 0, i;
+ int err;
struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
if (force_sensor) {
@@ -177,10 +178,13 @@ int po1030_probe(struct sd *sd)
for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) {
u8 data = preinit_po1030[i][2];
if (preinit_po1030[i][0] == SENSOR)
- m5602_write_sensor(sd,
- preinit_po1030[i][1], &data, 1);
+ err = m5602_write_sensor(sd, preinit_po1030[i][1],
+ &data, 1);
else
- m5602_write_bridge(sd, preinit_po1030[i][1], data);
+ err = m5602_write_bridge(sd, preinit_po1030[i][1],
+ data);
+ if (err < 0)
+ return err;
}
if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1))
diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
index ffea9c35b0a0..13676af42cfc 100644
--- a/drivers/media/usb/gspca/sq905.c
+++ b/drivers/media/usb/gspca/sq905.c
@@ -167,7 +167,7 @@ static int
sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
{
int ret;
- int act_len;
+ int act_len = 0;
gspca_dev->usb_buf[0] = '\0';
if (need_lock)
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
index b7ea4f982964..ccec6138f678 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
@@ -538,12 +538,21 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
static int stv06xx_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id);
+static void stv06xx_probe_error(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ kfree(sd->sensor_priv);
+ sd->sensor_priv = NULL;
+}
+
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.config = stv06xx_config,
.init = stv06xx_init,
.init_controls = stv06xx_init_controls,
+ .probe_error = stv06xx_probe_error,
.start = stv06xx_start,
.stopN = stv06xx_stopN,
.pkt_scan = stv06xx_pkt_scan,
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index 65ef755adfdc..b2adde978c9b 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -1250,7 +1250,7 @@ static int msi2500_probe(struct usb_interface *intf,
}
dev->master = master;
- master->bus_num = 0;
+ master->bus_num = -1;
master->num_chipselect = 1;
master->transfer_one_message = msi2500_transfer_one_message;
spi_master_set_devdata(master, dev);
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 3db2fd7f5d7c..e4f2160f46ff 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -149,6 +149,10 @@ static int tm6000_start_stream(struct tm6000_core *dev)
if (ret < 0) {
printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n",
ret, __func__);
+
+ kfree(dvb->bulk_urb->transfer_buffer);
+ usb_free_urb(dvb->bulk_urb);
+ dvb->bulk_urb = NULL;
return ret;
} else
printk(KERN_ERR "tm6000: pipe resetted\n");
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 96055de6e8ce..62f012841971 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -463,11 +463,12 @@ static int tm6000_alloc_urb_buffers(struct tm6000_core *dev)
if (dev->urb_buffer)
return 0;
- dev->urb_buffer = kmalloc_array(num_bufs, sizeof(void *), GFP_KERNEL);
+ dev->urb_buffer = kmalloc_array(num_bufs, sizeof(*dev->urb_buffer),
+ GFP_KERNEL);
if (!dev->urb_buffer)
return -ENOMEM;
- dev->urb_dma = kmalloc_array(num_bufs, sizeof(dma_addr_t *),
+ dev->urb_dma = kmalloc_array(num_bufs, sizeof(*dev->urb_dma),
GFP_KERNEL);
if (!dev->urb_dma)
return -ENOMEM;
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index 4ce38246ed64..95657a7029c9 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -399,7 +399,7 @@ void usbtv_audio_free(struct usbtv *usbtv)
cancel_work_sync(&usbtv->snd_trigger);
if (usbtv->snd && usbtv->udev) {
- snd_card_free(usbtv->snd);
+ snd_card_free_when_closed(usbtv->snd);
usbtv->snd = NULL;
}
}
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index ee9c656d121f..2308c0b4f5e7 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -113,7 +113,8 @@ static int usbtv_probe(struct usb_interface *intf,
usbtv_audio_fail:
/* we must not free at this point */
- usb_get_dev(usbtv->udev);
+ v4l2_device_get(&usbtv->v4l2_dev);
+ /* this will undo the v4l2_device_get() */
usbtv_video_free(usbtv);
usbtv_video_fail:
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index f2854337cdca..cb6046481aed 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -778,12 +778,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
offset &= 7;
mask = ((1LL << bits) - 1) << offset;
- for (; bits > 0; data++) {
+ while (1) {
u8 byte = *data & mask;
value |= offset > 0 ? (byte >> offset) : (byte << (-offset));
bits -= 8 - (offset > 0 ? offset : 0);
+ if (bits <= 0)
+ break;
+
offset -= 8;
mask = (1 << bits) - 1;
+ data++;
}
/* Sign-extend the value if needed. */
@@ -1849,30 +1853,35 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
{
struct uvc_entity *entity;
struct uvc_control *ctrl;
- unsigned int i, found = 0;
+ unsigned int i;
+ bool found;
u32 reqflags;
u16 size;
u8 *data = NULL;
int ret;
/* Find the extension unit. */
+ found = false;
list_for_each_entry(entity, &chain->entities, chain) {
if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT &&
- entity->id == xqry->unit)
+ entity->id == xqry->unit) {
+ found = true;
break;
+ }
}
- if (entity->id != xqry->unit) {
+ if (!found) {
uvc_trace(UVC_TRACE_CONTROL, "Extension unit %u not found.\n",
xqry->unit);
return -ENOENT;
}
/* Find the control and perform delayed initialization if needed. */
+ found = false;
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
if (ctrl->index == xqry->selector - 1) {
- found = 1;
+ found = true;
break;
}
}
@@ -2029,13 +2038,6 @@ static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
goto done;
}
- /*
- * Retrieve control flags from the device. Ignore errors and work with
- * default flag values from the uvc_ctrl array when the device doesn't
- * properly implement GET_INFO on standard controls.
- */
- uvc_ctrl_get_flags(dev, ctrl, &ctrl->info);
-
ctrl->initialized = 1;
uvc_trace(UVC_TRACE_CONTROL, "Added control %pUl/%u to device %s "
@@ -2258,6 +2260,13 @@ static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
if (uvc_entity_match_guid(ctrl->entity, info->entity) &&
ctrl->index == info->index) {
uvc_ctrl_add_info(dev, ctrl, info);
+ /*
+ * Retrieve control flags from the device. Ignore errors
+ * and work with default flag values from the uvc_ctrl
+ * array when the device doesn't properly implement
+ * GET_INFO on standard controls.
+ */
+ uvc_ctrl_get_flags(dev, ctrl, &ctrl->info);
break;
}
}
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 38c73cdbef70..998ce712978a 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -940,7 +940,10 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
unsigned int i;
extra_size = roundup(extra_size, sizeof(*entity->pads));
- num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1;
+ if (num_pads)
+ num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
+ else
+ num_inputs = 0;
size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads
+ num_inputs;
entity = kzalloc(size, GFP_KERNEL);
@@ -956,7 +959,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
for (i = 0; i < num_inputs; ++i)
entity->pads[i].flags = MEDIA_PAD_FL_SINK;
- if (!UVC_ENTITY_IS_OTERM(entity))
+ if (!UVC_ENTITY_IS_OTERM(entity) && num_pads)
entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE;
entity->bNrInPins = num_inputs;
diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
index 554063c07d7a..f2457953f27c 100644
--- a/drivers/media/usb/uvc/uvc_entity.c
+++ b/drivers/media/usb/uvc/uvc_entity.c
@@ -78,10 +78,45 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain,
int ret;
if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
+ u32 function;
+
v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
strlcpy(entity->subdev.name, entity->name,
sizeof(entity->subdev.name));
+ switch (UVC_ENTITY_TYPE(entity)) {
+ case UVC_VC_SELECTOR_UNIT:
+ function = MEDIA_ENT_F_VID_MUX;
+ break;
+ case UVC_VC_PROCESSING_UNIT:
+ case UVC_VC_EXTENSION_UNIT:
+ /* For lack of a better option. */
+ function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ break;
+ case UVC_COMPOSITE_CONNECTOR:
+ case UVC_COMPONENT_CONNECTOR:
+ function = MEDIA_ENT_F_CONN_COMPOSITE;
+ break;
+ case UVC_SVIDEO_CONNECTOR:
+ function = MEDIA_ENT_F_CONN_SVIDEO;
+ break;
+ case UVC_ITT_CAMERA:
+ function = MEDIA_ENT_F_CAM_SENSOR;
+ break;
+ case UVC_TT_VENDOR_SPECIFIC:
+ case UVC_ITT_VENDOR_SPECIFIC:
+ case UVC_ITT_MEDIA_TRANSPORT_INPUT:
+ case UVC_OTT_VENDOR_SPECIFIC:
+ case UVC_OTT_DISPLAY:
+ case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
+ case UVC_EXTERNAL_VENDOR_SPECIFIC:
+ default:
+ function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+ break;
+ }
+
+ entity->subdev.entity.function = function;
+
ret = media_entity_pads_init(&entity->subdev.entity,
entity->num_pads, entity->pads);
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 18a7384b50ee..06167c51af12 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -252,11 +252,41 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
if (ret < 0)
goto done;
+ /* After the probe, update fmt with the values returned from
+ * negotiation with the device. Some devices return invalid bFormatIndex
+ * and bFrameIndex values, in which case we can only assume they have
+ * accepted the requested format as-is.
+ */
+ for (i = 0; i < stream->nformats; ++i) {
+ if (probe->bFormatIndex == stream->format[i].index) {
+ format = &stream->format[i];
+ break;
+ }
+ }
+
+ if (i == stream->nformats)
+ uvc_trace(UVC_TRACE_FORMAT,
+ "Unknown bFormatIndex %u, using default\n",
+ probe->bFormatIndex);
+
+ for (i = 0; i < format->nframes; ++i) {
+ if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
+ frame = &format->frame[i];
+ break;
+ }
+ }
+
+ if (i == format->nframes)
+ uvc_trace(UVC_TRACE_FORMAT,
+ "Unknown bFrameIndex %u, using default\n",
+ probe->bFrameIndex);
+
fmt->fmt.pix.width = frame->wWidth;
fmt->fmt.pix.height = frame->wHeight;
fmt->fmt.pix.field = V4L2_FIELD_NONE;
fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame);
fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize;
+ fmt->fmt.pix.pixelformat = format->fcc;
fmt->fmt.pix.colorspace = format->colorspace;
fmt->fmt.pix.priv = 0;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index f75d892b6f03..76ef79733a4e 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -2939,7 +2939,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
v4l2_kioctl func)
{
char sbuf[128];
- void *mbuf = NULL;
+ void *mbuf = NULL, *array_buf = NULL;
void *parg = (void *)arg;
long err = -EINVAL;
bool has_array_args;
@@ -2998,20 +2998,14 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
has_array_args = err;
if (has_array_args) {
- /*
- * When adding new types of array args, make sure that the
- * parent argument to ioctl (which contains the pointer to the
- * array) fits into sbuf (so that mbuf will still remain
- * unused up to here).
- */
- mbuf = kvmalloc(array_size, GFP_KERNEL);
+ array_buf = kvmalloc(array_size, GFP_KERNEL);
err = -ENOMEM;
- if (NULL == mbuf)
+ if (array_buf == NULL)
goto out_array_args;
err = -EFAULT;
- if (copy_from_user(mbuf, user_ptr, array_size))
+ if (copy_from_user(array_buf, user_ptr, array_size))
goto out_array_args;
- *kernel_ptr = mbuf;
+ *kernel_ptr = array_buf;
}
/* Handles IOCTL */
@@ -3030,7 +3024,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
if (has_array_args) {
*kernel_ptr = (void __force *)user_ptr;
- if (copy_to_user(user_ptr, mbuf, array_size))
+ if (copy_to_user(user_ptr, array_buf, array_size))
err = -EFAULT;
goto out_array_args;
}
@@ -3052,6 +3046,7 @@ out_array_args:
}
out:
+ kvfree(array_buf);
kvfree(mbuf);
return err;
}
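
video_usercopy() above gains a dedicated array_buf so the array argument no longer shares mbuf and can be freed unconditionally at the end. A reduced sketch of the copy-in side of that change, with illustrative names and error handling kept local:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

/* Copy a user-space array argument into its own kernel buffer. */
static long example_copy_array_arg(void __user *user_ptr, size_t array_size,
                                   void **kernel_ptr)
{
        void *array_buf;

        array_buf = kvmalloc(array_size, GFP_KERNEL);
        if (!array_buf)
                return -ENOMEM;

        if (copy_from_user(array_buf, user_ptr, array_size)) {
                kvfree(array_buf);
                return -EFAULT;
        }

        /* The caller copies results back and kvfree()s the buffer later. */
        *kernel_ptr = array_buf;
        return 0;
}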
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 2f214440008c..1c6b2cc6269a 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -165,35 +165,12 @@ static const struct file_operations emif_mr4_fops = {
static int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
- struct dentry *dentry;
- int ret;
-
- dentry = debugfs_create_dir(dev_name(emif->dev), NULL);
- if (!dentry) {
- ret = -ENOMEM;
- goto err0;
- }
- emif->debugfs_root = dentry;
-
- dentry = debugfs_create_file("regcache_dump", S_IRUGO,
- emif->debugfs_root, emif, &emif_regdump_fops);
- if (!dentry) {
- ret = -ENOMEM;
- goto err1;
- }
-
- dentry = debugfs_create_file("mr4", S_IRUGO,
- emif->debugfs_root, emif, &emif_mr4_fops);
- if (!dentry) {
- ret = -ENOMEM;
- goto err1;
- }
-
+ emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
+ debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
+ &emif_regdump_fops);
+ debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
+ &emif_mr4_fops);
return 0;
-err1:
- debugfs_remove_recursive(emif->debugfs_root);
-err0:
- return ret;
}
static void __exit emif_debugfs_exit(struct emif_data *emif)
diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
index 662d050243be..2fbf8d09af36 100644
--- a/drivers/memory/fsl-corenet-cf.c
+++ b/drivers/memory/fsl-corenet-cf.c
@@ -215,10 +215,8 @@ static int ccf_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, ccf);
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- dev_err(&pdev->dev, "%s: no irq\n", __func__);
- return -ENXIO;
- }
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf);
if (ret) {
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 1c6a7c16e0c1..d8f2cacea750 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -951,7 +951,7 @@ static int gpmc_cs_remap(int cs, u32 base)
int ret;
u32 old_base, size;
- if (cs > gpmc_cs_num) {
+ if (cs >= gpmc_cs_num) {
pr_err("%s: requested chip-select is disabled\n", __func__);
return -ENODEV;
}
@@ -986,7 +986,7 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
struct resource *res = &gpmc->mem;
int r = -1;
- if (cs > gpmc_cs_num) {
+ if (cs >= gpmc_cs_num) {
pr_err("%s: requested chip-select is disabled\n", __func__);
return -ENODEV;
}
@@ -1028,8 +1028,8 @@ EXPORT_SYMBOL(gpmc_cs_request);
void gpmc_cs_free(int cs)
{
- struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
- struct resource *res = &gpmc->mem;
+ struct gpmc_cs_data *gpmc;
+ struct resource *res;
spin_lock(&gpmc_mem_lock);
if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
@@ -1038,6 +1038,9 @@ void gpmc_cs_free(int cs)
spin_unlock(&gpmc_mem_lock);
return;
}
+ gpmc = &gpmc_cs[cs];
+ res = &gpmc->mem;
+
gpmc_cs_disable_mem(cs);
if (res->flags)
release_resource(res);
@@ -2278,6 +2281,10 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
}
}
#else
+void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
+{
+ memset(p, 0, sizeof(*p));
+}
static int gpmc_probe_dt(struct platform_device *pdev)
{
return 0;
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index 475e5b3790ed..be3d978648f5 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -381,8 +381,10 @@ static int aemif_probe(struct platform_device *pdev)
*/
for_each_available_child_of_node(np, child_np) {
ret = of_aemif_parse_abus_config(pdev, child_np);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(child_np);
goto error;
+ }
}
} else if (pdata && pdata->num_abus_data > 0) {
for (i = 0; i < pdata->num_abus_data; i++, aemif->num_cs++) {
@@ -408,8 +410,10 @@ static int aemif_probe(struct platform_device *pdev)
for_each_available_child_of_node(np, child_np) {
ret = of_platform_populate(child_np, NULL,
dev_lookup, dev);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(child_np);
goto error;
+ }
}
} else if (pdata) {
for (i = 0; i < pdata->num_sub_devices; i++) {
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index b1564cacd19e..20ae8652adf4 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -469,7 +469,6 @@ static void memstick_check(struct work_struct *work)
host->card = card;
if (device_register(&card->dev)) {
put_device(&card->dev);
- kfree(host->card);
host->card = NULL;
}
} else
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 716fc8ed31d3..8a02f11076f9 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2146,7 +2146,7 @@ static int msb_init_disk(struct memstick_dev *card)
set_disk_ro(msb->disk, 1);
msb_start(card);
- device_add_disk(&card->dev, msb->disk);
+ device_add_disk(&card->dev, msb->disk, NULL);
dbg("Disk added");
return 0;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 5ee932631fae..0cd30dcb6801 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1236,7 +1236,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
set_capacity(msb->disk, capacity);
dev_dbg(&card->dev, "capacity set %ld\n", capacity);
- device_add_disk(&card->dev, msb->disk);
+ device_add_disk(&card->dev, msb->disk, NULL);
msb->active = 1;
return 0;
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index 627d6e62fe31..4559593ecd5a 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -762,8 +762,10 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto error3;
dev->mmio = pci_ioremap_bar(pdev, 0);
- if (!dev->mmio)
+ if (!dev->mmio) {
+ error = -ENOMEM;
goto error4;
+ }
dev->irq = pdev->irq;
spin_lock_init(&dev->irq_lock);
@@ -789,12 +791,14 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
&dev->dummy_dma_page_physical_address, GFP_KERNEL);
r592_stop_dma(dev , 0);
- if (request_irq(dev->irq, &r592_irq, IRQF_SHARED,
- DRV_NAME, dev))
+ error = request_irq(dev->irq, &r592_irq, IRQF_SHARED,
+ DRV_NAME, dev);
+ if (error)
goto error6;
r592_update_card_detect(dev);
- if (memstick_add_host(host))
+ error = memstick_add_host(host);
+ if (error)
goto error7;
message("driver successfully loaded");
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6ba07c7feb92..cec867c10968 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -118,8 +118,6 @@ int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
int mptscsih_resume(struct pci_dev *pdev);
#endif
-#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE
-
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
@@ -1176,8 +1174,10 @@ mptscsih_remove(struct pci_dev *pdev)
MPT_SCSI_HOST *hd;
int sz1;
- if((hd = shost_priv(host)) == NULL)
- return;
+ if (host == NULL)
+ hd = NULL;
+ else
+ hd = shost_priv(host);
mptscsih_shutdown(pdev);
@@ -1193,14 +1193,15 @@ mptscsih_remove(struct pci_dev *pdev)
"Free'd ScsiLookup (%d) memory\n",
ioc->name, sz1));
- kfree(hd->info_kbuf);
+ if (hd)
+ kfree(hd->info_kbuf);
/* NULL the Scsi_Host pointer
*/
ioc->sh = NULL;
- scsi_host_put(host);
-
+ if (host)
+ scsi_host_put(host);
mpt_detach(pdev);
}
@@ -2420,7 +2421,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
/* Copy the sense received into the scsi command block. */
req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC));
- memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc));
+ memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC);
/* Log SMART data (asc = 0x5D, non-IM case only) if required.
*/
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index a4403a57ddc8..09acaa2cf74a 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -1433,6 +1433,15 @@ err_irq:
arizona_irq_exit(arizona);
err_pm:
pm_runtime_disable(arizona->dev);
+
+ switch (arizona->pdata.clk32k_src) {
+ case ARIZONA_32KZ_MCLK1:
+ case ARIZONA_32KZ_MCLK2:
+ arizona_clk32k_disable(arizona);
+ break;
+ default:
+ break;
+ }
err_reset:
arizona_enable_reset(arizona);
regulator_disable(arizona->dcvdd);
@@ -1455,6 +1464,15 @@ int arizona_dev_exit(struct arizona *arizona)
regulator_disable(arizona->dcvdd);
regulator_put(arizona->dcvdd);
+ switch (arizona->pdata.clk32k_src) {
+ case ARIZONA_32KZ_MCLK1:
+ case ARIZONA_32KZ_MCLK2:
+ arizona_clk32k_disable(arizona);
+ break;
+ default:
+ break;
+ }
+
mfd_remove_devices(arizona->dev);
arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona);
arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona);
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index fab3cdc27ed6..19d57a45134c 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -185,9 +185,9 @@ static int bd9571mwv_probe(struct i2c_client *client,
return ret;
}
- ret = mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO, bd9571mwv_cells,
- ARRAY_SIZE(bd9571mwv_cells), NULL, 0,
- regmap_irq_get_domain(bd->irq_data));
+ ret = devm_mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO,
+ bd9571mwv_cells, ARRAY_SIZE(bd9571mwv_cells),
+ NULL, 0, regmap_irq_get_domain(bd->irq_data));
if (ret) {
regmap_del_irq_chip(bd->irq, bd->irq_data);
return ret;
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 1476465ce803..fe614ba5fec9 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -93,6 +93,11 @@ struct dln2_mod_rx_slots {
spinlock_t lock;
};
+enum dln2_endpoint {
+ DLN2_EP_OUT = 0,
+ DLN2_EP_IN = 1,
+};
+
struct dln2_dev {
struct usb_device *usb_dev;
struct usb_interface *interface;
@@ -285,7 +290,11 @@ static void dln2_rx(struct urb *urb)
len = urb->actual_length - sizeof(struct dln2_header);
if (handle == DLN2_HANDLE_EVENT) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dln2->event_cb_lock, flags);
dln2_run_event_callbacks(dln2, id, echo, data, len);
+ spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
} else {
/* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
if (dln2_transfer_complete(dln2, urb, handle, echo))
@@ -736,10 +745,10 @@ static int dln2_probe(struct usb_interface *interface,
hostif->desc.bNumEndpoints < 2)
return -ENODEV;
- epin = &hostif->endpoint[0].desc;
- epout = &hostif->endpoint[1].desc;
+ epout = &hostif->endpoint[DLN2_EP_OUT].desc;
if (!usb_endpoint_is_bulk_out(epout))
return -ENODEV;
+ epin = &hostif->endpoint[DLN2_EP_IN].desc;
if (!usb_endpoint_is_bulk_in(epin))
return -ENODEV;
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 742d6c1973f4..adea7ff63132 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -176,6 +176,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
+ /* EBG */
+ { PCI_VDEVICE(INTEL, 0x1bad), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x1bae), (kernel_ulong_t)&bxt_uart_info },
/* GLK */
{ PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info },
{ PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info },
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 95e217e6b6d7..7577afd42842 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -397,7 +397,7 @@ int intel_lpss_probe(struct device *dev,
if (!lpss)
return -ENOMEM;
- lpss->priv = devm_ioremap(dev, info->mem->start + LPSS_PRIV_OFFSET,
+ lpss->priv = devm_ioremap_uc(dev, info->mem->start + LPSS_PRIV_OFFSET,
LPSS_PRIV_SIZE);
if (!lpss->priv)
return -ENOMEM;
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 182973df1aed..77c965c6a65f 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -32,6 +32,11 @@ int mfd_cell_enable(struct platform_device *pdev)
const struct mfd_cell *cell = mfd_get_cell(pdev);
int err = 0;
+ if (!cell->enable) {
+ dev_dbg(&pdev->dev, "No .enable() call-back registered\n");
+ return 0;
+ }
+
/* only call enable hook if the cell wasn't previously enabled */
if (atomic_inc_return(cell->usage_count) == 1)
err = cell->enable(pdev);
@@ -49,6 +54,11 @@ int mfd_cell_disable(struct platform_device *pdev)
const struct mfd_cell *cell = mfd_get_cell(pdev);
int err = 0;
+ if (!cell->disable) {
+ dev_dbg(&pdev->dev, "No .disable() call-back registered\n");
+ return 0;
+ }
+
/* only disable if no other clients are using it */
if (atomic_dec_return(cell->usage_count) == 0)
err = cell->disable(pdev);
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index e0173bf4b0dc..ec1ac61a21ed 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1429,8 +1429,14 @@ static int sm501_plat_probe(struct platform_device *dev)
goto err_claim;
}
- return sm501_init_dev(sm);
+ ret = sm501_init_dev(sm);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+ err_unmap:
+ iounmap(sm->regs);
err_claim:
release_resource(sm->regs_claim);
kfree(sm->regs_claim);
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 69df27769c21..3ba8cfa4b3b7 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -212,7 +212,7 @@ static int sprd_pmic_probe(struct spi_device *spi)
}
ret = devm_regmap_add_irq_chip(&spi->dev, ddata->regmap, ddata->irq,
- IRQF_ONESHOT | IRQF_NO_SUSPEND, 0,
+ IRQF_ONESHOT, 0,
&ddata->irq_chip, &ddata->irq_data);
if (ret) {
dev_err(&spi->dev, "Failed to add PMIC irq chip %d\n", ret);
@@ -228,9 +228,34 @@ static int sprd_pmic_probe(struct spi_device *spi)
return ret;
}
+ device_init_wakeup(&spi->dev, true);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int sprd_pmic_suspend(struct device *dev)
+{
+ struct sprd_pmic *ddata = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(ddata->irq);
+
+ return 0;
+}
+
+static int sprd_pmic_resume(struct device *dev)
+{
+ struct sprd_pmic *ddata = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(ddata->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(sprd_pmic_pm_ops, sprd_pmic_suspend, sprd_pmic_resume);
+
static const struct of_device_id sprd_pmic_match[] = {
{ .compatible = "sprd,sc2731", .data = &sc2731_data },
{},
@@ -242,6 +267,7 @@ static struct spi_driver sprd_pmic_driver = {
.name = "sc27xx-pmic",
.bus = &spi_bus_type,
.of_match_table = sprd_pmic_match,
+ .pm = &sprd_pmic_pm_ops,
},
.probe = sprd_pmic_probe,
};
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index efcd4b980c94..1adba6a46dcb 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -158,13 +158,18 @@ static const struct regmap_config stm32_timers_regmap_cfg = {
static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
{
+ u32 arr;
+
+ /* Backup ARR to restore it after getting the maximum value */
+ regmap_read(ddata->regmap, TIM_ARR, &arr);
+
/*
* Only the available bits will be written so when readback
* we get the maximum value of auto reload register
*/
regmap_write(ddata->regmap, TIM_ARR, ~0L);
regmap_read(ddata->regmap, TIM_ARR, &ddata->max_arr);
- regmap_write(ddata->regmap, TIM_ARR, 0x0);
+ regmap_write(ddata->regmap, TIM_ARR, arr);
}
static void stm32_timers_dma_probe(struct device *dev,
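
stm32_timers_get_arr_size() above now backs up the auto-reload register before writing the all-ones probe value and restores it afterwards instead of zeroing it. The generic save/probe/restore idiom over regmap looks like this (placeholder register, return values of the regmap calls ignored for brevity as in the driver):

#include <linux/regmap.h>

/* Discover a register's writable bits without losing its contents. */
static unsigned int example_probe_reg_max(struct regmap *map, unsigned int reg)
{
        unsigned int saved, max;

        regmap_read(map, reg, &saved);  /* back up the current value */
        regmap_write(map, reg, ~0U);    /* only writable bits will stick */
        regmap_read(map, reg, &max);    /* read back the maximum */
        regmap_write(map, reg, saved);  /* restore the original value */

        return max;
}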
diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
index fd789d2eb0f5..9f7ae1e1ebcd 100644
--- a/drivers/mfd/wm831x-auxadc.c
+++ b/drivers/mfd/wm831x-auxadc.c
@@ -98,11 +98,10 @@ static int wm831x_auxadc_read_irq(struct wm831x *wm831x,
wait_for_completion_timeout(&req->done, msecs_to_jiffies(500));
mutex_lock(&wm831x->auxadc_lock);
-
- list_del(&req->list);
ret = req->val;
out:
+ list_del(&req->list);
mutex_unlock(&wm831x->auxadc_lock);
kfree(req);
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 22bd6525e09c..933a50049d72 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -704,3 +704,4 @@ module_i2c_driver(wm8994_i2c_driver);
MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_SOFTDEP("pre: wm8994_regulator");
diff --git a/drivers/misc/aspeed-lpc-snoop.c b/drivers/misc/aspeed-lpc-snoop.c
index c10be21a1663..e2cb0b9607d1 100644
--- a/drivers/misc/aspeed-lpc-snoop.c
+++ b/drivers/misc/aspeed-lpc-snoop.c
@@ -15,6 +15,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
@@ -71,6 +72,7 @@ struct aspeed_lpc_snoop_channel {
struct aspeed_lpc_snoop {
struct regmap *regmap;
int irq;
+ struct clk *clk;
struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS];
};
@@ -97,8 +99,10 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
return -EINTR;
}
ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
+ if (ret)
+ return ret;
- return ret ? ret : copied;
+ return copied;
}
static __poll_t snoop_file_poll(struct file *file,
@@ -286,22 +290,42 @@ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
return -ENODEV;
}
+ lpc_snoop->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(lpc_snoop->clk)) {
+ rc = PTR_ERR(lpc_snoop->clk);
+ if (rc != -EPROBE_DEFER)
+ dev_err(dev, "couldn't get clock\n");
+ return rc;
+ }
+ rc = clk_prepare_enable(lpc_snoop->clk);
+ if (rc) {
+ dev_err(dev, "couldn't enable clock\n");
+ return rc;
+ }
+
rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev);
if (rc)
- return rc;
+ goto err;
rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port);
if (rc)
- return rc;
+ goto err;
/* Configuration of 2nd snoop channel port is optional */
if (of_property_read_u32_index(dev->of_node, "snoop-ports",
1, &port) == 0) {
rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port);
- if (rc)
+ if (rc) {
aspeed_lpc_disable_snoop(lpc_snoop, 0);
+ goto err;
+ }
}
+ return 0;
+
+err:
+ clk_disable_unprepare(lpc_snoop->clk);
+
return rc;
}
@@ -313,6 +337,8 @@ static int aspeed_lpc_snoop_remove(struct platform_device *pdev)
aspeed_lpc_disable_snoop(lpc_snoop, 0);
aspeed_lpc_disable_snoop(lpc_snoop, 1);
+ clk_disable_unprepare(lpc_snoop->clk);
+
return 0;
}
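
The aspeed-lpc-snoop probe above now takes and enables a clock, and disables it again on every later failure as well as in remove(). A minimal probe-side sketch of that ordering, assuming a driver-private struct that only carries the clk pointer:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct example_priv {
        struct clk *clk;
};

static int example_setup_rest(struct example_priv *priv)
{
        return 0;       /* stand-in for the remaining probe steps */
}

static int example_probe_clocked(struct device *dev, struct example_priv *priv)
{
        int rc;

        priv->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(priv->clk))
                return PTR_ERR(priv->clk);

        rc = clk_prepare_enable(priv->clk);
        if (rc)
                return rc;

        rc = example_setup_rest(priv);
        if (rc)
                goto err_clk;

        return 0;

err_clk:
        clk_disable_unprepare(priv->clk);
        return rc;
}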
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index d8e3cc2dc747..f9caf233e2cc 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -13,7 +13,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/atmel-ssc.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -23,7 +23,7 @@
#include "../../sound/soc/atmel/atmel_ssc_dai.h"
/* Serialize access to ssc_list and user count */
-static DEFINE_SPINLOCK(user_lock);
+static DEFINE_MUTEX(user_lock);
static LIST_HEAD(ssc_list);
struct ssc_device *ssc_request(unsigned int ssc_num)
@@ -31,7 +31,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
int ssc_valid = 0;
struct ssc_device *ssc;
- spin_lock(&user_lock);
+ mutex_lock(&user_lock);
list_for_each_entry(ssc, &ssc_list, list) {
if (ssc->pdev->dev.of_node) {
if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc")
@@ -47,18 +47,18 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
}
if (!ssc_valid) {
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
pr_err("ssc: ssc%d platform device is missing\n", ssc_num);
return ERR_PTR(-ENODEV);
}
if (ssc->user) {
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
dev_dbg(&ssc->pdev->dev, "module busy\n");
return ERR_PTR(-EBUSY);
}
ssc->user++;
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
clk_prepare(ssc->clk);
@@ -70,14 +70,14 @@ void ssc_free(struct ssc_device *ssc)
{
bool disable_clk = true;
- spin_lock(&user_lock);
+ mutex_lock(&user_lock);
if (ssc->user)
ssc->user--;
else {
disable_clk = false;
dev_dbg(&ssc->pdev->dev, "device already free\n");
}
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
if (disable_clk)
clk_unprepare(ssc->clk);
@@ -240,9 +240,9 @@ static int ssc_probe(struct platform_device *pdev)
return -ENXIO;
}
- spin_lock(&user_lock);
+ mutex_lock(&user_lock);
list_add_tail(&ssc->list, &ssc_list);
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
platform_set_drvdata(pdev, ssc);
@@ -261,9 +261,9 @@ static int ssc_remove(struct platform_device *pdev)
ssc_sound_dai_remove(ssc);
- spin_lock(&user_lock);
+ mutex_lock(&user_lock);
list_del(&ssc->list);
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
return 0;
}
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index f0e845c8e6a7..d3ce9fe08eaf 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -339,6 +339,11 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
{
rts5227_extra_init_hw(pcr);
+ /* Power down OCP for power consumption */
+ if (!pcr->card_exist)
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+
rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG,
FUNC_FORCE_UPME_XMT_DBG);
rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04);
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index da445223f4cc..3eb3c237f339 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -155,6 +155,9 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
rtsx_disable_aspm(pcr);
+ /* Fixes DMA transfer timout issue after disabling ASPM on RTS5260 */
+ msleep(1);
+
if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
@@ -1521,12 +1524,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
if (ret < 0)
- goto disable_irq;
+ goto free_slots;
schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
return 0;
+free_slots:
+ kfree(pcr->slots);
disable_irq:
free_irq(pcr->irq, (void *)pcr);
disable_msi:
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 787a69a2a726..b9d3c87e9318 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -397,8 +397,8 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
*capp_unit_id = get_capp_unit_id(np, *phb_index);
of_node_put(np);
if (!*capp_unit_id) {
- pr_err("cxl: invalid capp unit id (phb_index: %d)\n",
- *phb_index);
+ pr_err("cxl: No capp unit found for PHB[%lld,%d]. Make sure the adapter is on a capi-compatible slot\n",
+ *chipid, *phb_index);
return -ENODEV;
}
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 629e2e156412..0baa229d2b7d 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -628,7 +628,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c
rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
&afu->dev.kobj, "cr%i", cr->cr);
if (rc)
- goto err;
+ goto err1;
rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
if (rc)
diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c
index 8a5adc0d2e88..3ebe5d75ad6a 100644
--- a/drivers/misc/echo/echo.c
+++ b/drivers/misc/echo/echo.c
@@ -381,7 +381,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
*/
ec->factor = 0;
ec->shift = 0;
- if ((ec->nonupdate_dwell == 0)) {
+ if (!ec->nonupdate_dwell) {
int p, logp, shift;
/* Determine:
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 840afb398f9e..8dd5c610c438 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -362,7 +362,7 @@ static int at25_probe(struct spi_device *spi)
at25->nvmem_config.reg_read = at25_ee_read;
at25->nvmem_config.reg_write = at25_ee_write;
at25->nvmem_config.priv = at25;
- at25->nvmem_config.stride = 4;
+ at25->nvmem_config.stride = 1;
at25->nvmem_config.word_size = 1;
at25->nvmem_config.size = chip.byte_len;
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 38766968bfa2..182feab6da25 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -38,6 +38,10 @@ static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
};
+static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = {
+ .quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE,
+};
+
struct eeprom_93xx46_dev {
struct spi_device *spi;
struct eeprom_93xx46_platform_data *pdata;
@@ -58,6 +62,11 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
}
+static inline bool has_quirk_extra_read_cycle(struct eeprom_93xx46_dev *edev)
+{
+ return edev->pdata->quirks & EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE;
+}
+
static int eeprom_93xx46_read(void *priv, unsigned int off,
void *val, size_t count)
{
@@ -99,6 +108,11 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
cmd_addr, edev->spi->max_speed_hz);
+ if (has_quirk_extra_read_cycle(edev)) {
+ cmd_addr <<= 1;
+ bits += 1;
+ }
+
spi_message_init(&m);
t[0].tx_buf = (char *)&cmd_addr;
@@ -366,6 +380,7 @@ static void select_deassert(void *context)
static const struct of_device_id eeprom_93xx46_of_table[] = {
{ .compatible = "eeprom-93xx46", },
{ .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
+ { .compatible = "microchip,93lc46b", .data = &microchip_93lc46b_data, },
{}
};
MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
@@ -523,3 +538,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs");
MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
MODULE_ALIAS("spi:93xx46");
+MODULE_ALIAS("spi:eeprom-93xx46");
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 8b01257783dd..49e08b6133f5 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -107,19 +107,20 @@
#include <asm/sections.h>
-#define v1printk(a...) do { \
- if (verbose) \
- printk(KERN_INFO a); \
- } while (0)
-#define v2printk(a...) do { \
- if (verbose > 1) \
- printk(KERN_INFO a); \
- touch_nmi_watchdog(); \
- } while (0)
-#define eprintk(a...) do { \
- printk(KERN_ERR a); \
- WARN_ON(1); \
- } while (0)
+#define v1printk(a...) do { \
+ if (verbose) \
+ printk(KERN_INFO a); \
+} while (0)
+#define v2printk(a...) do { \
+ if (verbose > 1) { \
+ printk(KERN_INFO a); \
+ } \
+ touch_nmi_watchdog(); \
+} while (0)
+#define eprintk(a...) do { \
+ printk(KERN_ERR a); \
+ WARN_ON(1); \
+} while (0)
#define MAX_CONFIG_LEN 40
static struct kgdb_io kgdbts_io_ops;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index e9bb1cfa6a7a..21ac34b38395 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -221,7 +221,7 @@ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
static int lis3_3dlh_rates[4] = {50, 100, 400, 1000};
/* ODR is Output Data Rate */
-static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
+static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3)
{
u8 ctrl;
int shift;
@@ -229,15 +229,23 @@ static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
lis3->read(lis3, CTRL_REG1, &ctrl);
ctrl &= lis3->odr_mask;
shift = ffs(lis3->odr_mask) - 1;
- return lis3->odrs[(ctrl >> shift)];
+ return (ctrl >> shift);
}
static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
{
- int div = lis3lv02d_get_odr(lis3);
+ int odr_idx = lis3lv02d_get_odr_index(lis3);
+ int div = lis3->odrs[odr_idx];
- if (WARN_ONCE(div == 0, "device returned spurious data"))
+ if (div == 0) {
+ if (odr_idx == 0) {
+ /* Power-down mode, not sampling, no need to sleep */
+ return 0;
+ }
+
+ dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx);
return -ENXIO;
+ }
/* LIS3 power on delay is quite long */
msleep(lis3->pwron_delay / div);
@@ -820,9 +828,12 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lis3lv02d *lis3 = dev_get_drvdata(dev);
+ int odr_idx;
lis3lv02d_sysfs_poweron(lis3);
- return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3));
+
+ odr_idx = lis3lv02d_get_odr_index(lis3);
+ return sprintf(buf, "%d\n", lis3->odrs[odr_idx]);
}
static ssize_t lis3lv02d_rate_set(struct device *dev,
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index c439c827eea8..0ef759671b54 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -284,6 +284,7 @@ struct lis3lv02d {
int regs_size;
u8 *reg_cache;
bool regs_stored;
+ bool init_required;
u8 odr_mask; /* ODR bit mask */
u8 whoami; /* indicates measurement precision */
s16 (*read_data) (struct lis3lv02d *lis3, int reg);
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index cce47a15a79f..aeb960cb096d 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -13,7 +13,7 @@ KCOV_INSTRUMENT_rodata.o := n
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
- --rename-section .text=.rodata,alloc,readonly,load
+ --rename-section .noinstr.text=.rodata,alloc,readonly,load
targets += rodata.o rodata_objcopy.o
$(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
$(call if_changed,objcopy)
diff --git a/drivers/misc/lkdtm/rodata.c b/drivers/misc/lkdtm/rodata.c
index 58d180af72cf..baacb876d1d9 100644
--- a/drivers/misc/lkdtm/rodata.c
+++ b/drivers/misc/lkdtm/rodata.c
@@ -5,7 +5,7 @@
*/
#include "lkdtm.h"
-void notrace lkdtm_rodata_do_nothing(void)
+void noinstr lkdtm_rodata_do_nothing(void)
{
/* Does nothing. We just want an architecture agnostic "return". */
}
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index bb2e1387b119..57b5868c9e8b 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -755,9 +755,8 @@ static int mei_cl_device_remove(struct device *dev)
mei_cl_bus_module_put(cldev);
module_put(THIS_MODULE);
- dev->driver = NULL;
- return ret;
+ return ret;
}
static ssize_t name_show(struct device *dev, struct device_attribute *a,
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index ebdcf0b450e2..524b8c0d371b 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -276,6 +276,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid(dev, uuid);
__mei_me_cl_del(dev, me_cl);
+ mei_me_cl_put(me_cl);
up_write(&dev->me_clients_rwsem);
}
@@ -297,6 +298,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
__mei_me_cl_del(dev, me_cl);
+ mei_me_cl_put(me_cl);
up_write(&dev->me_clients_rwsem);
}
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 64e318f589b4..0d23efcc74ff 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -138,11 +138,11 @@ static inline u8 mei_cl_me_id(const struct mei_cl *cl)
*
* @cl: host client
*
- * Return: mtu
+ * Return: mtu or 0 if client is not connected
*/
static inline size_t mei_cl_mtu(const struct mei_cl *cl)
{
- return cl->me_cl->props.max_msg_length;
+ return cl->me_cl ? cl->me_cl->props.max_msg_length : 0;
}
/**
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 5a661cbdf2ae..66f4d12d0060 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -224,6 +224,9 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
return ret;
}
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_request_autosuspend(dev->dev);
+
list_move_tail(&cb->list, &cl->rd_pending);
return 0;
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 0e4193cb08cf..e1f59b17715d 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1403,6 +1403,8 @@ retry:
NULL);
up_write(&mm->mmap_sem);
if (nr_pages != pinned_pages->nr_pages) {
+ if (pinned_pages->nr_pages < 0)
+ pinned_pages->nr_pages = 0;
if (try_upgrade) {
if (ulimit)
__scif_dec_pinned_vm_lock(mm,
@@ -1423,7 +1425,6 @@ retry:
if (pinned_pages->nr_pages < nr_pages) {
err = -EFAULT;
- pinned_pages->nr_pages = nr_pages;
goto dec_pinned;
}
@@ -1436,7 +1437,6 @@ dec_pinned:
__scif_dec_pinned_vm_lock(mm, nr_pages, 0);
/* Something went wrong! Rollback */
error_unmap:
- pinned_pages->nr_pages = nr_pages;
scif_destroy_pinned_pages(pinned_pages);
*pages = NULL;
dev_dbg(scif_info.mdev.this_device,
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index de7f035a176d..f4332a97c691 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -301,7 +301,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
/* First assign the vring's allocated in host memory */
vqconfig = _vop_vq_config(vdev->desc) + index;
memcpy_fromio(&config, vqconfig, sizeof(config));
- _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
+ _vr_size = round_up(vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN), 4);
vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
vr_size);
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index cbc8ebcff5cf..a252c2199b93 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -308,7 +308,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
num = le16_to_cpu(vqconfig[i].num);
mutex_init(&vvr->vr_mutex);
- vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
+ vr_size = PAGE_ALIGN(round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4) +
sizeof(struct _mic_vring_info));
vr->va = (void *)
__get_free_pages(GFP_KERNEL | __GFP_ZERO,
@@ -320,7 +320,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
goto err;
}
vr->len = vr_size;
- vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
+ vr->info = vr->va + round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4);
vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
DMA_BIDIRECTIONAL);
@@ -611,6 +611,7 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
size_t partlen;
bool dma = VOP_USE_DMA;
int err = 0;
+ size_t offset = 0;
if (daddr & (dma_alignment - 1)) {
vdev->tx_dst_unaligned += len;
@@ -659,13 +660,20 @@ memcpy:
* We are copying to IO below and should ideally use something
* like copy_from_user_toio(..) if it existed.
*/
- if (copy_from_user((void __force *)dbuf, ubuf, len)) {
- err = -EFAULT;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
+ while (len) {
+ partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
+
+ if (copy_from_user(vvr->buf, ubuf + offset, partlen)) {
+ err = -EFAULT;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ memcpy_toio(dbuf + offset, vvr->buf, partlen);
+ offset += partlen;
+ vdev->out_bytes += partlen;
+ len -= partlen;
}
- vdev->out_bytes += len;
err = 0;
err:
vpdev->hw_ops->iounmap(vpdev, dbuf);
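The loop added above bounds each user copy to the size of the driver's bounce buffer and then pushes the staged chunk to device memory, instead of copying the whole length in one go. A minimal userspace sketch of the same chunking pattern; plain memcpy() stands in for copy_from_user() and memcpy_toio(), and the buffer size is an assumed placeholder.

#include <stddef.h>
#include <string.h>

#define BOUNCE_BUF_SIZE 4096	/* stand-in for VOP_INT_DMA_BUF_SIZE */

/* Copy len bytes from src to dst through a small intermediate buffer,
 * never staging more than BOUNCE_BUF_SIZE bytes at a time. */
void copy_in_chunks(void *dst, const void *src, size_t len, void *bounce)
{
	size_t offset = 0;

	while (len) {
		size_t partlen = len < BOUNCE_BUF_SIZE ? len : BOUNCE_BUF_SIZE;

		memcpy(bounce, (const char *)src + offset, partlen);
		memcpy((char *)dst + offset, bounce, partlen);
		offset += partlen;
		len -= partlen;
	}
}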
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 540845651b8c..309703e9c42e 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -64,7 +64,6 @@
#define CLKCFG_UARTCLKSEL (1 << 18)
/* Macros for ML7213 */
-#define PCI_VENDOR_ID_ROHM 0x10db
#define PCI_DEVICE_ID_ROHM_ML7213_PHUB 0x801A
/* Macros for ML7223 */
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 727dc6ec427d..7d166f57f624 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -75,6 +75,11 @@
#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
+#define PCI_DEVICE_ID_TI_AM654 0xb00c
+
+#define is_am654_pci_dev(pdev) \
+ ((pdev)->device == PCI_DEVICE_ID_TI_AM654)
+
static DEFINE_IDA(pci_endpoint_test_ida);
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
@@ -593,6 +598,7 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
int ret = -EINVAL;
enum pci_barno bar;
struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
+ struct pci_dev *pdev = test->pdev;
mutex_lock(&test->mutex);
switch (cmd) {
@@ -600,6 +606,8 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
bar = arg;
if (bar < 0 || bar > 5)
goto ret;
+ if (is_am654_pci_dev(pdev) && bar == BAR_0)
+ goto ret;
ret = pci_endpoint_test_bar(test, bar);
break;
case PCITEST_LEGACY_IRQ:
@@ -792,10 +800,20 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static const struct pci_endpoint_test_data am654_data = {
+ .test_reg_bar = BAR_2,
+ .alignment = SZ_64K,
+ .irq_type = IRQ_TYPE_MSI,
+};
+
static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
- { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
+ { PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
+ .driver_data = (kernel_ulong_t)&am654_data
+ },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
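The AM654 entry above attaches per-device configuration through driver_data, which the probe path then uses instead of compiled-in defaults. A simplified sketch of that table-with-data lookup; the structures, function name, and the second device ID are illustrative, not the kernel's.

#include <stddef.h>
#include <stdint.h>

struct test_config {
	int test_reg_bar;
	unsigned int alignment;
};

struct id_entry {
	uint16_t vendor, device;
	const struct test_config *config;	/* like PCI driver_data */
};

static const struct test_config am654_config = {
	.test_reg_bar = 2,		/* BAR_2 holds the test registers */
	.alignment = 64 * 1024,		/* 64K DMA alignment */
};

static const struct id_entry id_table[] = {
	{ 0x104c, 0xb00c, &am654_config },	/* TI AM654 needs overrides */
	{ 0x104c, 0x0000, NULL },		/* placeholder: defaults */
	{ 0 }
};

const struct test_config *lookup_config(uint16_t vendor, uint16_t device)
{
	const struct id_entry *e;

	for (e = id_table; e->vendor; e++)
		if (e->vendor == vendor && e->device == device)
			return e->config;	/* may be NULL: use defaults */
	return NULL;
}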
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
index bc089e634a75..26e20b091160 100644
--- a/drivers/misc/vmw_vmci/vmci_context.c
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -751,7 +751,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
return VMCI_ERROR_MORE_DATA;
}
- dbells = kmalloc(data_size, GFP_ATOMIC);
+ dbells = kzalloc(data_size, GFP_ATOMIC);
if (!dbells)
return VMCI_ERROR_NO_MEM;
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index f005206d9033..4581210349d2 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -334,7 +334,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
{
int result;
- struct vmci_notify_bm_set_msg bitmap_set_msg;
+ struct vmci_notify_bm_set_msg bitmap_set_msg = { };
bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_SET_NOTIFY_BITMAP);
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index dad5abee656e..dd20ea4ad0c4 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -169,7 +169,7 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
struct vmci_datagram *check_msg;
- check_msg = kmalloc(msg_size, GFP_KERNEL);
+ check_msg = kzalloc(msg_size, GFP_KERNEL);
if (!check_msg) {
dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
return -ENOMEM;
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index bd52f29b4a4e..9bc97d6a651d 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -552,6 +552,9 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
+ if (queue_size + queue_page_size > KMALLOC_MAX_SIZE)
+ return NULL;
+
queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
if (queue) {
queue->q_header = NULL;
@@ -645,7 +648,7 @@ static void qp_release_pages(struct page **pages,
for (i = 0; i < num_pages; i++) {
if (dirty)
- set_page_dirty(pages[i]);
+ set_page_dirty_lock(pages[i]);
put_page(pages[i]);
pages[i] = NULL;
@@ -671,8 +674,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
if (retval < (int)produce_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
retval);
- qp_release_pages(produce_q->kernel_if->u.h.header_page,
- retval, false);
+ if (retval > 0)
+ qp_release_pages(produce_q->kernel_if->u.h.header_page,
+ retval, false);
err = VMCI_ERROR_NO_MEM;
goto out;
}
@@ -683,8 +687,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
if (retval < (int)consume_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
retval);
- qp_release_pages(consume_q->kernel_if->u.h.header_page,
- retval, false);
+ if (retval > 0)
+ qp_release_pages(consume_q->kernel_if->u.h.header_page,
+ retval, false);
qp_release_pages(produce_q->kernel_if->u.h.header_page,
produce_q->kernel_if->num_pages, false);
err = VMCI_ERROR_NO_MEM;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 60eac66dc9f0..c2c45c148ae7 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -623,6 +623,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
}
/*
+ * Make sure to update CACHE_CTRL in case it was changed. The cache
+ * will get turned back on if the card is re-initialized, e.g.
+ * suspend/resume or hw reset in recovery.
+ */
+ if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
+ (cmd.opcode == MMC_SWITCH)) {
+ u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;
+
+ card->ext_csd.cache_ctrl = value;
+ }
+
+ /*
* According to the SD specs, some commands require a delay after
* issuing the command.
*/
@@ -631,7 +643,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
- if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) {
+ if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
/*
* Ensure RPMB/R1B command has completed by polling CMD13
* "Send Status".
@@ -1424,6 +1436,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
struct mmc_request *mrq = &mqrq->brq.mrq;
struct request_queue *q = req->q;
struct mmc_host *host = mq->card->host;
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
unsigned long flags;
bool put_card;
int err;
@@ -1453,7 +1466,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
spin_lock_irqsave(q->queue_lock, flags);
- mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+ mq->in_flight[issue_type] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);
@@ -2223,6 +2236,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
case MMC_ISSUE_ASYNC:
switch (req_op(req)) {
case REQ_OP_FLUSH:
+ if (!mmc_cache_enabled(host)) {
+ blk_mq_end_request(req, BLK_STS_OK);
+ return MMC_REQ_FINISHED;
+ }
ret = mmc_blk_cqe_issue_flush(mq, req);
break;
case REQ_OP_READ:
@@ -2484,8 +2501,8 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
struct mmc_rpmb_data, chrdev);
- put_device(&rpmb->dev);
mmc_blk_put(rpmb->md);
+ put_device(&rpmb->dev);
return 0;
}
@@ -2670,7 +2687,7 @@ static int mmc_add_disk(struct mmc_blk_data *md)
int ret;
struct mmc_card *card = md->queue.card;
- device_add_disk(md->parent, md->disk);
+ device_add_disk(md->parent, md->disk, NULL);
md->force_ro.show = force_ro_show;
md->force_ro.store = force_ro_store;
sysfs_attr_init(&md->force_ro.attr);
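The CACHE_CTRL hunk above keeps the kernel's view of the cache state in sync by decoding the user-issued SWITCH (CMD6) argument. A standalone sketch of that decoding; the macro values mirror the usual CMD6 layout (index in bits 23:16, value in bits 15:8) and the EXT_CSD byte number is taken from the eMMC spec as an assumption here.

#include <stdint.h>
#include <stdio.h>

#define EXTRACT_INDEX_FROM_ARG(x)	(((x) & 0x00FF0000) >> 16)
#define EXTRACT_VALUE_FROM_ARG(x)	(((x) & 0x0000FF00) >> 8)

#define EXT_CSD_CACHE_CTRL	33	/* EXT_CSD byte controlling the cache */

int main(void)
{
	/* write-byte access (0x3), index 33 (CACHE_CTRL), value 1 */
	uint32_t arg = (0x3u << 24) | (EXT_CSD_CACHE_CTRL << 16) | (1u << 8);

	if (EXTRACT_INDEX_FROM_ARG(arg) == EXT_CSD_CACHE_CTRL)
		printf("cache_ctrl becomes %u\n",
		       (unsigned int)(EXTRACT_VALUE_FROM_ARG(arg) & 1));
	return 0;
}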
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index fc92c6c1c9a4..941f39ca4eb7 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -376,11 +376,6 @@ void mmc_remove_card(struct mmc_card *card)
mmc_remove_card_debugfs(card);
#endif
- if (host->cqe_enabled) {
- host->cqe_ops->cqe_disable(host);
- host->cqe_enabled = false;
- }
-
if (mmc_card_present(card)) {
if (mmc_host_is_spi(card->host)) {
pr_info("%s: SPI card removed\n",
@@ -393,6 +388,10 @@ void mmc_remove_card(struct mmc_card *card)
of_node_put(card->dev.of_node);
}
+ if (host->cqe_enabled) {
+ host->cqe_ops->cqe_disable(host);
+ host->cqe_enabled = false;
+ }
+
put_device(&card->dev);
}
-
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 56f7f3600469..798bcb65b4df 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1537,7 +1537,7 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
- return err;
+ goto power_cycle;
if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
return -EIO;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 087ba68b2920..3071b644e94a 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -32,6 +32,7 @@ struct mmc_bus_ops {
int (*shutdown)(struct mmc_host *);
int (*hw_reset)(struct mmc_host *);
int (*sw_reset)(struct mmc_host *);
+ bool (*cache_enabled)(struct mmc_host *);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -173,4 +174,12 @@ static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
host->ops->post_req(host, mrq, err);
}
+static inline bool mmc_cache_enabled(struct mmc_host *host)
+{
+ if (host->bus_ops->cache_enabled)
+ return host->bus_ops->cache_enabled(host);
+
+ return false;
+}
+
#endif
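mmc_cache_enabled() above is the usual optional-callback pattern: the wrapper returns a safe default when a bus type does not implement the hook (only the eMMC bus ops gain .cache_enabled in the hunks shown). A simplified sketch of the pattern; names here are stand-ins, not the mmc structures.

#include <stdbool.h>
#include <stddef.h>

struct bus_ops {
	bool (*cache_enabled)(void *host);	/* optional hook */
};

struct host {
	const struct bus_ops *bus_ops;
};

/* NULL-safe wrapper: callers never need to know whether the bus type
 * implements the hook; a missing hook means "no cache to flush". */
static inline bool host_cache_enabled(struct host *h)
{
	if (h->bus_ops && h->bus_ops->cache_enabled)
		return h->bus_ops->cache_enabled(h);

	return false;
}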
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 5ca53e225382..d9202f2726d1 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -300,7 +300,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
}
}
-static void mmc_part_add(struct mmc_card *card, unsigned int size,
+static void mmc_part_add(struct mmc_card *card, u64 size,
unsigned int part_cfg, char *name, int idx, bool ro,
int area_type)
{
@@ -316,7 +316,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
{
int idx;
u8 hc_erase_grp_sz, hc_wp_grp_sz;
- unsigned int part_size;
+ u64 part_size;
/*
* General purpose partition feature support --
@@ -346,8 +346,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
<< 8) +
ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
- part_size *= (size_t)(hc_erase_grp_sz *
- hc_wp_grp_sz);
+ part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
mmc_part_add(card, part_size << 19,
EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
"gp%d", idx, false,
@@ -365,7 +364,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
int err = 0, idx;
- unsigned int part_size;
+ u64 part_size;
struct device_node *np;
bool broken_hpi = false;
@@ -427,10 +426,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
/* EXT_CSD value is in units of 10ms, but we store in ms */
card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
- /* Some eMMC set the value too low so set a minimum */
- if (card->ext_csd.part_time &&
- card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
- card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
/* Sleep / awake timeout in 100ns units */
if (sa_shift > 0 && sa_shift <= 0x17)
@@ -620,6 +615,17 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.data_sector_size = 512;
}
+ /*
+ * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined
+ * when accessing a specific field", so use it here if there is no
+ * PARTITION_SWITCH_TIME.
+ */
+ if (!card->ext_csd.part_time)
+ card->ext_csd.part_time = card->ext_csd.generic_cmd6_time;
+ /* Some eMMC set the value too low so set a minimum */
+ if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
+ card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
+
/* eMMC v5 or later */
if (card->ext_csd.rev >= 7) {
memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
@@ -2003,6 +2009,12 @@ static void mmc_detect(struct mmc_host *host)
}
}
+static bool _mmc_cache_enabled(struct mmc_host *host)
+{
+ return host->card->ext_csd.cache_size > 0 &&
+ host->card->ext_csd.cache_ctrl & 1;
+}
+
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
int err = 0;
@@ -2187,6 +2199,7 @@ static const struct mmc_bus_ops mmc_ops = {
.alive = mmc_alive,
.shutdown = mmc_shutdown,
.hw_reset = _mmc_hw_reset,
+ .cache_enabled = _mmc_cache_enabled,
};
/*
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 693b99eff74b..334678707deb 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -1014,9 +1014,7 @@ int mmc_flush_cache(struct mmc_card *card)
{
int err = 0;
- if (mmc_card_mmc(card) &&
- (card->ext_csd.cache_size > 0) &&
- (card->ext_csd.cache_ctrl & 1)) {
+ if (mmc_cache_enabled(card->host)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_FLUSH_CACHE, 1, 0);
if (err)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index becc6594a8a4..9aaf5a2d83c3 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -108,11 +108,10 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
case MMC_ISSUE_DCMD:
if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
if (recovery_needed)
- __mmc_cqe_recovery_notifier(mq);
+ mmc_cqe_recovery_notifier(mrq);
return BLK_EH_RESET_TIMER;
}
- /* No timeout (XXX: huh? comment doesn't make much sense) */
- blk_mq_complete_request(req);
+ /* The request has gone already */
return BLK_EH_DONE;
default:
/* Timeout is handled by mmc core */
@@ -126,18 +125,13 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
unsigned long flags;
- int ret;
+ bool ignore_tout;
spin_lock_irqsave(q->queue_lock, flags);
-
- if (mq->recovery_needed || !mq->use_cqe)
- ret = BLK_EH_RESET_TIMER;
- else
- ret = mmc_cqe_timed_out(req);
-
+ ignore_tout = mq->recovery_needed || !mq->use_cqe;
spin_unlock_irqrestore(q->queue_lock, flags);
- return ret;
+ return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}
static void mmc_mq_recovery_handler(struct work_struct *work)
@@ -191,7 +185,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
q->limits.discard_granularity = card->pref_erase << 9;
/* granularity must not be greater than max. discard */
if (card->pref_erase > max_discard)
- q->limits.discard_granularity = 0;
+ q->limits.discard_granularity = SECTOR_SIZE;
if (mmc_can_secure_erase_trim(card))
blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}
@@ -370,8 +364,10 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
- if (mmc_card_mmc(card))
+ if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
block_size = card->ext_csd.data_sector_size;
+ WARN_ON(block_size != 512 && block_size != 4096);
+ }
blk_queue_logical_block_size(mq->queue, block_size);
blk_queue_max_segment_size(mq->queue,
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 04738359ec02..aa3de584b90c 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -138,6 +138,9 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
csd->erase_size <<= csd->write_blkbits - 9;
}
+
+ if (UNSTUFF_BITS(resp, 13, 1))
+ mmc_card_set_readonly(card);
break;
case 1:
/*
@@ -172,6 +175,9 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->write_blkbits = 9;
csd->write_partial = 0;
csd->erase_size = 1;
+
+ if (UNSTUFF_BITS(resp, 13, 1))
+ mmc_card_set_readonly(card);
break;
default:
pr_err("%s: unrecognised CSD structure version %d\n",
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 0aa99694b937..4e72ad24322f 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -720,9 +720,8 @@ try_again:
/* Retry init sequence, but without R4_18V_PRESENT. */
retries = 0;
goto try_again;
- } else {
- goto remove;
}
+ return err;
}
/*
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index f8c372839d24..dca72444b312 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -24,12 +24,17 @@
#include "sdio_cis.h"
#include "sdio_ops.h"
+#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
+
static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
unsigned i, nr_strings;
char **buffer, *string;
+ if (size < 2)
+ return 0;
+
/* Find all null-terminated (including zero length) strings in
the TPLLV1_INFO field. Trailing garbage is ignored. */
buf += 2;
@@ -267,6 +272,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
do {
unsigned char tpl_code, tpl_link;
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
if (ret)
@@ -319,6 +326,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
prev = &this->next;
if (ret == -ENOENT) {
+ if (time_after(jiffies, timeout))
+ break;
/* warn about unknown tuples */
pr_warn_ratelimited("%s: queuing unknown"
" CIS tuple 0x%02x (%u bytes)\n",
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index 28f5aaca505a..2c5a6e7aadc0 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -351,12 +352,16 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100
+static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
+{
+ return cqhci_readl(cq_host, CQHCI_CTL);
+}
+
static void cqhci_off(struct mmc_host *mmc)
{
struct cqhci_host *cq_host = mmc->cqe_private;
- ktime_t timeout;
- bool timed_out;
u32 reg;
+ int err;
if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
return;
@@ -366,15 +371,9 @@ static void cqhci_off(struct mmc_host *mmc)
cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
- timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
- while (1) {
- timed_out = ktime_compare(ktime_get(), timeout) > 0;
- reg = cqhci_readl(cq_host, CQHCI_CTL);
- if ((reg & CQHCI_HALT) || timed_out)
- break;
- }
-
- if (timed_out)
+ err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
+ reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
+ if (err < 0)
pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
else
pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index f6c76be2be0d..27837a794e7b 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -249,6 +249,9 @@ static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host)
mrq = host->mrq;
+ if (host->cmd->error)
+ meson_mx_mmc_soft_reset(host);
+
host->mrq = NULL;
host->cmd = NULL;
@@ -360,14 +363,6 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
meson_mx_mmc_start_cmd(mmc, mrq->cmd);
}
-static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
-{
- struct meson_mx_mmc_host *host = mmc_priv(mmc);
- u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
-
- return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
-}
-
static void meson_mx_mmc_read_response(struct mmc_host *mmc,
struct mmc_command *cmd)
{
@@ -509,7 +504,6 @@ static void meson_mx_mmc_timeout(struct timer_list *t)
static struct mmc_host_ops meson_mx_mmc_ops = {
.request = meson_mx_mmc_request,
.set_ios = meson_mx_mmc_set_ios,
- .card_busy = meson_mx_mmc_card_busy,
.get_cd = mmc_gpio_get_cd,
.get_ro = mmc_gpio_get_ro,
};
@@ -573,7 +567,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
mmc->f_max = clk_round_rate(host->cfg_div_clk,
clk_get_rate(host->parent_clk));
- mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
mmc->ops = &meson_mx_mmc_ops;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 9ecf86ba4bb0..967e47770af6 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -928,13 +928,13 @@ static void msdc_track_cmd_data(struct msdc_host *host,
static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
{
unsigned long flags;
- bool ret;
- ret = cancel_delayed_work(&host->req_timeout);
- if (!ret) {
- /* delay work already running */
- return;
- }
+ /*
+ * No need to check the return value of cancel_delayed_work(), as only
+ * one path can reach this point.
+ */
+ cancel_delayed_work(&host->req_timeout);
+
spin_lock_irqsave(&host->lock, flags);
host->mrq = NULL;
spin_unlock_irqrestore(&host->lock, flags);
@@ -952,7 +952,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
bool done = false;
bool sbc_error;
unsigned long flags;
- u32 *rsp = cmd->resp;
+ u32 *rsp;
if (mrq->sbc && cmd == mrq->cmd &&
(events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR
@@ -973,6 +973,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
if (done)
return true;
+ rsp = cmd->resp;
sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
@@ -1154,7 +1155,7 @@ static void msdc_data_xfer_next(struct msdc_host *host,
static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
struct mmc_request *mrq, struct mmc_data *data)
{
- struct mmc_command *stop = data->stop;
+ struct mmc_command *stop;
unsigned long flags;
bool done;
unsigned int check_data = events &
@@ -1170,6 +1171,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
if (done)
return true;
+ stop = data->stop;
if (check_data || (stop && stop->error)) {
dev_dbg(host->dev, "DMA status: 0x%8X\n",
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index add1e70195ea..7125687faf76 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -659,7 +659,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
ret = mmc_of_parse(mmc);
if (ret)
- goto out_clk_disable;
+ goto out_free_dma;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 61f0faddfd88..e8ab582551f8 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -764,6 +764,7 @@ int renesas_sdhi_remove(struct platform_device *pdev)
tmio_mmc_host_remove(host);
renesas_sdhi_clk_disable(host);
+ tmio_mmc_host_free(host);
return 0;
}
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 382172fb3da8..6e2685c9e9cb 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -180,8 +180,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
mmc_get_dma_dir(data)))
goto force_pio;
- /* This DMAC cannot handle if buffer is not 8-bytes alignment */
- if (!IS_ALIGNED(sg_dma_address(sg), 8))
+ /* This DMAC cannot handle buffers that are not 128-byte aligned */
+ if (!IS_ALIGNED(sg_dma_address(sg), 128))
goto force_pio_with_unmap;
if (data->flags & MMC_DATA_READ) {
@@ -222,15 +222,12 @@ static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
DTRAN_CTRL_DM_START);
}
-static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
+static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
{
- struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
enum dma_data_direction dir;
- spin_lock_irq(&host->lock);
-
if (!host->data)
- goto out;
+ return false;
if (host->data->flags & MMC_DATA_READ)
dir = DMA_FROM_DEVICE;
@@ -243,6 +240,17 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
if (dir == DMA_FROM_DEVICE)
clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
+ return true;
+}
+
+static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
+{
+ struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+
+ spin_lock_irq(&host->lock);
+ if (!renesas_sdhi_internal_dmac_complete(host))
+ goto out;
+
tmio_mmc_do_data_irq(host);
out:
spin_unlock_irq(&host->lock);
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 57c1ec322e42..6cc187ce3a32 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -546,16 +546,55 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
(host->mmc->caps & MMC_CAP_1_8V_DDR))
host->mmc->caps2 = MMC_CAP2_HS400_1_8V;
+ /*
+ * There are two types of presets out in the wild:
+ * 1) Default/broken presets.
+ * These presets have two sets of problems:
+ * a) The clock divisor for SDR12, SDR25, and SDR50 is too small.
+ * This results in clock frequencies that are 2x higher than
+ * acceptable. i.e., SDR12 = 25 MHz, SDR25 = 50 MHz, SDR50 =
+ * 100 MHz.
+ * b) The HS200 and HS400 driver strengths don't match.
+ * By default, the SDR104 preset register has a driver strength of
+ * A, but the (internal) HS400 preset register has a driver
+ * strength of B. As part of initializing HS400, HS200 tuning
+ * needs to be performed. Having different driver strengths
+ * between tuning and operation is wrong. It results in different
+ * rise/fall times that lead to incorrect sampling.
+ * 2) Firmware with properly initialized presets.
+ * These presets have proper clock divisors. i.e., SDR12 => 12MHz,
+ * SDR25 => 25 MHz, SDR50 => 50 MHz. Additionally the HS200 and
+ * HS400 preset driver strengths match.
+ *
+ * Enabling presets for HS400 doesn't work for the following reasons:
+ * 1) sdhci_set_ios has a hard coded list of timings that are used
+ * to determine if presets should be enabled.
+ * 2) sdhci_get_preset_value is using a non-standard register to
+ * read out HS400 presets. The AMD controller doesn't support this
+ * non-standard register. In fact, it doesn't expose the HS400
+ * preset register anywhere in the SDHCI memory map. This results
+ * in reading a garbage value and using the wrong presets.
+ *
+ * Since HS400 and HS200 presets must be identical, we could
+ * instead use the SDR104 preset register.
+ *
+ * If the above issues are resolved we could remove this quirk for
+ * firmware that has valid presets (i.e., SDR12 <= 12 MHz).
+ */
+ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+
host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
host->mmc_host_ops.set_ios = amd_set_ios;
return 0;
}
static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
- .chip = &sdhci_acpi_chip_amd,
- .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
- .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
- SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .chip = &sdhci_acpi_chip_amd,
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.probe_slot = sdhci_acpi_emmc_amd_probe_slot,
};
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 629860f7327c..5099353e6f13 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -82,7 +82,7 @@
#define ESDHC_STD_TUNING_EN (1 << 24)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
#define ESDHC_TUNING_START_TAP_DEFAULT 0x1
-#define ESDHC_TUNING_START_TAP_MASK 0xff
+#define ESDHC_TUNING_START_TAP_MASK 0x7f
#define ESDHC_TUNING_STEP_MASK 0x00070000
#define ESDHC_TUNING_STEP_SHIFT 16
@@ -1370,9 +1370,10 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
- int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+ int dead;
pm_runtime_get_sync(&pdev->dev);
+ dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 17b2054d9b62..4970cd40813b 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1060,7 +1060,7 @@ static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
- int tuning_seq_cnt = 3;
+ int tuning_seq_cnt = 10;
u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
int rc;
struct mmc_ios ios = host->mmc->ios;
@@ -1084,6 +1084,12 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
msm_host->use_cdr = true;
/*
+ * Clear tuning_done flag before tuning to ensure proper
+ * HS400 settings.
+ */
+ msm_host->tuning_done = 0;
+
+ /*
* For HS400 tuning in HS200 timing requires:
* - select MCLK/2 in VENDOR_SPEC
* - program MCLK to 400MHz (or nearest supported) in GCC
@@ -1118,6 +1124,22 @@ retry:
} while (++phase < ARRAY_SIZE(tuned_phases));
if (tuned_phase_cnt) {
+ if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
+ /*
+ * All phases valid is _almost_ as bad as no phases
+ * valid. Probably all phases are not really reliable
+ * but we didn't detect where the unreliable place is.
+ * That means we'll essentially be guessing and hoping
+ * we get a good phase. Better to try a few times.
+ */
+ dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
+ mmc_hostname(mmc));
+ if (--tuning_seq_cnt) {
+ tuned_phase_cnt = 0;
+ goto retry;
+ }
+ }
+
rc = msm_find_most_appropriate_phase(host, tuned_phases,
tuned_phase_cnt);
if (rc < 0)
@@ -1700,7 +1722,9 @@ static const struct sdhci_ops sdhci_msm_ops = {
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
- SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &sdhci_msm_ops,
};
@@ -1909,6 +1933,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}
+ msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 1b7cd144fb01..a34b17b284a5 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -28,6 +28,7 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = {
static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
.ops = &sdhci_dwcmshc_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
static int dwcmshc_probe(struct platform_device *pdev)
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 65985dc3e1a7..cb4a0458f098 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -465,6 +465,7 @@ struct intel_host {
int drv_strength;
bool d3_retune;
bool rpm_retune_ok;
+ bool needs_pwr_off;
u32 glk_rx_ctrl1;
u32 glk_tun_val;
};
@@ -556,6 +557,9 @@ static int intel_select_drive_strength(struct mmc_card *card,
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct intel_host *intel_host = sdhci_pci_priv(slot);
+ if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
+ return 0;
+
return intel_host->drv_strength;
}
@@ -587,9 +591,25 @@ out:
static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd)
{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct intel_host *intel_host = sdhci_pci_priv(slot);
int cntr;
u8 reg;
+ /*
+ * Bus power may control card power, but a full reset still may not
+ * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
+ * That might be needed to initialize correctly, if the card was left
+ * powered on previously.
+ */
+ if (intel_host->needs_pwr_off) {
+ intel_host->needs_pwr_off = false;
+ if (mode != MMC_POWER_OFF) {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ usleep_range(10000, 12500);
+ }
+ }
+
sdhci_set_power(host, mode, vdd);
if (mode == MMC_POWER_OFF)
@@ -736,7 +756,8 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
{
return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
- dmi_match(DMI_BIOS_VENDOR, "LENOVO");
+ (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
+ dmi_match(DMI_SYS_VENDOR, "IRBIS"));
}
static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
@@ -922,6 +943,14 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
+static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
+{
+ struct intel_host *intel_host = sdhci_pci_priv(slot);
+ u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
+
+ intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
+}
+
static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
{
byt_probe_slot(slot);
@@ -939,6 +968,8 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
+ byt_needs_pwr_off(slot);
+
return 0;
}
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index a0b5089b3274..ca34fa424634 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -170,7 +170,12 @@ static void xenon_reset_exit(struct sdhci_host *host,
/* Disable tuning request and auto-retuning again */
xenon_retune_setup(host);
- xenon_set_acg(host, true);
+ /*
+ * The ACG should be turned off at early init time, in order
+ * to solve a possible issue with the 1.8V regulator stabilization.
+ * The feature is enabled at a later stage.
+ */
+ xenon_set_acg(host, false);
xenon_set_sdclk_off_idle(host, sdhc_id, false);
@@ -238,6 +243,16 @@ static void xenon_voltage_switch(struct sdhci_host *host)
{
/* Wait for 5ms after set 1.8V signal enable bit */
usleep_range(5000, 5500);
+
+ /*
+ * For some reason the controller's Host Control2 register reports
+ * the bit representing 1.8V signaling as 0 when read after it was
+ * written as 1. Subsequent read reports 1.
+ *
+ * Since this may cause some issues, do an empty read of the Host
+ * Control2 register here to circumvent this.
+ */
+ sdhci_readw(host, SDHCI_HOST_CONTROL2);
}
static const struct sdhci_ops sdhci_xenon_ops = {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 5a7fd89a8f2b..499a3d2a8e31 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -133,7 +133,7 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
u32 present;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
- !mmc_card_is_removable(host->mmc))
+ !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
return;
if (enable) {
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index cdfeb15b6f05..ef3aa8b52078 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1866,10 +1866,12 @@ static int usdhi6_probe(struct platform_device *pdev)
ret = mmc_add_host(mmc);
if (ret < 0)
- goto e_clk_off;
+ goto e_release_dma;
return 0;
+e_release_dma:
+ usdhi6_dma_release(host);
e_clk_off:
clk_disable_unprepare(host->clk);
e_free_mmc:
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 32c4211506fc..9fdb92729c28 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -323,6 +323,8 @@ struct via_crdr_mmc_host {
/* some devices need a very long delay for power to stabilize */
#define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001
+#define VIA_CMD_TIMEOUT_MS 1000
+
static const struct pci_device_id via_ids[] = {
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
@@ -555,14 +557,17 @@ static void via_sdc_send_command(struct via_crdr_mmc_host *host,
{
void __iomem *addrbase;
struct mmc_data *data;
+ unsigned int timeout_ms;
u32 cmdctrl = 0;
WARN_ON(host->cmd);
data = cmd->data;
- mod_timer(&host->timer, jiffies + HZ);
host->cmd = cmd;
+ timeout_ms = cmd->busy_timeout ? cmd->busy_timeout : VIA_CMD_TIMEOUT_MS;
+ mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms));
+
/*Command index*/
cmdctrl = cmd->opcode << 8;
@@ -1268,11 +1273,14 @@ static void via_init_sdc_pm(struct via_crdr_mmc_host *host)
static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
{
struct via_crdr_mmc_host *host;
+ unsigned long flags;
host = pci_get_drvdata(pcidev);
+ spin_lock_irqsave(&host->lock, flags);
via_save_pcictrlreg(host);
via_save_sdcreg(host);
+ spin_unlock_irqrestore(&host->lock, flags);
pci_save_state(pcidev);
pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index ba44ea6d497e..3ab75d3e2ce3 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -727,7 +727,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
kfree(mtd->eraseregions);
kfree(mtd);
kfree(cfi->cmdset_priv);
- kfree(cfi->cfiq);
return NULL;
}
@@ -1882,7 +1881,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
continue;
}
- if (time_after(jiffies, timeo) && !chip_ready(map, adr))
+ /*
+ * We check "time_after" and "!chip_good" before checking "chip_good" to avoid
+ * the failure due to scheduling.
+ */
+ if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
break;
if (chip_good(map, adr, datum)) {
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 3ea44cff9b75..c24a26fbbffb 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -231,12 +231,41 @@ static int mtdpart_setup_real(char *s)
struct cmdline_mtd_partition *this_mtd;
struct mtd_partition *parts;
int mtd_id_len, num_parts;
- char *p, *mtd_id;
+ char *p, *mtd_id, *semicol, *open_parenth;
+
+ /*
+ * Replace the first ';' by a NULL char so strrchr can work
+ * properly.
+ */
+ semicol = strchr(s, ';');
+ if (semicol)
+ *semicol = '\0';
+
+ /*
+ * Make sure that a ":" inside a partition name is not mistaken
+ * for the mtd-id separator.
+ */
+ open_parenth = strchr(s, '(');
+ if (open_parenth)
+ *open_parenth = '\0';
mtd_id = s;
- /* fetch <mtd-id> */
- p = strchr(s, ':');
+ /*
+ * fetch <mtd-id>. We use strrchr to ignore all ':' that could
+ * be present in the MTD name, only the last one is interpreted
+ * as an <mtd-id>/<part-definition> separator.
+ */
+ p = strrchr(s, ':');
+
+ /* Restore the '(' now. */
+ if (open_parenth)
+ *open_parenth = '(';
+
+ /* Restore the ';' now. */
+ if (semicol)
+ *semicol = ';';
+
if (!p) {
pr_err("no mtd-id\n");
return -EINVAL;
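The parsing change above hinges on restricting strrchr() to the current definition: temporarily NUL-terminate at the first ';' and the first '(', take the last ':' that remains as the mtd-id separator, then restore both characters. A self-contained sketch of that strategy; the helper name and the sample command line are illustrative only.

#include <stdio.h>
#include <string.h>

static char *find_mtd_id_separator(char *s)
{
	char *semicol = strchr(s, ';');
	char *open_parenth, *p;

	if (semicol)
		*semicol = '\0';

	open_parenth = strchr(s, '(');
	if (open_parenth)
		*open_parenth = '\0';

	/* last ':' before the first '(' is the <mtd-id> separator */
	p = strrchr(s, ':');

	if (open_parenth)
		*open_parenth = '(';
	if (semicol)
		*semicol = ';';

	return p;
}

int main(void)
{
	char s[] = "spi0.0:1M(u-boot:env),-(rootfs);nand0:-(data)";
	char *p = find_mtd_id_separator(s);

	printf("mtd-id length: %td\n", p - s);	/* 6, i.e. "spi0.0" */
	return 0;
}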
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 9ee04b5f9311..5a04ff638688 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -240,22 +240,25 @@ static int phram_setup(const char *val)
ret = parse_num64(&start, token[1]);
if (ret) {
- kfree(name);
parse_err("illegal start address\n");
+ goto error;
}
ret = parse_num64(&len, token[2]);
if (ret) {
- kfree(name);
parse_err("illegal device length\n");
+ goto error;
}
ret = register_device(name, start, len);
- if (!ret)
- pr_info("%s device: %#llx at %#llx\n", name, len, start);
- else
- kfree(name);
+ if (ret)
+ goto error;
+
+ pr_info("%s device: %#llx at %#llx\n", name, len, start);
+ return 0;
+error:
+ kfree(name);
return ret;
}
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index c950c880ad59..90e6cb64db69 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -402,6 +402,17 @@ static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
}
+static const struct mtd_info lpddr2_nvm_mtd_info = {
+ .type = MTD_RAM,
+ .writesize = 1,
+ .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
+ ._read = lpddr2_nvm_read,
+ ._write = lpddr2_nvm_write,
+ ._erase = lpddr2_nvm_erase,
+ ._unlock = lpddr2_nvm_unlock,
+ ._lock = lpddr2_nvm_lock,
+};
+
/*
* lpddr2_nvm driver probe method
*/
@@ -442,6 +453,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
.pfow_base = OW_BASE_ADDRESS,
.fldrv_priv = pcm_data,
};
+
if (IS_ERR(map->virt))
return PTR_ERR(map->virt);
@@ -453,22 +465,13 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
return PTR_ERR(pcm_data->ctl_regs);
/* Populate mtd_info data structure */
- *mtd = (struct mtd_info) {
- .dev = { .parent = &pdev->dev },
- .name = pdev->dev.init_name,
- .type = MTD_RAM,
- .priv = map,
- .size = resource_size(add_range),
- .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width,
- .writesize = 1,
- .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width,
- .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
- ._read = lpddr2_nvm_read,
- ._write = lpddr2_nvm_write,
- ._erase = lpddr2_nvm_erase,
- ._unlock = lpddr2_nvm_unlock,
- ._lock = lpddr2_nvm_lock,
- };
+ *mtd = lpddr2_nvm_mtd_info;
+ mtd->dev.parent = &pdev->dev;
+ mtd->name = pdev->dev.init_name;
+ mtd->priv = map;
+ mtd->size = resource_size(add_range);
+ mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width;
+ mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width;
/* Verify the presence of the device looking for PFOW string */
if (!lpddr2_nvm_pfow_present(map)) {
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index b13557fe52bd..947bb710bf16 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -81,7 +81,6 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
GFP_KERNEL);
if (!shared) {
- kfree(lpddr);
kfree(mtd);
return NULL;
}
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 29c0bfd74e8a..6a41dfa3c36b 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -447,7 +447,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (new->readonly)
set_disk_ro(gd, 1);
- device_add_disk(&new->mtd->dev, gd);
+ device_add_disk(&new->mtd->dev, gd, NULL);
if (new->disk_attributes) {
ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 02389528f622..82d38001d517 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -368,9 +368,6 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint32_t retlen;
int ret = 0;
- if (!(file->f_mode & FMODE_WRITE))
- return -EPERM;
-
if (length > 4096)
return -EINVAL;
@@ -655,6 +652,48 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
pr_debug("MTD_ioctl\n");
+ /*
+ * Check the file mode to require "dangerous" commands to have write
+ * permissions.
+ */
+ switch (cmd) {
+ /* "safe" commands */
+ case MEMGETREGIONCOUNT:
+ case MEMGETREGIONINFO:
+ case MEMGETINFO:
+ case MEMREADOOB:
+ case MEMREADOOB64:
+ case MEMISLOCKED:
+ case MEMGETOOBSEL:
+ case MEMGETBADBLOCK:
+ case OTPSELECT:
+ case OTPGETREGIONCOUNT:
+ case OTPGETREGIONINFO:
+ case ECCGETLAYOUT:
+ case ECCGETSTATS:
+ case MTDFILEMODE:
+ case BLKPG:
+ case BLKRRPART:
+ break;
+
+ /* "dangerous" commands */
+ case MEMERASE:
+ case MEMERASE64:
+ case MEMLOCK:
+ case MEMUNLOCK:
+ case MEMSETBADBLOCK:
+ case MEMWRITEOOB:
+ case MEMWRITEOOB64:
+ case MEMWRITE:
+ case OTPLOCK:
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EPERM;
+ break;
+
+ default:
+ return -ENOTTY;
+ }
+
switch (cmd) {
case MEMGETREGIONCOUNT:
if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
@@ -702,9 +741,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct erase_info *erase;
- if(!(file->f_mode & FMODE_WRITE))
- return -EPERM;
-
erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
if (!erase)
ret = -ENOMEM;
@@ -997,9 +1033,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
ret = 0;
break;
}
-
- default:
- ret = -ENOTTY;
}
return ret;
@@ -1043,6 +1076,11 @@ static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
struct mtd_oob_buf32 buf;
struct mtd_oob_buf32 __user *buf_user = argp;
+ if (!(file->f_mode & FMODE_WRITE)) {
+ ret = -EPERM;
+ break;
+ }
+
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
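Editor's sketch: the mtdchar.c change consolidates the scattered FMODE_WRITE checks into one classification switch that runs before any command is handled, and adds an explicit check to the compat OOB-write path. The standalone program below models that gate; the command names and the writable flag are simplified stand-ins, not the real MEM* ioctl numbers from <mtd/mtd-abi.h>.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum demo_cmd { CMD_GETINFO, CMD_READOOB, CMD_ERASE, CMD_WRITEOOB };

static int demo_ioctl(enum demo_cmd cmd, bool writable)
{
	/* Classify first: read-only commands pass, destructive ones need
	 * write permission, anything unknown is rejected early. */
	switch (cmd) {
	case CMD_GETINFO:
	case CMD_READOOB:
		break;			/* "safe" */
	case CMD_ERASE:
	case CMD_WRITEOOB:
		if (!writable)
			return -EPERM;	/* "dangerous" */
		break;
	default:
		return -ENOTTY;
	}

	/* ... per-command handling would follow here ... */
	return 0;
}

int main(void)
{
	printf("erase on read-only fd:   %d\n", demo_ioctl(CMD_ERASE, false));
	printf("getinfo on read-only fd: %d\n", demo_ioctl(CMD_GETINFO, false));
	return 0;
}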
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 97ac219c082e..a0b1a7814e2e 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -712,6 +712,9 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
/* Prefer parsed partitions over driver-provided fallback */
ret = parse_mtd_partitions(mtd, types, parser_data);
+ if (ret == -EPROBE_DEFER)
+ goto out;
+
if (ret > 0)
ret = 0;
else if (nr_parts)
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index e078fc41aa61..feeffde2d4fa 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -293,12 +293,13 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
record_size - MTDOOPS_HEADER_SIZE, NULL);
- /* Panics must be written immediately */
- if (reason != KMSG_DUMP_OOPS)
+ if (reason != KMSG_DUMP_OOPS) {
+ /* Panics must be written immediately */
mtdoops_write(cxt, 1);
-
- /* For other cases, schedule work to write it "nicely" */
- schedule_work(&cxt->work_write);
+ } else {
+ /* For other cases, schedule work to write it "nicely" */
+ schedule_work(&cxt->work_write);
+ }
}
static void mtdoops_notify_add(struct mtd_info *mtd)
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index 37a3cc21c7bc..acf7971e815d 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -235,7 +235,7 @@ static int ams_delta_init(struct platform_device *pdev)
goto out_gpio;
/* Scan to find existence of the device */
- err = nand_scan(ams_delta_mtd, 1);
+ err = nand_scan(this, 1);
if (err)
goto out_mtd;
@@ -264,7 +264,7 @@ static int ams_delta_cleanup(struct platform_device *pdev)
void __iomem *io_base = platform_get_drvdata(pdev);
/* Release resources, unregister device */
- nand_release(ams_delta_mtd);
+ nand_release(mtd_to_nand(ams_delta_mtd));
gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index ea022712edee..788762930487 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -826,10 +826,12 @@ static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
NULL, 0,
chip->ecc.strength);
- if (ret >= 0)
+ if (ret >= 0) {
+ mtd->ecc_stats.corrected += ret;
max_bitflips = max(ret, max_bitflips);
- else
+ } else {
mtd->ecc_stats.failed++;
+ }
databuf += chip->ecc.size;
eccbuf += chip->ecc.bytes;
@@ -1694,7 +1696,7 @@ atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
nc->caps->ops->nand_init(nc, nand);
- ret = nand_scan(mtd, nand->numcs);
+ ret = nand_scan(chip, nand->numcs);
if (ret) {
dev_err(nc->dev, "NAND scan failed: %d\n", ret);
return ret;
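Editor's sketch: the atmel hunk adds the missing ecc_stats.corrected accounting next to the existing failure counter. Below is a standalone sketch of the usual per-chunk accounting loop (corrected bitflips accumulate, uncorrectable chunks bump a failure counter, and the worst chunk drives the return value); the types are simplified, not the kernel's struct mtd_ecc_stats.

#include <stdio.h>

struct demo_ecc_stats {
	unsigned int corrected;
	unsigned int failed;
};

/* Pretend per-chunk correction results: >= 0 bitflips fixed, < 0 uncorrectable. */
static const int chunk_results[] = { 0, 2, -1, 1 };

int main(void)
{
	struct demo_ecc_stats stats = { 0, 0 };
	int max_bitflips = 0;
	unsigned int i;

	for (i = 0; i < sizeof(chunk_results) / sizeof(chunk_results[0]); i++) {
		int ret = chunk_results[i];

		if (ret >= 0) {
			stats.corrected += ret;	/* the accounting the patch adds */
			if (ret > max_bitflips)
				max_bitflips = ret;
		} else {
			stats.failed++;
		}
	}

	printf("corrected=%u failed=%u max_bitflips=%d\n",
	       stats.corrected, stats.failed, max_bitflips);
	return 0;
}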
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index 35f5c84cd331..b45fb36537e7 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -466,7 +466,7 @@ static int au1550nd_probe(struct platform_device *pdev)
this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(this, 1);
if (ret) {
dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
goto out3;
@@ -492,7 +492,7 @@ static int au1550nd_remove(struct platform_device *pdev)
struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nand_release(nand_to_mtd(&ctx->chip));
+ nand_release(&ctx->chip);
iounmap(ctx->base);
release_mem_region(r->start, 0x1000);
kfree(ctx);
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/main.c b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
index fb31429b70a9..d79694160845 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/main.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
@@ -65,7 +65,7 @@ static int bcm47xxnflash_remove(struct platform_device *pdev)
{
struct bcm47xxnflash *nflash = platform_get_drvdata(pdev);
- nand_release(nand_to_mtd(&nflash->nand_chip));
+ nand_release(&nflash->nand_chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index 60874de430eb..9b62bc2d25a0 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -423,7 +423,7 @@ int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
(w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
/* Scan NAND */
- err = nand_scan(nand_to_mtd(&b47n->nand_chip), 1);
+ err = nand_scan(&b47n->nand_chip, 1);
if (err) {
pr_err("Could not scan NAND flash: %d\n", err);
goto exit;
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 4b90d5b380c2..774ffa9e23f3 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -491,8 +491,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
} else {
ctrl->cs_offsets = brcmnand_cs_offsets;
- /* v5.0 and earlier has a different CS0 offset layout */
- if (ctrl->nand_version <= 0x0500)
+ /* v3.3-5.0 have a different CS0 offset layout */
+ if (ctrl->nand_version >= 0x0303 &&
+ ctrl->nand_version <= 0x0500)
ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
}
@@ -911,11 +912,14 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
if (!section) {
/*
* Small-page NAND use byte 6 for BBI while large-page
- * NAND use byte 0.
+ * NAND use bytes 0 and 1.
*/
- if (cfg->page_size > 512)
- oobregion->offset++;
- oobregion->length--;
+ if (cfg->page_size > 512) {
+ oobregion->offset += 2;
+ oobregion->length -= 2;
+ } else {
+ oobregion->length--;
+ }
}
}
@@ -2235,6 +2239,12 @@ static int brcmnand_attach_chip(struct nand_chip *chip)
ret = brcmstb_choose_ecc_layout(host);
+ /* If OOB is written with ECC enabled it will cause ECC errors */
+ if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
+ chip->ecc.write_oob = brcmnand_write_oob_raw;
+ chip->ecc.read_oob = brcmnand_read_oob_raw;
+ }
+
return ret;
}
@@ -2301,7 +2311,7 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
nand_writereg(ctrl, cfg_offs,
nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
return ret;
@@ -2616,7 +2626,7 @@ int brcmnand_remove(struct platform_device *pdev)
struct brcmnand_host *host;
list_for_each_entry(host, &ctrl->host_list, node)
- nand_release(nand_to_mtd(&host->chip));
+ nand_release(&host->chip);
clk_disable_unprepare(ctrl->clk);
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index 1dbe43adcfe7..3304594177c6 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -783,7 +783,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
/* Scan to find existence of the device */
cafe->nand.dummy_controller.ops = &cafe_nand_controller_ops;
- err = nand_scan(mtd, 2);
+ err = nand_scan(&cafe->nand, 2);
if (err)
goto out_irq;
@@ -819,7 +819,7 @@ static void cafe_nand_remove(struct pci_dev *pdev)
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
free_irq(pdev->irq, mtd);
- nand_release(mtd);
+ nand_release(chip);
free_rs(cafe->rs);
pci_iounmap(pdev, cafe->mmio);
dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
diff --git a/drivers/mtd/nand/raw/cmx270_nand.c b/drivers/mtd/nand/raw/cmx270_nand.c
index b66e254b6802..2eb933a8f99e 100644
--- a/drivers/mtd/nand/raw/cmx270_nand.c
+++ b/drivers/mtd/nand/raw/cmx270_nand.c
@@ -193,7 +193,7 @@ static int __init cmx270_init(void)
this->write_buf = cmx270_write_buf;
/* Scan to find existence of the device */
- ret = nand_scan(cmx270_nand_mtd, 1);
+ ret = nand_scan(this, 1);
if (ret) {
pr_notice("No NAND device\n");
goto err_scan;
@@ -228,7 +228,7 @@ module_init(cmx270_init);
static void __exit cmx270_cleanup(void)
{
/* Release resources, unregister device */
- nand_release(cmx270_nand_mtd);
+ nand_release(mtd_to_nand(cmx270_nand_mtd));
gpio_free(GPIO_NAND_RB);
gpio_free(GPIO_NAND_CS);
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index beafad62e7d5..d4be416bb2fa 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -241,7 +241,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
}
/* Scan to find existence of the device */
- err = nand_scan(new_mtd, 1);
+ err = nand_scan(this, 1);
if (err)
goto out_free;
@@ -336,7 +336,7 @@ static void __exit cs553x_cleanup(void)
mmio_base = this->IO_ADDR_R;
/* Release resources, unregister device */
- nand_release(mtd);
+ nand_release(this);
kfree(mtd->name);
cs553x_mtd[i] = NULL;
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 40145e206a6b..66d3d5966013 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -807,7 +807,7 @@ static int nand_davinci_probe(struct platform_device *pdev)
/* Scan to find existence of the device(s) */
info->chip.dummy_controller.ops = &davinci_nand_controller_ops;
- ret = nand_scan(mtd, pdata->mask_chipsel ? 2 : 1);
+ ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
return ret;
@@ -841,7 +841,7 @@ static int nand_davinci_remove(struct platform_device *pdev)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
- nand_release(nand_to_mtd(&info->chip));
+ nand_release(&info->chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 2242e999a76b..277e84aa6166 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -1384,7 +1384,7 @@ int denali_init(struct denali_nand_info *denali)
chip->setup_data_interface = denali_setup_data_interface;
chip->dummy_controller.ops = &denali_controller_ops;
- ret = nand_scan(mtd, denali->max_banks);
+ ret = nand_scan(chip, denali->max_banks);
if (ret)
goto disable_irq;
@@ -1407,9 +1407,7 @@ EXPORT_SYMBOL(denali_init);
void denali_remove(struct denali_nand_info *denali)
{
- struct mtd_info *mtd = nand_to_mtd(&denali->nand);
-
- nand_release(mtd);
+ nand_release(&denali->nand);
denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 3c46188dd6d2..ac3792b6fb33 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1620,14 +1620,11 @@ static int __init doc_probe(unsigned long physadr)
else
numchips = doc2001_init(mtd);
- if ((ret = nand_scan(mtd, numchips)) || (ret = doc->late_init(mtd))) {
- /* DBB note: i believe nand_release is necessary here, as
+ if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) {
+ /* DBB note: i believe nand_cleanup is necessary here, as
buffers may have been allocated in nand_base. Check with
Thomas. FIX ME! */
- /* nand_release will call mtd_device_unregister, but we
- haven't yet added it. This is handled without incident by
- mtd_device_unregister, as far as I can tell. */
- nand_release(mtd);
+ nand_cleanup(nand);
goto fail;
}
@@ -1662,7 +1659,7 @@ static void release_nanddoc(void)
doc = nand_get_controller_data(nand);
nextmtd = doc->nextdoc;
- nand_release(mtd);
+ nand_release(nand);
iounmap(doc->virtadr);
release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
free_rs(doc->rs_decoder);
diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c
index 427fcbc1b71c..2d86bc5a886d 100644
--- a/drivers/mtd/nand/raw/docg4.c
+++ b/drivers/mtd/nand/raw/docg4.c
@@ -1391,7 +1391,7 @@ static int __init probe_docg4(struct platform_device *pdev)
* ->attach_chip callback.
*/
nand->dummy_controller.ops = &docg4_controller_ops;
- retval = nand_scan(mtd, 0);
+ retval = nand_scan(nand, 0);
if (retval)
goto free_nand;
@@ -1420,7 +1420,7 @@ unmap:
static int __exit cleanup_docg4(struct platform_device *pdev)
{
struct docg4_priv *doc = platform_get_drvdata(pdev);
- nand_release(doc->mtd);
+ nand_release(mtd_to_nand(doc->mtd));
kfree(mtd_to_nand(doc->mtd));
iounmap(doc->virtadr);
return 0;
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 55f449b711fd..22bcd64a66c8 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -915,7 +915,7 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
goto err;
priv->chip.controller->ops = &fsl_elbc_controller_ops;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(&priv->chip, 1);
if (ret)
goto err;
@@ -942,9 +942,8 @@ static int fsl_elbc_nand_remove(struct platform_device *pdev)
{
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
- struct mtd_info *mtd = nand_to_mtd(&priv->chip);
- nand_release(mtd);
+ nand_release(&priv->chip);
fsl_elbc_chip_remove(priv);
mutex_lock(&fsl_elbc_nand_mutex);
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 7e7729df7827..70bf8e1552a5 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -1079,7 +1079,7 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
goto err;
priv->chip.controller->ops = &fsl_ifc_controller_ops;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(&priv->chip, 1);
if (ret)
goto err;
@@ -1105,9 +1105,8 @@ err:
static int fsl_ifc_nand_remove(struct platform_device *dev)
{
struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
- struct mtd_info *mtd = nand_to_mtd(&priv->chip);
- nand_release(mtd);
+ nand_release(&priv->chip);
fsl_ifc_chip_remove(priv);
mutex_lock(&fsl_ifc_nand_mutex);
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index a88e2cf66e0f..2763fd3ad512 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -191,7 +191,7 @@ static int fun_chip_init(struct fsl_upm_nand *fun,
goto err;
}
- ret = nand_scan(mtd, fun->mchip_count);
+ ret = nand_scan(&fun->chip, fun->mchip_count);
if (ret)
goto err;
@@ -326,7 +326,7 @@ static int fun_remove(struct platform_device *ofdev)
struct mtd_info *mtd = nand_to_mtd(&fun->chip);
int i;
- nand_release(mtd);
+ nand_release(&fun->chip);
kfree(mtd->name);
for (i = 0; i < fun->mchip_count; i++) {
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index f418236fa020..a31bb1da44ec 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -1099,11 +1099,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
host->read_dma_chan = dma_request_channel(mask, filter, NULL);
if (!host->read_dma_chan) {
dev_err(&pdev->dev, "Unable to get read dma channel\n");
+ ret = -ENODEV;
goto disable_clk;
}
host->write_dma_chan = dma_request_channel(mask, filter, NULL);
if (!host->write_dma_chan) {
dev_err(&pdev->dev, "Unable to get write dma channel\n");
+ ret = -ENODEV;
goto release_dma_read_chan;
}
}
@@ -1125,7 +1127,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* Scan to find existence of the device
*/
nand->dummy_controller.ops = &fsmc_nand_controller_ops;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(nand, 1);
if (ret)
goto release_dma_write_chan;
@@ -1161,7 +1163,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
struct fsmc_nand_data *host = platform_get_drvdata(pdev);
if (host) {
- nand_release(nand_to_mtd(&host->nand));
+ nand_release(&host->nand);
if (host->mode == USE_DMA_ACCESS) {
dma_release_channel(host->write_dma_chan);
diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c
index 2780af26d9ab..0e7d00faf33c 100644
--- a/drivers/mtd/nand/raw/gpio.c
+++ b/drivers/mtd/nand/raw/gpio.c
@@ -194,7 +194,7 @@ static int gpio_nand_remove(struct platform_device *pdev)
{
struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
- nand_release(nand_to_mtd(&gpiomtd->nand_chip));
+ nand_release(&gpiomtd->nand_chip);
/* Enable write protection and disable the chip */
if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
@@ -289,7 +289,7 @@ static int gpio_nand_probe(struct platform_device *pdev)
if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
gpiod_direction_output(gpiomtd->nwp, 1);
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
goto err_wp;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 1c1ebbc82824..6bd414bac34d 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -1931,10 +1931,10 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
this->bch_geometry.auxiliary_size = 128;
ret = gpmi_alloc_dma_buffer(this);
if (ret)
- goto err_out;
+ return ret;
chip->dummy_controller.ops = &gpmi_nand_controller_ops;
- ret = nand_scan(mtd, GPMI_IS_MX6(this) ? 2 : 1);
+ ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
if (ret)
goto err_out;
@@ -2026,7 +2026,7 @@ static int gpmi_nand_remove(struct platform_device *pdev)
{
struct gpmi_nand_data *this = platform_get_drvdata(pdev);
- nand_release(nand_to_mtd(&this->nand));
+ nand_release(&this->nand);
gpmi_free_dma_buffer(this);
release_resources(this);
return 0;
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index 950dc7789296..6833fc82e6b1 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -811,7 +811,7 @@ static int hisi_nfc_probe(struct platform_device *pdev)
}
chip->dummy_controller.ops = &hisi_nfc_controller_ops;
- ret = nand_scan(mtd, max_chips);
+ ret = nand_scan(chip, max_chips);
if (ret)
return ret;
@@ -828,9 +828,8 @@ static int hisi_nfc_probe(struct platform_device *pdev)
static int hisi_nfc_remove(struct platform_device *pdev)
{
struct hinfc_host *host = platform_get_drvdata(pdev);
- struct mtd_info *mtd = nand_to_mtd(&host->chip);
- nand_release(mtd);
+ nand_release(&host->chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c
index a7515452bc59..27603d78b157 100644
--- a/drivers/mtd/nand/raw/jz4740_nand.c
+++ b/drivers/mtd/nand/raw/jz4740_nand.c
@@ -331,7 +331,7 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
if (chipnr == 0) {
/* Detect first chip. */
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
goto notfound_id;
@@ -507,7 +507,7 @@ static int jz_nand_remove(struct platform_device *pdev)
struct jz_nand *nand = platform_get_drvdata(pdev);
size_t i;
- nand_release(nand_to_mtd(&nand->chip));
+ nand_release(&nand->chip);
/* Deassert and disable all chips */
writel(0, nand->base + JZ_REG_NAND_CTRL);
diff --git a/drivers/mtd/nand/raw/jz4780_nand.c b/drivers/mtd/nand/raw/jz4780_nand.c
index db4fa60bd52a..b072bd5dd7a0 100644
--- a/drivers/mtd/nand/raw/jz4780_nand.c
+++ b/drivers/mtd/nand/raw/jz4780_nand.c
@@ -286,13 +286,13 @@ static int jz4780_nand_init_chip(struct platform_device *pdev,
nand_set_flash_node(chip, np);
chip->controller->ops = &jz4780_nand_controller_ops;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
- nand_release(mtd);
+ nand_release(chip);
return ret;
}
@@ -307,7 +307,7 @@ static void jz4780_nand_cleanup_chips(struct jz4780_nand_controller *nfc)
while (!list_empty(&nfc->chips)) {
chip = list_first_entry(&nfc->chips, struct jz4780_nand_chip, chip_list);
- nand_release(nand_to_mtd(&chip->chip));
+ nand_release(&chip->chip);
list_del(&chip->chip_list);
}
}
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index e82abada130a..d240b8ff40ca 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -802,7 +802,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
* SMALL block or LARGE block.
*/
nand_chip->dummy_controller.ops = &lpc32xx_nand_controller_ops;
- res = nand_scan(mtd, 1);
+ res = nand_scan(nand_chip, 1);
if (res)
goto free_irq;
@@ -839,9 +839,8 @@ free_gpio:
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
- struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
- nand_release(mtd);
+ nand_release(&host->nand_chip);
free_irq(host->irq, host);
if (use_dma)
dma_release_channel(host->dma_chan);
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index a4e8b7e75135..607e4bdfae03 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -925,7 +925,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
/* Find NAND device */
chip->dummy_controller.ops = &lpc32xx_nand_controller_ops;
- res = nand_scan(mtd, 1);
+ res = nand_scan(chip, 1);
if (res)
goto release_dma;
@@ -956,9 +956,8 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)
{
uint32_t tmp;
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
- struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
- nand_release(mtd);
+ nand_release(&host->nand_chip);
dma_release_channel(host->dma_chan);
/* Force CE high */
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 7a84a8f05b46..00b1adcfad86 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -637,7 +637,7 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
* In case the interrupt was not served in the required time frame,
* check if the ISR was not served or if something went actually wrong.
*/
- if (ret && !pending) {
+ if (!ret && !pending) {
dev_err(nfc->dev, "Timeout waiting for RB signal\n");
return -ETIMEDOUT;
}
@@ -2551,7 +2551,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
chip->options |= NAND_BUSWIDTH_AUTO;
- ret = nand_scan(mtd, marvell_nand->nsels);
+ ret = nand_scan(chip, marvell_nand->nsels);
if (ret) {
dev_err(dev, "could not scan the nand chip\n");
return ret;
@@ -2564,7 +2564,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register mtd device: %d\n", ret);
- nand_release(mtd);
+ nand_cleanup(chip);
return ret;
}
@@ -2573,6 +2573,16 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
return 0;
}
+static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
+{
+ struct marvell_nand_chip *entry, *temp;
+
+ list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
+ nand_release(&entry->chip);
+ list_del(&entry->node);
+ }
+}
+
static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
{
struct device_node *np = dev->of_node;
@@ -2607,21 +2617,16 @@ static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
ret = marvell_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
- return ret;
+ goto cleanup_chips;
}
}
return 0;
-}
-static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
-{
- struct marvell_nand_chip *entry, *temp;
+cleanup_chips:
+ marvell_nand_chips_cleanup(nfc);
- list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
- nand_release(nand_to_mtd(&entry->chip));
- list_del(&entry->node);
- }
+ return ret;
}
static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
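Editor's sketch: marvell_nand_chips_init() now unwinds every chip that was already initialized when a later one fails, instead of returning with the earlier ones still registered. Here is a simplified, array-based sketch of that unwind-on-partial-failure shape (the kernel version walks a linked list with list_for_each_entry_safe; the demo_* helpers are illustrative only).

#include <stdio.h>

#define NCHIPS 4

static int init_chip(int i)
{
	if (i == 2)			/* simulate a failure on the third chip */
		return -1;
	printf("chip %d initialized\n", i);
	return 0;
}

static void cleanup_chip(int i)
{
	printf("chip %d cleaned up\n", i);
}

static int init_all_chips(void)
{
	int i, ret;

	for (i = 0; i < NCHIPS; i++) {
		ret = init_chip(i);
		if (ret)
			goto cleanup_chips;
	}
	return 0;

cleanup_chips:
	/* Undo everything that succeeded before the failure. */
	while (--i >= 0)
		cleanup_chip(i);
	return ret;
}

int main(void)
{
	return init_all_chips() ? 1 : 0;
}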
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index 6d1740d54e0d..b3d6effb3486 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -778,7 +778,7 @@ static int mpc5121_nfc_probe(struct platform_device *op)
}
/* Detect NAND chips */
- retval = nand_scan(mtd, be32_to_cpup(chips_no));
+ retval = nand_scan(chip, be32_to_cpup(chips_no));
if (retval) {
dev_err(dev, "NAND Flash not found !\n");
goto error;
@@ -828,7 +828,7 @@ static int mpc5121_nfc_remove(struct platform_device *op)
struct device *dev = &op->dev;
struct mtd_info *mtd = dev_get_drvdata(dev);
- nand_release(mtd);
+ nand_release(mtd_to_nand(mtd));
mpc5121_nfc_free(dev, mtd);
return 0;
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index ab5a8778c4b2..bb40022e109f 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1382,14 +1382,14 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
mtk_nfc_hw_init(nfc);
- ret = nand_scan(mtd, nsels);
+ ret = nand_scan(nand, nsels);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "mtd parse partition error\n");
- nand_release(mtd);
+ nand_cleanup(nand);
return ret;
}
@@ -1555,7 +1555,7 @@ static int mtk_nfc_remove(struct platform_device *pdev)
while (!list_empty(&nfc->chips)) {
chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
node);
- nand_release(nand_to_mtd(&chip->nand));
+ nand_release(&chip->nand);
list_del(&chip->node);
}
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 4c9214dea424..334578d9f4a8 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -1900,7 +1900,7 @@ static int mxcnd_probe(struct platform_device *pdev)
/* Scan the NAND device */
this->dummy_controller.ops = &mxcnd_controller_ops;
- err = nand_scan(mtd, is_imx25_nfc(host) ? 4 : 1);
+ err = nand_scan(this, is_imx25_nfc(host) ? 4 : 1);
if (err)
goto escan;
@@ -1928,7 +1928,7 @@ static int mxcnd_remove(struct platform_device *pdev)
{
struct mxc_nand_host *host = platform_get_drvdata(pdev);
- nand_release(nand_to_mtd(&host->nand));
+ nand_release(&host->nand);
if (host->clk_act)
clk_disable_unprepare(host->clk);
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index d527e448ce19..de3926eaec13 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -5953,7 +5953,7 @@ static int nand_dt_init(struct nand_chip *chip)
/**
* nand_scan_ident - Scan for the NAND device
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @maxchips: number of chips to scan for
* @table: alternative NAND ID table
*
@@ -5965,11 +5965,11 @@ static int nand_dt_init(struct nand_chip *chip)
* prevented dynamic allocations during this phase which was inconvenient and
* has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
*/
-static int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+static int nand_scan_ident(struct nand_chip *chip, int maxchips,
struct nand_flash_dev *table)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int i, nand_maf_id, nand_dev_id;
- struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
/* Enforce the right timings for reset/detection */
@@ -6423,15 +6423,15 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
/**
* nand_scan_tail - Scan for the NAND device
- * @mtd: MTD device structure
+ * @chip: NAND chip object
*
* This is the second phase of the normal nand_scan() function. It fills out
* all the uninitialized function pointers with the defaults and scans for a
* bad block table if appropriate.
*/
-static int nand_scan_tail(struct mtd_info *mtd)
+static int nand_scan_tail(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i;
@@ -6770,7 +6770,7 @@ static void nand_detach(struct nand_chip *chip)
/**
* nand_scan_with_ids - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @maxchips: number of chips to scan for. @nand_scan_ident() will not be run if
* this parameter is zero (useful for specific drivers that must
* handle this part of the process themselves, e.g docg4).
@@ -6780,14 +6780,13 @@ static void nand_detach(struct nand_chip *chip)
* The flash ID is read and the mtd/chip structures are filled with the
* appropriate values.
*/
-int nand_scan_with_ids(struct mtd_info *mtd, int maxchips,
+int nand_scan_with_ids(struct nand_chip *chip, int maxchips,
struct nand_flash_dev *ids)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
if (maxchips) {
- ret = nand_scan_ident(mtd, maxchips, ids);
+ ret = nand_scan_ident(chip, maxchips, ids);
if (ret)
return ret;
}
@@ -6796,7 +6795,7 @@ int nand_scan_with_ids(struct mtd_info *mtd, int maxchips,
if (ret)
goto cleanup_ident;
- ret = nand_scan_tail(mtd);
+ ret = nand_scan_tail(chip);
if (ret)
goto detach_chip;
@@ -6847,12 +6846,12 @@ EXPORT_SYMBOL_GPL(nand_cleanup);
/**
* nand_release - [NAND Interface] Unregister the MTD device and free resources
* held by the NAND device
- * @mtd: MTD device structure
+ * @chip: NAND chip object
*/
-void nand_release(struct mtd_info *mtd)
+void nand_release(struct nand_chip *chip)
{
- mtd_device_unregister(mtd);
- nand_cleanup(mtd_to_nand(mtd));
+ mtd_device_unregister(nand_to_mtd(chip));
+ nand_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_release);
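Editor's sketch: most of the raw NAND hunks in this series are mechanical fallout of nand_scan(), nand_scan_with_ids() and nand_release() taking a struct nand_chip * instead of a struct mtd_info *, with the mtd view derived internally via nand_to_mtd(); probe error paths that never registered the mtd device switch to nand_cleanup() instead. The standalone program below models that "pass the specific object, derive the generic one" design with fake embedded structs; all demo_* names are illustrative, not kernel API.

#include <stdio.h>

struct demo_mtd {
	const char *name;
};

struct demo_chip {
	struct demo_mtd mtd;	/* generic view embedded in the specific one */
	int selected;
};

/* Analogue of nand_to_mtd(): derive the generic object from the chip. */
static struct demo_mtd *demo_chip_to_mtd(struct demo_chip *chip)
{
	return &chip->mtd;
}

/* New-style entry point: callers hand in the chip, not the mtd. */
static void demo_release(struct demo_chip *chip)
{
	struct demo_mtd *mtd = demo_chip_to_mtd(chip);

	printf("unregistering %s, then cleaning up chip state\n", mtd->name);
	chip->selected = 0;
}

int main(void)
{
	struct demo_chip chip = { .mtd = { .name = "demo-nand" }, .selected = 1 };

	demo_release(&chip);
	return 0;
}

One visible consequence throughout the hunks above is that drivers drop their local "struct mtd_info *mtd = nand_to_mtd(&priv->chip);" temporaries in remove() and call nand_release(&priv->chip) directly.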
diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c
index ebc7b5f76f77..efc92ec4d52f 100644
--- a/drivers/mtd/nand/raw/nand_timings.c
+++ b/drivers/mtd/nand/raw/nand_timings.c
@@ -331,10 +331,9 @@ int onfi_fill_data_interface(struct nand_chip *chip,
/* microseconds -> picoseconds */
timings->tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
timings->tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
- timings->tR_max = 1000000ULL * 200000000ULL;
- /* nanoseconds -> picoseconds */
- timings->tCCS_min = 1000UL * 500000;
+ timings->tR_max = 200000000;
+ timings->tCCS_min = 500000;
}
return 0;
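Editor's note on the nand_timings.c hunk: the sdr timing fields are stored in picoseconds (see the "microseconds -> picoseconds" comment kept in context), and the removed multiplications converted values that were already in the target unit, inflating tR_max to 200 s and tCCS_min to 500 us. The new literals, 200000000 ps and 500000 ps, correspond to 200 us and 500 ns; reading them as the conservative ONFI fallback defaults is this editor's assumption, the diff itself only shows the unit fix. A small arithmetic check:

#include <stdio.h>

#define PS_PER_NS 1000ULL
#define PS_PER_US 1000000ULL

int main(void)
{
	unsigned long long tR_max_ps = 200000000ULL;	/* value from the patch */
	unsigned long long tCCS_min_ps = 500000ULL;	/* value from the patch */

	/* 200000000 ps == 200 us, 500000 ps == 500 ns */
	printf("tR_max   = %llu ps = %llu us\n", tR_max_ps, tR_max_ps / PS_PER_US);
	printf("tCCS_min = %llu ps = %llu ns\n", tCCS_min_ps, tCCS_min_ps / PS_PER_NS);

	/* The removed code multiplied once more, yielding 2e14 ps (200 s)
	 * and 5e8 ps (500 us) respectively. */
	return 0;
}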
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 71ac034aee9c..6e908e4fa8e8 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -2319,7 +2319,7 @@ static int __init ns_init_module(void)
goto error;
chip->dummy_controller.ops = &ns_controller_ops;
- retval = nand_scan(nsmtd, 1);
+ retval = nand_scan(chip, 1);
if (retval) {
NS_ERR("Could not scan NAND Simulator device\n");
goto error;
@@ -2364,7 +2364,7 @@ static int __init ns_init_module(void)
err_exit:
free_nandsim(nand);
- nand_release(nsmtd);
+ nand_release(chip);
for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i)
kfree(nand->partitions[i].name);
error:
@@ -2386,7 +2386,7 @@ static void __exit ns_cleanup_module(void)
int i;
free_nandsim(ns); /* Free nandsim private resources */
- nand_release(nsmtd); /* Unregister driver */
+ nand_release(chip); /* Unregister driver */
for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
kfree(ns->partitions[i].name);
kfree(mtd_to_nand(nsmtd)); /* Free other structures */
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index 540fa1a0ea24..6e96c633ac29 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -181,7 +181,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
goto err;
}
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
goto err;
@@ -258,7 +258,7 @@ static int ndfc_remove(struct platform_device *ofdev)
struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
struct mtd_info *mtd = nand_to_mtd(&ndfc->chip);
- nand_release(mtd);
+ nand_release(&ndfc->chip);
kfree(mtd->name);
return 0;
diff --git a/drivers/mtd/nand/raw/nuc900_nand.c b/drivers/mtd/nand/raw/nuc900_nand.c
index af5b32c9a791..0c675b6c0b6e 100644
--- a/drivers/mtd/nand/raw/nuc900_nand.c
+++ b/drivers/mtd/nand/raw/nuc900_nand.c
@@ -270,7 +270,7 @@ static int nuc900_nand_probe(struct platform_device *pdev)
nuc900_nand_enable(nuc900_nand);
- if (nand_scan(mtd, 1))
+ if (nand_scan(chip, 1))
return -ENXIO;
mtd_device_register(mtd, partitions, ARRAY_SIZE(partitions));
@@ -284,7 +284,7 @@ static int nuc900_nand_remove(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
- nand_release(nand_to_mtd(&nuc900_nand->chip));
+ nand_release(&nuc900_nand->chip);
clk_disable(nuc900_nand->clk);
return 0;
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index b1683d7a7e04..7a4af5f3e3d3 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -2254,7 +2254,7 @@ static int omap_nand_probe(struct platform_device *pdev)
/* scan NAND device connected to chip controller */
nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
- err = nand_scan(mtd, 1);
+ err = nand_scan(nand_chip, 1);
if (err)
goto return_error;
@@ -2290,7 +2290,7 @@ static int omap_nand_remove(struct platform_device *pdev)
}
if (info->dma)
dma_release_channel(info->dma);
- nand_release(mtd);
+ nand_release(nand_chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index a3f32f939cc1..6736777a4156 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -421,6 +421,7 @@ static int elm_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
if (pm_runtime_get_sync(&pdev->dev) < 0) {
ret = -EINVAL;
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
dev_err(&pdev->dev, "can't enable clock\n");
return ret;
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index 52d435285a3f..caffebab6c9b 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -174,14 +174,14 @@ static int __init orion_nand_probe(struct platform_device *pdev)
return ret;
}
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(nc, 1);
if (ret)
goto no_dev;
mtd->name = "orion_nand";
ret = mtd_device_register(mtd, board->parts, board->nr_parts);
if (ret) {
- nand_release(mtd);
+ nand_cleanup(nc);
goto no_dev;
}
@@ -196,9 +196,8 @@ static int orion_nand_remove(struct platform_device *pdev)
{
struct orion_nand_info *info = platform_get_drvdata(pdev);
struct nand_chip *chip = &info->chip;
- struct mtd_info *mtd = nand_to_mtd(chip);
- nand_release(mtd);
+ nand_release(chip);
clk_disable_unprepare(info->clk);
diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c
index 01b00bb69c1e..c58269c12759 100644
--- a/drivers/mtd/nand/raw/oxnas_nand.c
+++ b/drivers/mtd/nand/raw/oxnas_nand.c
@@ -36,6 +36,7 @@ struct oxnas_nand_ctrl {
void __iomem *io_base;
struct clk *clk;
struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS];
+ unsigned int nchips;
};
static uint8_t oxnas_nand_read_byte(struct mtd_info *mtd)
@@ -86,9 +87,9 @@ static int oxnas_nand_probe(struct platform_device *pdev)
struct nand_chip *chip;
struct mtd_info *mtd;
struct resource *res;
- int nchips = 0;
int count = 0;
int err = 0;
+ int i;
/* Allocate memory for the device structure (and zero it) */
oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas),
@@ -123,7 +124,7 @@ static int oxnas_nand_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!chip) {
err = -ENOMEM;
- goto err_clk_unprepare;
+ goto err_release_child;
}
chip->controller = &oxnas->base;
@@ -142,22 +143,20 @@ static int oxnas_nand_probe(struct platform_device *pdev)
chip->chip_delay = 30;
/* Scan to find existence of the device */
- err = nand_scan(mtd, 1);
+ err = nand_scan(chip, 1);
if (err)
- goto err_clk_unprepare;
+ goto err_release_child;
err = mtd_device_register(mtd, NULL, 0);
- if (err) {
- nand_release(mtd);
- goto err_clk_unprepare;
- }
+ if (err)
+ goto err_cleanup_nand;
- oxnas->chips[nchips] = chip;
- ++nchips;
+ oxnas->chips[oxnas->nchips] = chip;
+ ++oxnas->nchips;
}
/* Exit if no chips found */
- if (!nchips) {
+ if (!oxnas->nchips) {
err = -ENODEV;
goto err_clk_unprepare;
}
@@ -166,6 +165,17 @@ static int oxnas_nand_probe(struct platform_device *pdev)
return 0;
+err_cleanup_nand:
+ nand_cleanup(chip);
+err_release_child:
+ of_node_put(nand_np);
+
+ for (i = 0; i < oxnas->nchips; i++) {
+ chip = oxnas->chips[i];
+ WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
+ nand_cleanup(chip);
+ }
+
err_clk_unprepare:
clk_disable_unprepare(oxnas->clk);
return err;
@@ -174,9 +184,13 @@ err_clk_unprepare:
static int oxnas_nand_remove(struct platform_device *pdev)
{
struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev);
+ struct nand_chip *chip;
+ int i;
- if (oxnas->chips[0])
- nand_release(nand_to_mtd(oxnas->chips[0]));
+ for (i = 0; i < oxnas->nchips; i++) {
+ chip = oxnas->chips[i];
+ nand_release(chip);
+ }
clk_disable_unprepare(oxnas->clk);
diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c
index a47a7e4bd25a..8713ea770006 100644
--- a/drivers/mtd/nand/raw/pasemi_nand.c
+++ b/drivers/mtd/nand/raw/pasemi_nand.c
@@ -156,14 +156,14 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
chip->bbt_options = NAND_BBT_USE_FLASH;
/* Scan to find existence of the device */
- err = nand_scan(pasemi_nand_mtd, 1);
+ err = nand_scan(chip, 1);
if (err)
goto out_lpc;
if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
dev_err(dev, "Unable to register MTD device\n");
err = -ENODEV;
- goto out_lpc;
+ goto out_cleanup_nand;
}
dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res,
@@ -171,6 +171,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
return 0;
+ out_cleanup_nand:
+ nand_cleanup(chip);
out_lpc:
release_region(lpcctl, 4);
out_ior:
@@ -191,7 +193,7 @@ static int pasemi_nand_remove(struct platform_device *ofdev)
chip = mtd_to_nand(pasemi_nand_mtd);
/* Release resources, unregister device */
- nand_release(pasemi_nand_mtd);
+ nand_release(chip);
release_region(lpcctl, 4);
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
index 222626df4b96..27365bd8675c 100644
--- a/drivers/mtd/nand/raw/plat_nand.c
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -84,7 +84,7 @@ static int plat_nand_probe(struct platform_device *pdev)
}
/* Scan to find existence of the device */
- err = nand_scan(mtd, pdata->chip.nr_chips);
+ err = nand_scan(&data->chip, pdata->chip.nr_chips);
if (err)
goto out;
@@ -97,7 +97,7 @@ static int plat_nand_probe(struct platform_device *pdev)
if (!err)
return err;
- nand_release(mtd);
+ nand_cleanup(&data->chip);
out:
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
@@ -112,7 +112,7 @@ static int plat_nand_remove(struct platform_device *pdev)
struct plat_nand_data *data = platform_get_drvdata(pdev);
struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
- nand_release(nand_to_mtd(&data->chip));
+ nand_release(&data->chip);
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 07d8750313fd..148c7a16f318 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -466,11 +466,13 @@ struct qcom_nand_host {
* among different NAND controllers.
* @ecc_modes - ecc mode for NAND
* @is_bam - whether NAND controller is using BAM
+ * @is_qpic - whether NAND CTRL is part of qpic IP
* @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
*/
struct qcom_nandc_props {
u32 ecc_modes;
bool is_bam;
+ bool is_qpic;
u32 dev_cmd_reg_start;
};
@@ -1576,6 +1578,8 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
int i;
+ nandc_read_buffer_sync(nandc, true);
+
for (i = 0; i < cw_cnt; i++) {
u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -2766,7 +2770,8 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
u32 nand_ctrl;
/* kill onenand */
- nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+ if (!nandc->props->is_qpic)
+ nandc_write(nandc, SFLASHC_BURST_CFG, 0);
nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
NAND_DEV_CMD_VLD_VAL);
@@ -2834,7 +2839,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
/* set up initial status value */
host->status = NAND_STATUS_READY | NAND_STATUS_WP;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
return ret;
@@ -2860,7 +2865,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
struct device *dev = nandc->dev;
struct device_node *dn = dev->of_node, *child;
struct qcom_nand_host *host;
- int ret;
+ int ret = -ENODEV;
for_each_available_child_of_node(dn, child) {
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
@@ -2878,10 +2883,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
list_add_tail(&host->node, &nandc->host_list);
}
- if (list_empty(&nandc->host_list))
- return -ENODEV;
-
- return 0;
+ return ret;
}
/* parse custom DT properties here */
@@ -2999,7 +3001,7 @@ static int qcom_nandc_remove(struct platform_device *pdev)
struct qcom_nand_host *host;
list_for_each_entry(host, &nandc->host_list, node)
- nand_release(nand_to_mtd(&host->chip));
+ nand_release(&host->chip);
qcom_nandc_unalloc(nandc);
@@ -3022,12 +3024,14 @@ static const struct qcom_nandc_props ipq806x_nandc_props = {
static const struct qcom_nandc_props ipq4019_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
.is_bam = true,
+ .is_qpic = true,
.dev_cmd_reg_start = 0x0,
};
static const struct qcom_nandc_props ipq8074_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
.is_bam = true,
+ .is_qpic = true,
.dev_cmd_reg_start = 0x7000,
};
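Editor's sketch: the qcom hunks gate the legacy SFLASHC_BURST_CFG write on a new is_qpic capability bit in the per-compatible qcom_nandc_props. A minimal sketch of driving such a quirk from a per-variant props table follows; the struct, helper and compatible strings here are simplified placeholders.

#include <stdbool.h>
#include <stdio.h>

struct demo_props {
	const char *compatible;
	bool is_qpic;	/* QPIC-based controllers skip the legacy register */
};

static const struct demo_props legacy_props = { "vendor,legacy-nand", false };
static const struct demo_props qpic_props   = { "vendor,qpic-nand",   true  };

static void demo_setup(const struct demo_props *props)
{
	if (!props->is_qpic)
		printf("%s: writing legacy burst-config register\n",
		       props->compatible);
	else
		printf("%s: skipping legacy register\n", props->compatible);
}

int main(void)
{
	demo_setup(&legacy_props);
	demo_setup(&qpic_props);
	return 0;
}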
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index dcdeb0660e5e..bb74a0ac697e 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -656,7 +656,7 @@ static int r852_register_nand_device(struct r852_device *dev)
dev->card_registred = 1;
return 0;
error3:
- nand_release(mtd);
+ nand_release(dev->chip);
error1:
/* Force card redetect */
dev->card_detected = 0;
@@ -675,7 +675,7 @@ static void r852_unregister_nand_device(struct r852_device *dev)
return;
device_remove_file(&mtd->dev, &dev_attr_media_type);
- nand_release(mtd);
+ nand_release(dev->chip);
r852_engine_disable(dev);
dev->card_registred = 0;
}
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index c21e8892394a..cf045813c160 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -781,7 +781,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
- nand_release(nand_to_mtd(&ptr->chip));
+ nand_release(&ptr->chip);
}
}
@@ -1170,7 +1170,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
s3c2410_nand_init_chip(info, nmtd, sets);
- err = nand_scan(mtd, sets ? sets->nr_chips : 1);
+ err = nand_scan(&nmtd->chip, sets ? sets->nr_chips : 1);
if (err)
goto exit_error;
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index 1e7273263c4b..683df1a12989 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -1203,7 +1203,7 @@ static int flctl_probe(struct platform_device *pdev)
flctl_setup_dma(flctl);
nand->dummy_controller.ops = &flctl_nand_controller_ops;
- ret = nand_scan(flctl_mtd, 1);
+ ret = nand_scan(nand, 1);
if (ret)
goto err_chip;
@@ -1226,7 +1226,7 @@ static int flctl_remove(struct platform_device *pdev)
struct sh_flctl *flctl = platform_get_drvdata(pdev);
flctl_release_dma(flctl);
- nand_release(nand_to_mtd(&flctl->chip));
+ nand_release(&flctl->chip);
pm_runtime_disable(&pdev->dev);
return 0;
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index fc171b17a39b..4544753e6a1b 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -171,7 +171,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
this->ecc.correct = nand_correct_data;
/* Scan to find existence of the device */
- err = nand_scan(mtd, 1);
+ err = nand_scan(this, 1);
if (err)
goto err_scan;
@@ -187,7 +187,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
return 0;
err_add:
- nand_release(mtd);
+ nand_cleanup(this);
err_scan:
iounmap(sharpsl->io);
@@ -205,7 +205,7 @@ static int sharpsl_nand_remove(struct platform_device *pdev)
struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
/* Release resources, unregister device */
- nand_release(nand_to_mtd(&sharpsl->chip));
+ nand_release(&sharpsl->chip);
iounmap(sharpsl->io);
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
index 73aafe8c3ef3..02ac6e9b2d16 100644
--- a/drivers/mtd/nand/raw/sm_common.c
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -195,7 +195,7 @@ int sm_register_device(struct mtd_info *mtd, int smartmedia)
/* Scan for card properties */
chip->dummy_controller.ops = &sm_controller_ops;
flash_ids = smartmedia ? nand_smartmedia_flash_ids : nand_xd_flash_ids;
- ret = nand_scan_with_ids(mtd, 1, flash_ids);
+ ret = nand_scan_with_ids(chip, 1, flash_ids);
if (ret)
return ret;
diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c
index 9824a9923583..274b3521a1dc 100644
--- a/drivers/mtd/nand/raw/socrates_nand.c
+++ b/drivers/mtd/nand/raw/socrates_nand.c
@@ -185,7 +185,7 @@ static int socrates_nand_probe(struct platform_device *ofdev)
dev_set_drvdata(&ofdev->dev, host);
- res = nand_scan(mtd, 1);
+ res = nand_scan(nand_chip, 1);
if (res)
goto out;
@@ -193,7 +193,7 @@ static int socrates_nand_probe(struct platform_device *ofdev)
if (!res)
return res;
- nand_release(mtd);
+ nand_cleanup(nand_chip);
out:
iounmap(host->io_base);
@@ -206,9 +206,8 @@ out:
static int socrates_nand_remove(struct platform_device *ofdev)
{
struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
- struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
- nand_release(mtd);
+ nand_release(&host->nand_chip);
iounmap(host->io_base);
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 5b5f4d25a3e1..88075e420f90 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -1940,14 +1940,14 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
mtd = nand_to_mtd(nand);
mtd->dev.parent = dev;
- ret = nand_scan(mtd, nsels);
+ ret = nand_scan(nand, nsels);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register mtd device: %d\n", ret);
- nand_release(mtd);
+ nand_cleanup(nand);
return ret;
}
@@ -1986,7 +1986,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
while (!list_empty(&nfc->chips)) {
chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
node);
- nand_release(nand_to_mtd(&chip->nand));
+ nand_release(&chip->nand);
sunxi_nand_ecc_cleanup(&chip->nand.ecc);
list_del(&chip->node);
}
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index 72698691727d..1061eb60ee60 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -588,7 +588,7 @@ static int chip_init(struct device *dev, struct device_node *np)
mtd_set_ooblayout(mtd, &tango_nand_ooblayout_ops);
mtd->dev.parent = dev;
- err = nand_scan(mtd, 1);
+ err = nand_scan(chip, 1);
if (err)
return err;
@@ -617,7 +617,7 @@ static int tango_nand_remove(struct platform_device *pdev)
for (cs = 0; cs < MAX_CS; ++cs) {
if (nfc->chips[cs])
- nand_release(nand_to_mtd(&nfc->chips[cs]->nand_chip));
+ nand_release(&nfc->chips[cs]->nand_chip);
}
return 0;
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index 79da1efc88d1..5dcee20e2a8c 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -1119,7 +1119,7 @@ static int tegra_nand_chips_init(struct device *dev,
chip->select_chip = tegra_nand_select_chip;
chip->setup_data_interface = tegra_nand_setup_data_interface;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
return ret;
diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
index dcaa924502de..2a9a966ff2b9 100644
--- a/drivers/mtd/nand/raw/tmio_nand.c
+++ b/drivers/mtd/nand/raw/tmio_nand.c
@@ -436,7 +436,7 @@ static int tmio_probe(struct platform_device *dev)
nand_chip->waitfunc = tmio_nand_wait;
/* Scan to find existence of the device */
- retval = nand_scan(mtd, 1);
+ retval = nand_scan(nand_chip, 1);
if (retval)
goto err_irq;
@@ -449,7 +449,7 @@ static int tmio_probe(struct platform_device *dev)
if (!retval)
return retval;
- nand_release(mtd);
+ nand_cleanup(nand_chip);
err_irq:
tmio_hw_stop(dev, tmio);
@@ -460,7 +460,7 @@ static int tmio_remove(struct platform_device *dev)
{
struct tmio_nand *tmio = platform_get_drvdata(dev);
- nand_release(nand_to_mtd(&tmio->chip));
+ nand_release(&tmio->chip);
tmio_hw_stop(dev, tmio);
return 0;
}
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index 4d61a14fcb65..f722aae2b244 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -359,7 +359,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
if (plat->wide_mask & (1 << i))
chip->options |= NAND_BUSWIDTH_16;
- if (nand_scan(mtd, 1)) {
+ if (nand_scan(chip, 1)) {
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
continue;
@@ -390,7 +390,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
chip = mtd_to_nand(mtd);
txx9_priv = nand_get_controller_data(chip);
- nand_release(mtd);
+ nand_release(chip);
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index 6f6dcbf9095b..a73213c835a5 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -892,7 +892,7 @@ static int vf610_nfc_probe(struct platform_device *pdev)
/* Scan the NAND chip */
chip->dummy_controller.ops = &vf610_nfc_controller_ops;
- err = nand_scan(mtd, 1);
+ err = nand_scan(chip, 1);
if (err)
goto err_disable_clk;
@@ -916,7 +916,7 @@ static int vf610_nfc_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct vf610_nfc *nfc = mtd_to_nfc(mtd);
- nand_release(mtd);
+ nand_release(mtd_to_nand(mtd));
clk_disable_unprepare(nfc->clk);
return 0;
}
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index 9926b4e3d69d..bfa43fdc3c54 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -205,13 +205,13 @@ static int xway_nand_probe(struct platform_device *pdev)
| cs_flag, EBU_NAND_CON);
/* Scan to find existence of the device */
- err = nand_scan(mtd, 1);
+ err = nand_scan(&data->chip, 1);
if (err)
return err;
err = mtd_device_register(mtd, NULL, 0);
if (err)
- nand_release(mtd);
+ nand_cleanup(&data->chip);
return err;
}
@@ -223,7 +223,7 @@ static int xway_nand_remove(struct platform_device *pdev)
{
struct xway_nand_data *data = platform_get_drvdata(pdev);
- nand_release(nand_to_mtd(&data->chip));
+ nand_release(&data->chip);
return 0;
}
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 48b3ab26b124..4a3dc5953b00 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -629,18 +629,18 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 marker[2] = { };
struct nand_page_io_req req = {
.pos = *pos,
- .ooblen = 2,
+ .ooblen = sizeof(marker),
.ooboffs = 0,
- .oobbuf.in = spinand->oobbuf,
+ .oobbuf.in = marker,
.mode = MTD_OPS_RAW,
};
- memset(spinand->oobbuf, 0, 2);
spinand_select_target(spinand, pos->target);
spinand_read_page(spinand, &req, false);
- if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
+ if (marker[0] != 0xff || marker[1] != 0xff)
return true;
return false;
@@ -664,15 +664,16 @@ static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 marker[2] = { };
struct nand_page_io_req req = {
.pos = *pos,
.ooboffs = 0,
- .ooblen = 2,
- .oobbuf.out = spinand->oobbuf,
+ .ooblen = sizeof(marker),
+ .oobbuf.out = marker,
+ .mode = MTD_OPS_RAW,
};
int ret;
- /* Erase block before marking it bad. */
ret = spinand_select_target(spinand, pos->target);
if (ret)
return ret;
@@ -681,9 +682,6 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- spinand_erase_op(spinand, pos);
-
- memset(spinand->oobbuf, 0, 2);
return spinand_write_page(spinand, &req);
}
@@ -1047,6 +1045,10 @@ static int spinand_init(struct spinand_device *spinand)
mtd->oobavail = ret;
+ /* Propagate ECC information to mtd_info */
+ mtd->ecc_strength = nand->eccreq.strength;
+ mtd->ecc_step_size = nand->eccreq.step_size;
+
return 0;
err_cleanup_nanddev:
@@ -1127,12 +1129,14 @@ static const struct spi_device_id spinand_ids[] = {
{ .name = "spi-nand" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(spi, spinand_ids);
#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
{ .compatible = "spi-nand" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif
static struct spi_mem_driver spinand_drv = {
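Editor's sketch: spinand_isbad()/spinand_markbad() now build the two-byte bad-block marker in a local buffer and size the request with sizeof(marker) instead of borrowing spinand->oobbuf, and the marker write is done in MTD_OPS_RAW mode so ECC never rewrites it; the MODULE_DEVICE_TABLE additions simply export the ID tables for module autoloading. Below is a simplified sketch of sizing an OOB request from a local marker buffer; the struct fields are invented for illustration.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct demo_oob_req {
	size_t ooblen;
	size_t ooboffs;
	const uint8_t *oobbuf;
	int raw;		/* stand-in for MTD_OPS_RAW */
};

static void demo_write_oob(const struct demo_oob_req *req)
{
	size_t i;

	printf("writing %zu OOB byte(s), raw=%d:", req->ooblen, req->raw);
	for (i = 0; i < req->ooblen; i++)
		printf(" %02x", req->oobbuf[i]);
	printf("\n");
}

int main(void)
{
	uint8_t marker[2] = { 0 };	/* 0x00 0x00 marks the block bad */
	struct demo_oob_req req = {
		.ooblen = sizeof(marker),	/* stays in sync with the buffer */
		.ooboffs = 0,
		.oobbuf = marker,
		.raw = 1,
	};

	demo_write_oob(&req);
	return 0;
}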
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 04cedd3a2bf6..a92f531ad23a 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -473,7 +473,7 @@ static int cqspi_read_setup(struct spi_nor *nor)
/* Setup dummy clock cycles */
dummy_clk = nor->read_dummy;
if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
- dummy_clk = CQSPI_DUMMY_CLKS_MAX;
+ return -EOPNOTSUPP;
if (dummy_clk / 8) {
reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
index dea7b0c4b339..184ba5069ac5 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/hisi-sfc.c
@@ -408,8 +408,10 @@ static int hisi_spi_nor_register_all(struct hifmc_host *host)
for_each_available_child_of_node(dev->of_node, np) {
ret = hisi_spi_nor_register(np, host);
- if (ret)
+ if (ret) {
+ of_node_put(np);
goto fail;
+ }
if (host->num_chip == HIFMC_MAX_CHIP_NUM) {
dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n");
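Editor's sketch: the hisi-sfc fix adds the of_node_put() that for_each_available_child_of_node() would otherwise skip when the loop is left early, since the iterator holds a reference on the current child and only drops it when it advances. A toy refcount model of why an early exit needs the explicit put (all helpers here are invented for the demonstration):

#include <stdio.h>

static int refcount;

static void get_node(void) { refcount++; }	/* iterator takes a reference */
static void put_node(void) { refcount--; }	/* iterator drops it on advance */

static int register_child(int i)
{
	return i == 2 ? -1 : 0;		/* third child fails */
}

int main(void)
{
	int i, ret = 0;

	for (i = 0; i < 4; i++) {
		get_node();
		ret = register_child(i);
		if (ret) {
			put_node();	/* the fix: drop the reference before bailing out */
			break;
		}
		put_node();
	}

	/* Without the put before break, this would report one leaked reference. */
	printf("leaked references: %d\n", refcount);
	return ret ? 1 : 0;
}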
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 7bc96294ae4d..b108e1f04bf6 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -405,9 +405,6 @@ static void *eraseblk_count_seq_start(struct seq_file *s, loff_t *pos)
{
struct ubi_device *ubi = s->private;
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
if (*pos < ubi->peb_count)
return pos;
@@ -421,8 +418,6 @@ static void *eraseblk_count_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct ubi_device *ubi = s->private;
- if (v == SEQ_START_TOKEN)
- return pos;
(*pos)++;
if (*pos < ubi->peb_count)
@@ -444,11 +439,8 @@ static int eraseblk_count_seq_show(struct seq_file *s, void *iter)
int err;
/* If this is the start, print a header */
- if (iter == SEQ_START_TOKEN) {
- seq_puts(s,
- "physical_block_number\terase_count\tblock_status\tread_status\n");
- return 0;
- }
+ if (*block_number == 0)
+ seq_puts(s, "physical_block_number\terase_count\n");
err = ubi_io_is_bad(ubi, *block_number);
if (err)
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 98f7d6be8d1f..e08f6b4637dd 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -48,6 +48,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
return victim;
}
+static inline void return_unused_peb(struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
+{
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+}
+
/**
* return_unused_pool_pebs - returns unused PEB to the free tree.
* @ubi: UBI device description object
@@ -61,23 +68,10 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
for (i = pool->used; i < pool->size; i++) {
e = ubi->lookuptbl[pool->pebs[i]];
- wl_tree_add(e, &ubi->free);
- ubi->free_count++;
+ return_unused_peb(ubi, e);
}
}
-static int anchor_pebs_available(struct rb_root *root)
-{
- struct rb_node *p;
- struct ubi_wl_entry *e;
-
- ubi_rb_for_each_entry(p, e, root, u.rb)
- if (e->pnum < UBI_FM_MAX_START)
- return 1;
-
- return 0;
-}
-
/**
* ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
* @ubi: UBI device description object
@@ -286,8 +280,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
+ struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock);
+
+ /* Do we already have an anchor? */
+ if (ubi->fm_anchor) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ /* See if we can find an anchor PEB on the list of free PEBs */
+ anchor = ubi_wl_get_fm_peb(ubi, 1);
+ if (anchor) {
+ ubi->fm_anchor = anchor;
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ /* No luck, trigger wear leveling to produce a new anchor PEB */
+ ubi->fm_do_produce_anchor = 1;
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
@@ -303,7 +315,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
return -ENOMEM;
}
- wrk->anchor = 1;
wrk->func = &wear_leveling_worker;
__schedule_ubi_work(ubi, wrk);
return 0;
@@ -365,6 +376,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
return_unused_pool_pebs(ubi, &ubi->fm_pool);
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+ if (ubi->fm_anchor) {
+ return_unused_peb(ubi, ubi->fm_anchor);
+ ubi->fm_anchor = NULL;
+ }
+
if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 8e292992f84c..b88ef875236c 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1552,14 +1552,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
return 0;
}
- ret = ubi_ensure_anchor_pebs(ubi);
- if (ret) {
- up_write(&ubi->fm_eba_sem);
- up_write(&ubi->work_sem);
- up_write(&ubi->fm_protect);
- return ret;
- }
-
new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
if (!new_fm) {
up_write(&ubi->fm_eba_sem);
@@ -1630,7 +1622,8 @@ int ubi_update_fastmap(struct ubi_device *ubi)
}
spin_lock(&ubi->wl_lock);
- tmp_e = ubi_wl_get_fm_peb(ubi, 1);
+ tmp_e = ubi->fm_anchor;
+ ubi->fm_anchor = NULL;
spin_unlock(&ubi->wl_lock);
if (old_fm) {
@@ -1682,6 +1675,9 @@ out_unlock:
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
kfree(old_fm);
+
+ ubi_ensure_anchor_pebs(ubi);
+
return ret;
err:
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index d47b9e436e67..d248ec371cc1 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -504,6 +504,8 @@ struct ubi_debug_info {
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap
+ * @fm_anchor: The next anchor PEB to use for fastmap
+ * @fm_do_produce_anchor: If true produce an anchor PEB in wl
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -612,6 +614,8 @@ struct ubi_device {
struct work_struct fm_work;
int fm_work_scheduled;
int fast_attach;
+ struct ubi_wl_entry *fm_anchor;
+ int fm_do_produce_anchor;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
@@ -802,7 +806,6 @@ struct ubi_attach_info {
* @vol_id: the volume ID on which this erasure is being performed
* @lnum: the logical eraseblock number
* @torture: if the physical eraseblock has to be tortured
- * @anchor: produce a anchor PEB to by used by fastmap
*
* The @func pointer points to the worker function. If the @shutdown argument is
* not zero, the worker has to free the resources and exit immediately as the
@@ -818,7 +821,6 @@ struct ubi_work {
int vol_id;
int lnum;
int torture;
- int anchor;
};
#include "debug.h"
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6f2ac865ff05..ac336164f625 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -331,13 +331,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
}
}
- /* If no fastmap has been written and this WL entry can be used
- * as anchor PEB, hold it back and return the second best WL entry
- * such that fastmap can use the anchor PEB later. */
- if (prev_e && !ubi->fm_disabled &&
- !ubi->fm && e->pnum < UBI_FM_MAX_START)
- return prev_e;
-
return e;
}
@@ -648,9 +641,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
int erase = 0, keep = 0, vol_id = -1, lnum = -1;
-#ifdef CONFIG_MTD_UBI_FASTMAP
- int anchor = wrk->anchor;
-#endif
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
@@ -690,11 +680,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
}
#ifdef CONFIG_MTD_UBI_FASTMAP
- /* Check whether we need to produce an anchor PEB */
- if (!anchor)
- anchor = !anchor_pebs_available(&ubi->free);
-
- if (anchor) {
+ if (ubi->fm_do_produce_anchor) {
e1 = find_anchor_wl_entry(&ubi->used);
if (!e1)
goto out_cancel;
@@ -705,6 +691,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
self_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
+ ubi->fm_do_produce_anchor = 0;
} else if (!ubi->scrub.rb_node) {
#else
if (!ubi->scrub.rb_node) {
@@ -1037,7 +1024,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
goto out_cancel;
}
- wrk->anchor = 0;
wrk->func = &wear_leveling_worker;
if (nested)
__schedule_ubi_work(ubi, wrk);
@@ -1079,8 +1065,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
spin_lock(&ubi->wl_lock);
- wl_tree_add(e, &ubi->free);
- ubi->free_count++;
+
+ if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
+ ubi->fm_anchor = e;
+ ubi->fm_do_produce_anchor = 0;
+ } else {
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ }
+
spin_unlock(&ubi->wl_lock);
/*
@@ -1478,6 +1471,19 @@ int ubi_thread(void *u)
!ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Check kthread_should_stop() after we set the task
+ * state to guarantee that we either see the stop bit
+ * and exit or the task state is reset to runnable such
+ * that it's not scheduled out indefinitely and detects
+ * the stop bit at kthread_should_stop().
+ */
+ if (kthread_should_stop()) {
+ set_current_state(TASK_RUNNING);
+ break;
+ }
+
schedule();
continue;
}
@@ -1724,6 +1730,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (err)
goto out_free;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi_ensure_anchor_pebs(ubi);
+#endif
return 0;
out_free:
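
The ubi_thread() hunk above adds the classic kthread shutdown check: the stop flag is tested after the task state has been set, so a concurrent kthread_stop() is either seen immediately or its wakeup has already made the task runnable and schedule() cannot block forever. A minimal sketch of that loop with a hypothetical demo_thread():

#include <linux/kthread.h>
#include <linux/sched.h>

static int demo_thread(void *arg)
{
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);

                /*
                 * Checked after the state change: either the stop request
                 * is observed here, or it arrives later and the wakeup in
                 * kthread_stop() puts the task back to TASK_RUNNING, so
                 * schedule() cannot sleep indefinitely.
                 */
                if (kthread_should_stop()) {
                        set_current_state(TASK_RUNNING);
                        break;
                }

                schedule();

                /* ... woken up: do one unit of work, then loop ... */
        }

        return 0;
}
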
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index a9e2d669acd8..c93a53293786 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -2,7 +2,6 @@
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
-static int anchor_pebs_available(struct rb_root *root);
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index f57b86f1373d..c21c4291921f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1123,6 +1123,7 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
bond_dev->type = slave_dev->type;
bond_dev->hard_header_len = slave_dev->hard_header_len;
+ bond_dev->needed_headroom = slave_dev->needed_headroom;
bond_dev->addr_len = slave_dev->addr_len;
memcpy(bond_dev->broadcast, slave_dev->broadcast,
@@ -1267,7 +1268,39 @@ static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
slave->dev->flags &= ~IFF_SLAVE;
}
-static struct slave *bond_alloc_slave(struct bonding *bond)
+static void slave_kobj_release(struct kobject *kobj)
+{
+ struct slave *slave = to_slave(kobj);
+ struct bonding *bond = bond_get_bond_by_slave(slave);
+
+ cancel_delayed_work_sync(&slave->notify_work);
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ kfree(SLAVE_AD_INFO(slave));
+
+ kfree(slave);
+}
+
+static struct kobj_type slave_ktype = {
+ .release = slave_kobj_release,
+#ifdef CONFIG_SYSFS
+ .sysfs_ops = &slave_sysfs_ops,
+#endif
+};
+
+static int bond_kobj_init(struct slave *slave)
+{
+ int err;
+
+ err = kobject_init_and_add(&slave->kobj, &slave_ktype,
+ &(slave->dev->dev.kobj), "bonding_slave");
+ if (err)
+ kobject_put(&slave->kobj);
+
+ return err;
+}
+
+static struct slave *bond_alloc_slave(struct bonding *bond,
+ struct net_device *slave_dev)
{
struct slave *slave = NULL;
@@ -1275,11 +1308,17 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
if (!slave)
return NULL;
+ slave->bond = bond;
+ slave->dev = slave_dev;
+
+ if (bond_kobj_init(slave))
+ return NULL;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
GFP_KERNEL);
if (!SLAVE_AD_INFO(slave)) {
- kfree(slave);
+ kobject_put(&slave->kobj);
return NULL;
}
}
@@ -1288,17 +1327,6 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
return slave;
}
-static void bond_free_slave(struct slave *slave)
-{
- struct bonding *bond = bond_get_bond_by_slave(slave);
-
- cancel_delayed_work_sync(&slave->notify_work);
- if (BOND_MODE(bond) == BOND_MODE_8023AD)
- kfree(SLAVE_AD_INFO(slave));
-
- kfree(slave);
-}
-
static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
info->bond_mode = BOND_MODE(bond);
@@ -1486,14 +1514,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
bond->dev->addr_assign_type == NET_ADDR_RANDOM)
bond_set_dev_addr(bond->dev, slave_dev);
- new_slave = bond_alloc_slave(bond);
+ new_slave = bond_alloc_slave(bond, slave_dev);
if (!new_slave) {
res = -ENOMEM;
goto err_undo_flags;
}
- new_slave->bond = bond;
- new_slave->dev = slave_dev;
/* Set the new_slave's queue_id to be zero. Queue ID mapping
* is set via sysfs or module option if desired.
*/
@@ -1820,7 +1846,7 @@ err_restore_mtu:
dev_set_mtu(slave_dev, new_slave->original_mtu);
err_free:
- bond_free_slave(new_slave);
+ kobject_put(&new_slave->kobj);
err_undo_flags:
/* Enslave of first slave has failed and we need to fix master's mac */
@@ -2008,7 +2034,7 @@ static int __bond_release_one(struct net_device *bond_dev,
if (!netif_is_bond_master(slave_dev))
slave_dev->priv_flags &= ~IFF_BONDING;
- bond_free_slave(slave);
+ kobject_put(&slave->kobj);
return 0;
}
@@ -2029,7 +2055,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
int ret;
ret = __bond_release_one(bond_dev, slave_dev, false, true);
- if (ret == 0 && !bond_has_slaves(bond)) {
+ if (ret == 0 && !bond_has_slaves(bond) &&
+ bond_dev->reg_state != NETREG_UNREGISTERING) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
netdev_info(bond_dev, "Destroying bond %s\n",
bond_dev->name);
@@ -2772,6 +2799,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
if (bond_time_in_interval(bond, last_rx, 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
commit++;
+ } else if (slave->link == BOND_LINK_BACK) {
+ bond_propose_link_state(slave, BOND_LINK_FAIL);
+ commit++;
}
continue;
}
@@ -2882,6 +2912,19 @@ static void bond_ab_arp_commit(struct bonding *bond)
continue;
+ case BOND_LINK_FAIL:
+ bond_set_slave_link_state(slave, BOND_LINK_FAIL,
+ BOND_SLAVE_NOTIFY_NOW);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
+
+ /* A slave has just been enslaved and has become
+ * the current active slave.
+ */
+ if (rtnl_dereference(bond->curr_active_slave))
+ RCU_INIT_POINTER(bond->current_arp_slave, NULL);
+ continue;
+
default:
netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
slave->link_new_state, slave->dev->name);
@@ -2931,8 +2974,6 @@ static bool bond_ab_arp_probe(struct bonding *bond)
return should_notify_rtnl;
}
- bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
-
bond_for_each_slave_rcu(bond, slave, iter) {
if (!found && !before && bond_slave_is_up(slave))
before = slave;
@@ -4200,13 +4241,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
+static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
+{
+ if (speed == 0 || speed == SPEED_UNKNOWN)
+ speed = slave->speed;
+ else
+ speed = min(speed, slave->speed);
+
+ return speed;
+}
+
static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
struct ethtool_link_ksettings *cmd)
{
struct bonding *bond = netdev_priv(bond_dev);
- unsigned long speed = 0;
struct list_head *iter;
struct slave *slave;
+ u32 speed = 0;
cmd->base.duplex = DUPLEX_UNKNOWN;
cmd->base.port = PORT_OTHER;
@@ -4218,8 +4269,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
*/
bond_for_each_slave(bond, slave, iter) {
if (bond_slave_can_tx(slave)) {
- if (slave->speed != SPEED_UNKNOWN)
- speed += slave->speed;
+ if (slave->speed != SPEED_UNKNOWN) {
+ if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
+ speed = bond_mode_bcast_speed(slave,
+ speed);
+ else
+ speed += slave->speed;
+ }
if (cmd->base.duplex == DUPLEX_UNKNOWN &&
slave->duplex != DUPLEX_UNKNOWN)
cmd->base.duplex = slave->duplex;
@@ -4817,15 +4873,19 @@ int bond_create(struct net *net, const char *name)
bond_dev->rtnl_link_ops = &bond_link_ops;
res = register_netdevice(bond_dev);
+ if (res < 0) {
+ free_netdev(bond_dev);
+ rtnl_unlock();
+
+ return res;
+ }
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
rtnl_unlock();
- if (res < 0)
- free_netdev(bond_dev);
- return res;
+ return 0;
}
static int __net_init bond_net_init(struct net *net)
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 6b9ad8673218..fbcd8a752ee7 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -451,11 +451,10 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
return err;
err = register_netdevice(bond_dev);
-
- netif_carrier_off(bond_dev);
if (!err) {
struct bonding *bond = netdev_priv(bond_dev);
+ netif_carrier_off(bond_dev);
bond_work_init_all(bond);
}
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 4985268e2273..9ec0498d7d54 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -125,7 +125,6 @@ static const struct slave_attribute *slave_attrs[] = {
};
#define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr)
-#define to_slave(obj) container_of(obj, struct slave, kobj)
static ssize_t slave_show(struct kobject *kobj,
struct attribute *attr, char *buf)
@@ -136,26 +135,15 @@ static ssize_t slave_show(struct kobject *kobj,
return slave_attr->show(slave, buf);
}
-static const struct sysfs_ops slave_sysfs_ops = {
+const struct sysfs_ops slave_sysfs_ops = {
.show = slave_show,
};
-static struct kobj_type slave_ktype = {
-#ifdef CONFIG_SYSFS
- .sysfs_ops = &slave_sysfs_ops,
-#endif
-};
-
int bond_sysfs_slave_add(struct slave *slave)
{
const struct slave_attribute **a;
int err;
- err = kobject_init_and_add(&slave->kobj, &slave_ktype,
- &(slave->dev->dev.kobj), "bonding_slave");
- if (err)
- return err;
-
for (a = slave_attrs; *a; ++a) {
err = sysfs_create_file(&slave->kobj, &((*a)->attr));
if (err) {
@@ -173,6 +161,4 @@ void bond_sysfs_slave_del(struct slave *slave)
for (a = slave_attrs; *a; ++a)
sysfs_remove_file(&slave->kobj, &((*a)->attr));
-
- kobject_put(&slave->kobj);
}
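
Across bond_main.c and bond_sysfs_slave.c the slave lifetime is re-tied to its embedded kobject: the ktype gains a ->release() callback that frees the object, and every error and teardown path becomes a kobject_put(), so sysfs readers still holding a reference can never race with kfree(). A minimal sketch of that ownership pattern; struct demo_obj and the demo_* names are hypothetical:

#include <linux/kobject.h>
#include <linux/slab.h>

struct demo_obj {
        struct kobject kobj;
        int payload;
};

static void demo_release(struct kobject *kobj)
{
        /* runs when the last reference is dropped */
        kfree(container_of(kobj, struct demo_obj, kobj));
}

static struct kobj_type demo_ktype = {
        .release = demo_release,
};

static struct demo_obj *demo_obj_create(struct kobject *parent)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;

        if (kobject_init_and_add(&obj->kobj, &demo_ktype, parent, "demo")) {
                kobject_put(&obj->kobj);        /* triggers demo_release() */
                return NULL;
        }

        return obj;
}

Teardown is then simply kobject_put(&obj->kobj) on every path, which is what the bonding code switches to in place of bond_free_slave().
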
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index a0f954f36c09..94d5ce9419ca 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -279,7 +279,6 @@ static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ser_device *ser;
- BUG_ON(dev == NULL);
ser = netdev_priv(dev);
/* Send flow off once, on high water mark */
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 24c6015f6c92..2278c5fff5c6 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = {
.brp_inc = 1,
};
-static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
-{
- if (priv->device)
- pm_runtime_enable(priv->device);
-}
-
-static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
-{
- if (priv->device)
- pm_runtime_disable(priv->device);
-}
-
static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
{
if (priv->device)
@@ -1318,7 +1306,6 @@ static const struct net_device_ops c_can_netdev_ops = {
int register_c_can_dev(struct net_device *dev)
{
- struct c_can_priv *priv = netdev_priv(dev);
int err;
/* Deactivate pins to prevent DRA7 DCAN IP from being
@@ -1328,28 +1315,19 @@ int register_c_can_dev(struct net_device *dev)
*/
pinctrl_pm_select_sleep_state(dev->dev.parent);
- c_can_pm_runtime_enable(priv);
-
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &c_can_netdev_ops;
err = register_candev(dev);
- if (err)
- c_can_pm_runtime_disable(priv);
- else
+ if (!err)
devm_can_led_init(dev);
-
return err;
}
EXPORT_SYMBOL_GPL(register_c_can_dev);
void unregister_c_can_dev(struct net_device *dev)
{
- struct c_can_priv *priv = netdev_priv(dev);
-
unregister_candev(dev);
-
- c_can_pm_runtime_disable(priv);
}
EXPORT_SYMBOL_GPL(unregister_c_can_dev);
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 406b4847e5dc..7efb60b50876 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct c_can_priv *priv = netdev_priv(dev);
+ void __iomem *addr = priv->base;
unregister_c_can_dev(dev);
free_c_can_dev(dev);
- pci_iounmap(pdev, priv->base);
+ pci_iounmap(pdev, addr);
pci_disable_msi(pdev);
pci_clear_master(pdev);
pci_release_regions(pdev);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index b5145a7f874c..f2b0408ce87d 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -29,6 +29,7 @@
#include <linux/list.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -385,6 +386,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
+ pm_runtime_enable(priv->device);
ret = register_c_can_dev(dev);
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
@@ -397,6 +399,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
return 0;
exit_free_device:
+ pm_runtime_disable(priv->device);
free_c_can_dev(dev);
exit:
dev_err(&pdev->dev, "probe failed\n");
@@ -407,9 +410,10 @@ exit:
static int c_can_plat_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
+ struct c_can_priv *priv = netdev_priv(dev);
unregister_c_can_dev(dev);
-
+ pm_runtime_disable(priv->device);
free_c_can_dev(dev);
return 0;
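
The c_can hunks above move runtime-PM handling out of the shared register/unregister helpers and into the platform bus glue, which owns the struct device, keeping pm_runtime_enable() and pm_runtime_disable() balanced on every probe and remove path. A minimal sketch of that split, with demo_register()/demo_unregister() standing in for the core helpers:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical core helpers standing in for register_c_can_dev() and co. */
static int demo_register(struct platform_device *pdev) { return 0; }
static void demo_unregister(struct platform_device *pdev) { }

static int demo_probe(struct platform_device *pdev)
{
        int ret;

        pm_runtime_enable(&pdev->dev);

        ret = demo_register(pdev);
        if (ret)
                pm_runtime_disable(&pdev->dev);

        return ret;
}

static int demo_remove(struct platform_device *pdev)
{
        demo_unregister(pdev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}
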
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 1545f2b299d0..8738d37f7273 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -493,9 +493,13 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
*/
struct sk_buff *skb = priv->echo_skb[idx];
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
- u8 len = cf->len;
- *len_ptr = len;
+ /* get the real payload length for netdev statistics */
+ if (cf->can_id & CAN_RTR_FLAG)
+ *len_ptr = 0;
+ else
+ *len_ptr = cf->len;
+
priv->echo_skb[idx] = NULL;
return skb;
@@ -520,7 +524,11 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
if (!skb)
return 0;
- netif_rx(skb);
+ skb_get(skb);
+ if (netif_rx(skb) == NET_RX_SUCCESS)
+ dev_consume_skb_any(skb);
+ else
+ dev_kfree_skb_any(skb);
return len;
}
@@ -571,11 +579,11 @@ static void can_restart(struct net_device *dev)
}
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx_ni(skb);
+
restart:
netdev_dbg(dev, "restarted\n");
priv->can_stats.restarts++;
@@ -1134,7 +1142,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct can_ctrlmode cm = {.flags = priv->ctrlmode};
- struct can_berr_counter bec;
+ struct can_berr_counter bec = { };
enum can_state state = priv->state;
if (priv->do_get_state)
@@ -1227,6 +1235,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head)
static struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
+ .netns_refund = true,
.maxtype = IFLA_CAN_MAX,
.policy = can_policy,
.setup = can_setup,
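
The __can_get_echo_skb() hunk above accounts remote (RTR) frames as zero payload bytes in the netdev statistics, since an RTR frame requests data but carries none. A tiny sketch of that rule for a classic CAN frame (the patch applies the same idea via struct canfd_frame); demo_can_payload_len() is hypothetical:

#include <linux/can.h>

static unsigned int demo_can_payload_len(const struct can_frame *cf)
{
        /* remote frames request data but carry none themselves */
        if (cf->can_id & CAN_RTR_FLAG)
                return 0;

        return cf->can_dlc;
}
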
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index bfe13c6627be..d4dfa0247ebb 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -301,8 +301,7 @@ static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -423,11 +422,17 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
static int flexcan_chip_freeze(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
- unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
+ unsigned int timeout;
+ u32 bitrate = priv->can.bittiming.bitrate;
u32 reg;
+ if (bitrate)
+ timeout = 1000 * 1000 * 10 / bitrate;
+ else
+ timeout = FLEXCAN_TIMEOUT_US / 10;
+
reg = priv->read(&regs->mcr);
- reg |= FLEXCAN_MCR_HALT;
+ reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT;
priv->write(reg, &regs->mcr);
while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
@@ -1091,18 +1096,23 @@ static int flexcan_chip_start(struct net_device *dev)
return err;
}
-/* flexcan_chip_stop
+/* __flexcan_chip_stop
*
- * this functions is entered with clocks enabled
+ * this function is entered with clocks enabled
*/
-static void flexcan_chip_stop(struct net_device *dev)
+static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
+ int err;
/* freeze + disable module */
- flexcan_chip_freeze(priv);
- flexcan_chip_disable(priv);
+ err = flexcan_chip_freeze(priv);
+ if (err && !disable_on_error)
+ return err;
+ err = flexcan_chip_disable(priv);
+ if (err && !disable_on_error)
+ goto out_chip_unfreeze;
/* Disable all interrupts */
priv->write(0, &regs->imask2);
@@ -1112,6 +1122,23 @@ static void flexcan_chip_stop(struct net_device *dev)
flexcan_transceiver_disable(priv);
priv->can.state = CAN_STATE_STOPPED;
+
+ return 0;
+
+ out_chip_unfreeze:
+ flexcan_chip_unfreeze(priv);
+
+ return err;
+}
+
+static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev)
+{
+ return __flexcan_chip_stop(dev, true);
+}
+
+static inline int flexcan_chip_stop(struct net_device *dev)
+{
+ return __flexcan_chip_stop(dev, false);
}
static int flexcan_open(struct net_device *dev)
@@ -1165,7 +1192,7 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
can_rx_offload_disable(&priv->offload);
- flexcan_chip_stop(dev);
+ flexcan_chip_stop_disable_on_error(dev);
free_irq(dev->irq, dev);
clk_disable_unprepare(priv->clk_per);
@@ -1231,10 +1258,14 @@ static int register_flexcandev(struct net_device *dev)
if (err)
goto out_chip_disable;
- /* set freeze, halt and activate FIFO, restrict register access */
+ /* set freeze, halt */
+ err = flexcan_chip_freeze(priv);
+ if (err)
+ goto out_chip_disable;
+
+ /* activate FIFO, restrict register access */
reg = priv->read(&regs->mcr);
- reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
- FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
+ reg |= FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
priv->write(reg, &regs->mcr);
/* Currently we only support newer versions of this core
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index deb274a19ba0..e87c3bb82081 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -520,9 +520,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
}
while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
- if (rxfs & RXFS_RFL)
- netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
-
m_can_read_fifo(dev, rxfs);
quota--;
@@ -675,7 +672,7 @@ static int m_can_handle_state_change(struct net_device *dev,
unsigned int ecr;
switch (new_state) {
- case CAN_STATE_ERROR_ACTIVE:
+ case CAN_STATE_ERROR_WARNING:
/* error warning state */
priv->can.can_stats.error_warning++;
priv->can.state = CAN_STATE_ERROR_WARNING;
@@ -704,7 +701,7 @@ static int m_can_handle_state_change(struct net_device *dev,
__m_can_get_berr_counter(dev, &bec);
switch (new_state) {
- case CAN_STATE_ERROR_ACTIVE:
+ case CAN_STATE_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (bec.txerr > bec.rxerr) ?
@@ -976,7 +973,7 @@ static const struct can_bittiming_const m_can_bittiming_const_31X = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
.tseg1_max = 256,
- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_min = 2, /* Time segment 2 = phase_seg2 */
.tseg2_max = 128,
.sjw_max = 128,
.brp_min = 1,
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 5696d7e80751..4bc5d522c74b 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -256,8 +256,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
cf_len = get_can_dlc(pucan_msg_get_dlc(msg));
/* if this frame is an echo, */
- if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
- !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
+ if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) {
unsigned long flags;
spin_lock_irqsave(&priv->echo_lock, flags);
@@ -271,7 +270,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
netif_wake_queue(priv->ndev);
spin_unlock_irqrestore(&priv->echo_lock, flags);
- return 0;
+
+ /* if this frame is only an echo, stop here. Otherwise,
+ * continue to push this application self-received frame into
+ * its own rx queue.
+ */
+ if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE))
+ return 0;
}
/* otherwise, it should be pushed into rx fifo */
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index 5f7e97d54733..5cf4171df1f4 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -281,7 +281,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return -ENOBUFS;
}
@@ -326,7 +326,7 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
{
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return -ENOBUFS;
}
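
The rx-offload hunks above replace kfree_skb() with dev_kfree_skb_any() because these queueing paths can run from hard IRQ context, where the plain variant is not allowed. A simplified sketch of an enqueue helper using the context-agnostic free; demo_enqueue() is hypothetical and uses a plain tail insert rather than the driver's sorted insert:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int demo_enqueue(struct sk_buff_head *queue, struct sk_buff *skb,
                        unsigned int max_len)
{
        if (skb_queue_len(queue) > max_len) {
                /* may run in hard IRQ context, so avoid plain kfree_skb() */
                dev_kfree_skb_any(skb);
                return -ENOBUFS;
        }

        skb_queue_tail(queue, skb);
        return 0;
}
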
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index e22696190583..bed5ffa75b27 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -393,8 +393,13 @@ static int softing_netdev_open(struct net_device *ndev)
/* check or determine and set bittime */
ret = open_candev(ndev);
- if (!ret)
- ret = softing_startstop(ndev, 1);
+ if (ret)
+ return ret;
+
+ ret = softing_startstop(ndev, 1);
+ if (ret < 0)
+ close_candev(ndev);
+
return ret;
}
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index db6ea936dc3f..81a3fdd5e010 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -903,7 +903,8 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->base)) {
dev_err(&pdev->dev, "hecc ioremap failed\n");
- return PTR_ERR(priv->base);
+ err = PTR_ERR(priv->base);
+ goto probe_exit_candev;
}
/* handle hecc-ram memory */
@@ -916,7 +917,8 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->hecc_ram = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->hecc_ram)) {
dev_err(&pdev->dev, "hecc-ram ioremap failed\n");
- return PTR_ERR(priv->hecc_ram);
+ err = PTR_ERR(priv->hecc_ram);
+ goto probe_exit_candev;
}
/* handle mbx memory */
@@ -929,13 +931,14 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->mbx = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->mbx)) {
dev_err(&pdev->dev, "mbx ioremap failed\n");
- return PTR_ERR(priv->mbx);
+ err = PTR_ERR(priv->mbx);
+ goto probe_exit_candev;
}
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq) {
dev_err(&pdev->dev, "No irq resource\n");
- goto probe_exit;
+ goto probe_exit_candev;
}
priv->ndev = ndev;
@@ -988,7 +991,7 @@ probe_exit_clk:
clk_put(priv->clk);
probe_exit_candev:
free_candev(ndev);
-probe_exit:
+
return err;
}
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index cc2e224661b3..6a57169c2715 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -71,21 +71,27 @@ enum gs_can_identify_mode {
};
/* data types passed between host and device */
+
+/* The firmware on the original USB2CAN by Geschwister Schneider
+ * Technologie Entwicklungs- und Vertriebs UG exchanges all data
+ * between the host and the device in host byte order. This is done
+ * with the struct gs_host_config::byte_order member, which is sent
+ * first to indicate the desired byte order.
+ *
+ * The widely used open source firmware candleLight doesn't support
+ * this feature and exchanges the data in little endian byte order.
+ */
struct gs_host_config {
- u32 byte_order;
+ __le32 byte_order;
} __packed;
-/* All data exchanged between host and device is exchanged in host byte order,
- * thanks to the struct gs_host_config byte_order member, which is sent first
- * to indicate the desired byte order.
- */
struct gs_device_config {
u8 reserved1;
u8 reserved2;
u8 reserved3;
u8 icount;
- u32 sw_version;
- u32 hw_version;
+ __le32 sw_version;
+ __le32 hw_version;
} __packed;
#define GS_CAN_MODE_NORMAL 0
@@ -95,26 +101,26 @@ struct gs_device_config {
#define GS_CAN_MODE_ONE_SHOT BIT(3)
struct gs_device_mode {
- u32 mode;
- u32 flags;
+ __le32 mode;
+ __le32 flags;
} __packed;
struct gs_device_state {
- u32 state;
- u32 rxerr;
- u32 txerr;
+ __le32 state;
+ __le32 rxerr;
+ __le32 txerr;
} __packed;
struct gs_device_bittiming {
- u32 prop_seg;
- u32 phase_seg1;
- u32 phase_seg2;
- u32 sjw;
- u32 brp;
+ __le32 prop_seg;
+ __le32 phase_seg1;
+ __le32 phase_seg2;
+ __le32 sjw;
+ __le32 brp;
} __packed;
struct gs_identify_mode {
- u32 mode;
+ __le32 mode;
} __packed;
#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
@@ -125,23 +131,23 @@ struct gs_identify_mode {
#define GS_CAN_FEATURE_IDENTIFY BIT(5)
struct gs_device_bt_const {
- u32 feature;
- u32 fclk_can;
- u32 tseg1_min;
- u32 tseg1_max;
- u32 tseg2_min;
- u32 tseg2_max;
- u32 sjw_max;
- u32 brp_min;
- u32 brp_max;
- u32 brp_inc;
+ __le32 feature;
+ __le32 fclk_can;
+ __le32 tseg1_min;
+ __le32 tseg1_max;
+ __le32 tseg2_min;
+ __le32 tseg2_max;
+ __le32 sjw_max;
+ __le32 brp_min;
+ __le32 brp_max;
+ __le32 brp_inc;
} __packed;
#define GS_CAN_FLAG_OVERFLOW 1
struct gs_host_frame {
u32 echo_id;
- u32 can_id;
+ __le32 can_id;
u8 can_dlc;
u8 channel;
@@ -337,13 +343,13 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
if (!skb)
return;
- cf->can_id = hf->can_id;
+ cf->can_id = le32_to_cpu(hf->can_id);
cf->can_dlc = get_can_dlc(hf->can_dlc);
memcpy(cf->data, hf->data, 8);
/* ERROR frames tell us information about the controller */
- if (hf->can_id & CAN_ERR_FLAG)
+ if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
gs_update_state(dev, cf);
netdev->stats.rx_packets++;
@@ -426,11 +432,11 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
if (!dbt)
return -ENOMEM;
- dbt->prop_seg = bt->prop_seg;
- dbt->phase_seg1 = bt->phase_seg1;
- dbt->phase_seg2 = bt->phase_seg2;
- dbt->sjw = bt->sjw;
- dbt->brp = bt->brp;
+ dbt->prop_seg = cpu_to_le32(bt->prop_seg);
+ dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
+ dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
+ dbt->sjw = cpu_to_le32(bt->sjw);
+ dbt->brp = cpu_to_le32(bt->brp);
/* request bit timings */
rc = usb_control_msg(interface_to_usbdev(intf),
@@ -511,7 +517,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
cf = (struct can_frame *)skb->data;
- hf->can_id = cf->can_id;
+ hf->can_id = cpu_to_le32(cf->can_id);
hf->can_dlc = cf->can_dlc;
memcpy(hf->data, cf->data, cf->can_dlc);
@@ -581,6 +587,7 @@ static int gs_can_open(struct net_device *netdev)
int rc, i;
struct gs_device_mode *dm;
u32 ctrlmode;
+ u32 flags = 0;
rc = open_candev(netdev);
if (rc)
@@ -648,24 +655,24 @@ static int gs_can_open(struct net_device *netdev)
/* flags */
ctrlmode = dev->can.ctrlmode;
- dm->flags = 0;
if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
- dm->flags |= GS_CAN_MODE_LOOP_BACK;
+ flags |= GS_CAN_MODE_LOOP_BACK;
else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
- dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
+ flags |= GS_CAN_MODE_LISTEN_ONLY;
/* Controller is not allowed to retry TX
* this mode is unavailable on atmels uc3c hardware
*/
if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
- dm->flags |= GS_CAN_MODE_ONE_SHOT;
+ flags |= GS_CAN_MODE_ONE_SHOT;
if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+ flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
/* finally start device */
- dm->mode = GS_CAN_MODE_START;
+ dm->mode = cpu_to_le32(GS_CAN_MODE_START);
+ dm->flags = cpu_to_le32(flags);
rc = usb_control_msg(interface_to_usbdev(dev->iface),
usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
GS_USB_BREQ_MODE,
@@ -745,9 +752,9 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
return -ENOMEM;
if (do_identify)
- imode->mode = GS_CAN_IDENTIFY_ON;
+ imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
else
- imode->mode = GS_CAN_IDENTIFY_OFF;
+ imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
rc = usb_control_msg(interface_to_usbdev(dev->iface),
usb_sndctrlpipe(interface_to_usbdev(dev->iface),
@@ -798,6 +805,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
struct net_device *netdev;
int rc;
struct gs_device_bt_const *bt_const;
+ u32 feature;
bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
if (!bt_const)
@@ -838,14 +846,14 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* dev settup */
strcpy(dev->bt_const.name, "gs_usb");
- dev->bt_const.tseg1_min = bt_const->tseg1_min;
- dev->bt_const.tseg1_max = bt_const->tseg1_max;
- dev->bt_const.tseg2_min = bt_const->tseg2_min;
- dev->bt_const.tseg2_max = bt_const->tseg2_max;
- dev->bt_const.sjw_max = bt_const->sjw_max;
- dev->bt_const.brp_min = bt_const->brp_min;
- dev->bt_const.brp_max = bt_const->brp_max;
- dev->bt_const.brp_inc = bt_const->brp_inc;
+ dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
+ dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
+ dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
+ dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max);
+ dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max);
+ dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min);
+ dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max);
+ dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc);
dev->udev = interface_to_usbdev(intf);
dev->iface = intf;
@@ -862,28 +870,29 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* can settup */
dev->can.state = CAN_STATE_STOPPED;
- dev->can.clock.freq = bt_const->fclk_can;
+ dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can);
dev->can.bittiming_const = &dev->bt_const;
dev->can.do_set_bittiming = gs_usb_set_bittiming;
dev->can.ctrlmode_supported = 0;
- if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
+ feature = le32_to_cpu(bt_const->feature);
+ if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
- if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
+ if (feature & GS_CAN_FEATURE_LOOP_BACK)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
- if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
+ if (feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
- if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
+ if (feature & GS_CAN_FEATURE_ONE_SHOT)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
SET_NETDEV_DEV(netdev, &intf->dev);
- if (dconf->sw_version > 1)
- if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY)
+ if (le32_to_cpu(dconf->sw_version) > 1)
+ if (feature & GS_CAN_FEATURE_IDENTIFY)
netdev->ethtool_ops = &gs_usb_ethtool_ops;
kfree(bt_const);
@@ -918,7 +927,7 @@ static int gs_usb_probe(struct usb_interface *intf,
if (!hconf)
return -ENOMEM;
- hconf->byte_order = 0x0000beef;
+ hconf->byte_order = cpu_to_le32(0x0000beef);
/* send host config */
rc = usb_control_msg(interface_to_usbdev(intf),
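
The gs_usb conversion above declares every wire-format field as __le32 and converts at the boundary with cpu_to_le32()/le32_to_cpu(), so the driver behaves identically on big- and little-endian hosts regardless of which firmware is on the other end. A minimal sketch of that pattern with a hypothetical struct demo_wire_msg:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_wire_msg {
        __le32 id;
        __le32 flags;
} __packed;

static void demo_pack(struct demo_wire_msg *msg, u32 id, u32 flags)
{
        /* convert to the fixed wire byte order when filling the message */
        msg->id = cpu_to_le32(id);
        msg->flags = cpu_to_le32(flags);
}

static u32 demo_unpack_id(const struct demo_wire_msg *msg)
{
        /* convert back to host byte order when reading it */
        return le32_to_cpu(msg->id);
}
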
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 7ab87a758754..218fadc91155 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -367,7 +367,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
.tseg2_max = 32,
.sjw_max = 16,
.brp_min = 1,
- .brp_max = 4096,
+ .brp_max = 8192,
.brp_inc = 1,
};
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 1b0afeaf1a3c..896f5b022729 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -337,8 +337,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
if (!ctx)
return NETDEV_TX_BUSY;
- can_put_echo_skb(skb, priv->netdev, ctx->ndx);
-
if (cf->can_id & CAN_EFF_FLAG) {
/* SIDH | SIDL | EIDH | EIDL
* 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0
@@ -368,6 +366,8 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG)
usb_msg.dlc |= MCBA_DLC_RTR_MASK;
+ can_put_echo_skb(skb, priv->netdev, ctx->ndx);
+
err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx);
if (err)
goto xmit_failed;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index afc8d978124e..c235d1e6bc52 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -138,14 +138,55 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
/* protect from getting time before setting now */
if (ktime_to_ns(time_ref->tv_host)) {
u64 delta_us;
+ s64 delta_ts = 0;
+
+ /* General case: dev_ts_1 < dev_ts_2 < ts, with:
+ *
+ * - dev_ts_1 = previous sync timestamp
+ * - dev_ts_2 = last sync timestamp
+ * - ts = event timestamp
+ * - ts_period = known sync period (theoretical)
+ * ~ dev_ts2 - dev_ts1
+ * *but*:
+ *
+ * - time counters wrap (see adapter->ts_used_bits)
+ * - sometimes, dev_ts_1 < ts < dev_ts2
+ *
+ * "normal" case (sync time counters increase):
+ * must take into account case when ts wraps (tsw)
+ *
+ * < ts_period > < >
+ * | | |
+ * ---+--------+----+-------0-+--+-->
+ * ts_dev_1 | ts_dev_2 |
+ * ts tsw
+ */
+ if (time_ref->ts_dev_1 < time_ref->ts_dev_2) {
+ /* case when event time (tsw) wraps */
+ if (ts < time_ref->ts_dev_1)
+ delta_ts = BIT_ULL(time_ref->adapter->ts_used_bits);
+
+ /* Otherwise, sync time counter (ts_dev_2) has wrapped:
+ * handle case when event time (tsn) hasn't.
+ *
+ * < ts_period > < >
+ * | | |
+ * ---+--------+--0-+---------+--+-->
+ * ts_dev_1 | ts_dev_2 |
+ * tsn ts
+ */
+ } else if (time_ref->ts_dev_1 < ts) {
+ delta_ts = -BIT_ULL(time_ref->adapter->ts_used_bits);
+ }
- delta_us = ts - time_ref->ts_dev_2;
- if (ts < time_ref->ts_dev_2)
- delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1;
+ /* add delay between last sync and event timestamps */
+ delta_ts += (signed int)(ts - time_ref->ts_dev_2);
- delta_us += time_ref->ts_total;
+ /* add time from beginning to last sync */
+ delta_ts += time_ref->ts_total;
- delta_us *= time_ref->adapter->us_per_ts_scale;
+ /* convert ticks number into microseconds */
+ delta_us = delta_ts * time_ref->adapter->us_per_ts_scale;
delta_us >>= time_ref->adapter->us_per_ts_shift;
*time = ktime_add_us(time_ref->tv_host_0, delta_us);
@@ -823,7 +864,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
if (dev->adapter->dev_set_bus) {
err = dev->adapter->dev_set_bus(dev, 0);
if (err)
- goto lbl_unregister_candev;
+ goto adap_dev_free;
}
/* get device number early */
@@ -835,6 +876,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
return 0;
+adap_dev_free:
+ if (dev->adapter->dev_free)
+ dev->adapter->dev_free(dev);
+
lbl_unregister_candev:
unregister_candev(netdev);
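
The timestamp hunk above computes the elapsed device ticks since the last sync point as a signed delta and corrects it by one full counter period when either the event timestamp or the sync counter has wrapped. A sketch of just that wrap-corrected delta (the driver additionally adds the accumulated ts_total); demo_ts_delta() and ts_bits are hypothetical:

#include <linux/bits.h>
#include <linux/types.h>

static s64 demo_ts_delta(u32 ts, u32 ts_sync_prev, u32 ts_sync_last,
                         unsigned int ts_bits)
{
        s64 delta = 0;

        if (ts_sync_prev < ts_sync_last) {
                /* sync counter did not wrap: only the event may have */
                if (ts < ts_sync_prev)
                        delta = BIT_ULL(ts_bits);
        } else if (ts_sync_prev < ts) {
                /* sync counter wrapped but the event did not */
                delta = -(s64)BIT_ULL(ts_bits);
        }

        /* signed difference to the last sync, plus the wrap correction */
        return delta + (s32)(ts - ts_sync_last);
}
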
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 41988358f63c..40ac37fe9dcd 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -476,12 +476,18 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)];
- struct net_device *netdev = dev->netdev;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
struct canfd_frame *cfd;
struct sk_buff *skb;
const u16 rx_msg_flags = le16_to_cpu(rm->flags);
+ if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev))
+ return -ENOMEM;
+
+ dev = usb_if->dev[pucan_msg_get_channel(rm)];
+ netdev = dev->netdev;
+
if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
/* CANFD frame case */
skb = alloc_canfd_skb(netdev, &cfd);
@@ -514,11 +520,11 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
else
memcpy(cfd->data, rm->d, cfd->len);
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
-
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cfd->len;
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+
return 0;
}
@@ -527,15 +533,21 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
- struct pcan_usb_fd_device *pdev =
- container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_usb_fd_device *pdev;
enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
enum can_state rx_state, tx_state;
- struct net_device *netdev = dev->netdev;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
struct can_frame *cf;
struct sk_buff *skb;
+ if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev))
+ return -ENOMEM;
+
+ dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
+ pdev = container_of(dev, struct pcan_usb_fd_device, dev);
+ netdev = dev->netdev;
+
/* nothing should be sent while in BUS_OFF state */
if (dev->can.state == CAN_STATE_BUS_OFF)
return 0;
@@ -574,11 +586,11 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
if (!skb)
return -ENOMEM;
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
-
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cf->can_dlc;
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+
return 0;
}
@@ -587,9 +599,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)];
- struct pcan_usb_fd_device *pdev =
- container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_usb_fd_device *pdev;
+ struct peak_usb_device *dev;
+
+ if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev))
+ return -EINVAL;
+
+ dev = usb_if->dev[pucan_ermsg_get_channel(er)];
+ pdev = container_of(dev, struct pcan_usb_fd_device, dev);
/* keep a trace of tx and rx error counters for later use */
pdev->bec.txerr = er->tx_err_cnt;
@@ -603,11 +620,17 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)];
- struct net_device *netdev = dev->netdev;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
struct can_frame *cf;
struct sk_buff *skb;
+ if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev))
+ return -EINVAL;
+
+ dev = usb_if->dev[pufd_omsg_get_channel(ov)];
+ netdev = dev->netdev;
+
/* allocate an skb to store the error frame */
skb = alloc_can_err_skb(netdev, &cf);
if (!skb)
@@ -724,6 +747,9 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
u16 tx_msg_size, tx_msg_flags;
u8 can_dlc;
+ if (cfd->len > CANFD_MAX_DLEN)
+ return -EINVAL;
+
tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4);
tx_msg->size = cpu_to_le16(tx_msg_size);
tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index ed6828821fbd..ccd758ba3fb0 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -49,6 +49,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *peer;
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
+ u8 len;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
@@ -71,12 +72,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ len = cfd->len;
if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
- srcstats->tx_bytes += cfd->len;
+ srcstats->tx_bytes += len;
peerstats = &peer->stats;
peerstats->rx_packets++;
- peerstats->rx_bytes += cfd->len;
+ peerstats->rx_bytes += len;
}
out_unlock:
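
The vxcan_xmit() hunk above copies cfd->len into a local before calling netif_rx_ni(), because ownership of the skb passes to the stack there and it may already be freed when the statistics are updated. A minimal sketch of that snapshot-before-handoff rule with a hypothetical demo_forward():

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_forward(struct net_device *peer, struct sk_buff *skb,
                         struct net_device_stats *stats)
{
        unsigned int len = skb->len;    /* copied before the stack may free skb */

        skb->dev = peer;
        if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
                stats->tx_packets++;
                stats->tx_bytes += len;
        }
}
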
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index ac5d945b934a..7eaeab65d39f 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -507,12 +507,27 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
}
EXPORT_SYMBOL(b53_imp_vlan_setup);
+static void b53_port_set_learning(struct b53_device *dev, int port,
+ bool learning)
+{
+ u16 reg;
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
+ if (learning)
+ reg &= ~BIT(port);
+ else
+ reg |= BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
+}
+
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
unsigned int cpu_port = ds->ports[port].cpu_dp->index;
u16 pvlan;
+ b53_port_set_learning(dev, port, false);
+
/* Clear the Rx and Tx disable bits and set to no spanning tree */
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
@@ -620,6 +635,7 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
b53_brcm_hdr_setup(dev->ds, port);
+ b53_port_set_learning(dev, port, false);
}
static void b53_enable_mib(struct b53_device *dev)
@@ -1142,7 +1158,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
return -EOPNOTSUPP;
- if (vlan->vid_end > dev->num_vlans)
+ if (vlan->vid_end >= dev->num_vlans)
return -ERANGE;
b53_enable_vlan(dev, true, dev->vlan_filtering_enabled);
@@ -1253,6 +1269,10 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
reg |= ARLTBL_RW;
else
reg &= ~ARLTBL_RW;
+ if (dev->vlan_enabled)
+ reg &= ~ARLTBL_IVL_SVL_SELECT;
+ else
+ reg |= ARLTBL_IVL_SVL_SELECT;
b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
return b53_arl_op_wait(dev);
@@ -1262,6 +1282,7 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
u16 vid, struct b53_arl_entry *ent, u8 *idx,
bool is_valid)
{
+ DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
unsigned int i;
int ret;
@@ -1269,6 +1290,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
if (ret)
return ret;
+ bitmap_zero(free_bins, dev->num_arl_entries);
+
/* Read the bins */
for (i = 0; i < dev->num_arl_entries; i++) {
u64 mac_vid;
@@ -1280,13 +1303,24 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
b53_arl_to_entry(ent, mac_vid, fwd_entry);
- if (!(fwd_entry & ARLTBL_VALID))
+ if (!(fwd_entry & ARLTBL_VALID)) {
+ set_bit(i, free_bins);
continue;
+ }
if ((mac_vid & ARLTBL_MAC_MASK) != mac)
continue;
+ if (dev->vlan_enabled &&
+ ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
+ continue;
*idx = i;
+ return 0;
}
+ if (bitmap_weight(free_bins, dev->num_arl_entries) == 0)
+ return -ENOSPC;
+
+ *idx = find_first_bit(free_bins, dev->num_arl_entries);
+
return -ENOENT;
}
@@ -1316,10 +1350,23 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
if (op)
return ret;
- /* We could not find a matching MAC, so reset to a new entry */
- if (ret) {
+ switch (ret) {
+ case -ETIMEDOUT:
+ return ret;
+ case -ENOSPC:
+ dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
+ addr, vid);
+ return is_valid ? ret : 0;
+ case -ENOENT:
+ /* We could not find a matching MAC, so reset to a new entry */
+ dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
+ addr, vid, idx);
fwd_entry = 0;
- idx = 1;
+ break;
+ default:
+ dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
+ addr, vid, idx);
+ break;
}
memset(&ent, 0, sizeof(ent));
@@ -1486,6 +1533,8 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
+ b53_port_set_learning(dev, port, true);
+
return 0;
}
EXPORT_SYMBOL(b53_br_join);
@@ -1533,6 +1582,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
vl->untag |= BIT(port) | BIT(cpu_port);
b53_set_vlan_entry(dev, pvid, vl);
}
+ b53_port_set_learning(dev, port, false);
}
EXPORT_SYMBOL(b53_br_leave);
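
The b53_arl_read() rework above records which bins are free while scanning for a match, so a miss can either return a reusable index or report -ENOSPC instead of silently overwriting a fixed bin. A simplified sketch of that selection logic, assuming at most 8 bins and treating a zero entry as free; demo_find_slot() is hypothetical:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

/* n_entries is assumed to be <= 8; a zero entry means the bin is free. */
static int demo_find_slot(const u64 *entries, unsigned int n_entries,
                          u64 wanted, u8 *idx)
{
        DECLARE_BITMAP(free_bins, 8);
        unsigned int i;

        bitmap_zero(free_bins, n_entries);

        for (i = 0; i < n_entries; i++) {
                if (!entries[i]) {
                        set_bit(i, free_bins);
                        continue;
                }
                if (entries[i] != wanted)
                        continue;

                *idx = i;
                return 0;                       /* exact match */
        }

        if (bitmap_weight(free_bins, n_entries) == 0)
                return -ENOSPC;                 /* full and no match */

        *idx = find_first_bit(free_bins, n_entries);
        return -ENOENT;                         /* no match, but a free bin */
}
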
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index 2a9f421680aa..b2c539a42154 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -115,6 +115,7 @@
#define B53_UC_FLOOD_MASK 0x32
#define B53_MC_FLOOD_MASK 0x34
#define B53_IPMC_FLOOD_MASK 0x36
+#define B53_DIS_LEARNING 0x3c
/*
* Override Ports 0-7 State on devices with xMII interfaces (8 bit)
@@ -292,6 +293,7 @@
/* ARL Table Read/Write Register (8 bit) */
#define B53_ARLTBL_RW_CTRL 0x00
#define ARLTBL_RW BIT(0)
+#define ARLTBL_IVL_SVL_SELECT BIT(6)
#define ARLTBL_START_DONE BIT(7)
/* MAC Address Index Register (48 bit) */
@@ -304,7 +306,7 @@
*
* BCM5325 and BCM5365 share most definitions below
*/
-#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n))
+#define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10)
#define ARLTBL_MAC_MASK 0xffffffffffffULL
#define ARLTBL_VID_S 48
#define ARLTBL_VID_MASK_25 0xff
@@ -316,13 +318,16 @@
#define ARLTBL_VALID_25 BIT(63)
/* ARL Table Data Entry N Registers (32 bit) */
-#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08)
+#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18)
#define ARLTBL_DATA_PORT_ID_MASK 0x1ff
#define ARLTBL_TC(tc) ((3 & tc) << 11)
#define ARLTBL_AGE BIT(14)
#define ARLTBL_STATIC BIT(15)
#define ARLTBL_VALID BIT(16)
+/* Maximum number of bin entries in the ARL for all switches */
+#define B53_ARLTBL_MAX_BIN_ENTRIES 4
+
/* ARL Search Control Register (8 bit) */
#define B53_ARL_SRCH_CTL 0x50
#define B53_ARL_SRCH_CTL_25 0x20
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index ccba648452c4..3deda0321c00 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -173,11 +173,6 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
reg &= ~P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
- /* Enable learning */
- reg = core_readl(priv, CORE_DIS_LEARN);
- reg &= ~BIT(port);
- core_writel(priv, reg, CORE_DIS_LEARN);
-
/* Enable Broadcom tags for that port if requested */
if (priv->brcm_tag_mask & BIT(port))
b53_brcm_hdr_setup(ds, port);
@@ -423,15 +418,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
/* Find our integrated MDIO bus node */
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
priv->master_mii_bus = of_mdio_find_bus(dn);
- if (!priv->master_mii_bus)
+ if (!priv->master_mii_bus) {
+ of_node_put(dn);
return -EPROBE_DEFER;
+ }
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
- if (!priv->slave_mii_bus)
+ if (!priv->slave_mii_bus) {
+ of_node_put(dn);
return -ENOMEM;
+ }
priv->slave_mii_bus->priv = priv;
priv->slave_mii_bus->name = "sf2 slave mii";
@@ -483,8 +482,10 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
* in bits 15:8 and the patch level in bits 7:0 which is exactly what
* the REG_PHY_REVISION register layout is.
*/
-
- return priv->hw_params.gphy_rev;
+ if (priv->int_phy_mask & BIT(port))
+ return priv->hw_params.gphy_rev;
+ else
+ return 0;
}
static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
@@ -1078,6 +1079,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
set_bit(0, priv->cfp.used);
set_bit(0, priv->cfp.unique);
+ /* Balance of_node_put() done by of_find_node_by_name() */
+ of_node_get(dn);
ports = of_find_node_by_name(dn, "ports");
if (ports) {
bcm_sf2_identify_ports(priv, ports);
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 21db1804e85d..12156ab186a1 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -742,17 +742,14 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
fs->m_ext.data[1]))
return -EINVAL;
- if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
+ if (fs->location != RX_CLS_LOC_ANY &&
+ fs->location > bcm_sf2_cfp_rule_size(priv))
return -EINVAL;
if (fs->location != RX_CLS_LOC_ANY &&
test_bit(fs->location, priv->cfp.used))
return -EBUSY;
- if (fs->location != RX_CLS_LOC_ANY &&
- fs->location > bcm_sf2_cfp_rule_size(priv))
- return -EINVAL;
-
/* This rule is a Wake-on-LAN filter and we must specifically
* target the CPU port in order for it to be working.
*/
@@ -839,7 +836,7 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
u32 next_loc = 0;
int ret;
- if (loc >= CFP_NUM_RULES)
+ if (loc > bcm_sf2_cfp_rule_size(priv))
return -EINVAL;
/* Refuse deleting unused rules, and those that are not unique since
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 816f34d64736..990de7c54b46 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -360,6 +360,7 @@ static void __exit dsa_loop_exit(void)
}
module_exit(dsa_loop_exit);
+MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Fainelli");
MODULE_DESCRIPTION("DSA loopback driver");
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 8aa3b0af9fc2..6335c4ea0957 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -685,11 +685,8 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
/* Setup the MAC by default for the cpu port */
mt7530_write(priv, MT7530_PMCR_P(port), PMCR_CPUP_LINK);
- /* Disable auto learning on the cpu port */
- mt7530_set(priv, MT7530_PSC_P(port), SA_DIS);
-
- /* Unknown unicast frame fordwarding to the cpu port */
- mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port)));
+ /* Unknown multicast frame forwarding to the cpu port */
+ mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port)));
/* CPU port gets connected to all user ports of
* the switch
@@ -824,8 +821,9 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
*/
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_MATRIX_MODE);
- mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
- VLAN_ATTR(MT7530_VLAN_TRANSPARENT));
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+ VLAN_ATTR(MT7530_VLAN_TRANSPARENT) |
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
priv->ports[port].vlan_filtering = false;
@@ -843,8 +841,8 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
if (all_user_ports_removed) {
mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT),
PCR_MATRIX(dsa_user_ports(priv->ds)));
- mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT),
- PORT_SPEC_TAG);
+ mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), PORT_SPEC_TAG
+ | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
}
@@ -853,25 +851,23 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- /* The real fabric path would be decided on the membership in the
- * entry of VLAN table. PCR_MATRIX set up here with ALL_MEMBERS
- * means potential VLAN can be consisting of certain subset of all
- * ports.
- */
- mt7530_rmw(priv, MT7530_PCR_P(port),
- PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
-
/* Trapped into security mode allows packet forwarding through VLAN
- * table lookup.
+ * table lookup. CPU port is set to fallback mode to let untagged
+ * frames pass through.
*/
- mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
- MT7530_PORT_SECURITY_MODE);
+ if (dsa_is_cpu_port(ds, port))
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_FALLBACK_MODE);
+ else
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_SECURITY_MODE);
/* Set the port as a user port which is to be able to recognize VID
* from incoming packets before fetching entry within the VLAN table.
*/
- mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
- VLAN_ATTR(MT7530_VLAN_USER));
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER) |
+ PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
}
static void
@@ -1286,8 +1282,6 @@ mt7530_setup(struct dsa_switch *ds)
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
- mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK);
-
for (i = 0; i < MT7530_NUM_PORTS; i++) {
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -1297,6 +1291,10 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_cpu_port_enable(priv, i);
else
mt7530_port_disable(ds, i, NULL);
+
+ /* Enable consistent egress tag */
+ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
/* Flush the FDB table */
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index d9b407a22a58..101d309ee445 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -34,6 +34,7 @@
#define MT7530_MFC 0x10
#define BC_FFP(x) (((x) & 0xff) << 24)
#define UNM_FFP(x) (((x) & 0xff) << 16)
+#define UNM_FFP_MASK UNM_FFP(~0)
#define UNU_FFP(x) (((x) & 0xff) << 8)
#define UNU_FFP_MASK UNU_FFP(~0)
@@ -147,6 +148,12 @@ enum mt7530_port_mode {
/* Port Matrix Mode: Frames are forwarded by the PCR_MATRIX members. */
MT7530_PORT_MATRIX_MODE = PORT_VLAN(0),
+ /* Fallback Mode: Forward received frames even if the ingress port is
+ * not a member of the VLAN. Frames whose VID is not listed in the
+ * VLAN table are forwarded by the PCR_MATRIX members.
+ */
+ MT7530_PORT_FALLBACK_MODE = PORT_VLAN(1),
+
/* Security Mode: Discard any frame due to ingress membership
* violation or VID missed on the VLAN table.
*/
@@ -167,9 +174,16 @@ enum mt7530_port_mode {
/* Register for port vlan control */
#define MT7530_PVC_P(x) (0x2010 + ((x) * 0x100))
#define PORT_SPEC_TAG BIT(5)
+#define PVC_EG_TAG(x) (((x) & 0x7) << 8)
+#define PVC_EG_TAG_MASK PVC_EG_TAG(7)
#define VLAN_ATTR(x) (((x) & 0x3) << 6)
#define VLAN_ATTR_MASK VLAN_ATTR(3)
+enum mt7530_vlan_port_eg_tag {
+ MT7530_VLAN_EG_DISABLED = 0,
+ MT7530_VLAN_EG_CONSISTENT = 1,
+};
+
enum mt7530_vlan_port_attr {
MT7530_VLAN_USER = 0,
MT7530_VLAN_TRANSPARENT = 3,
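As a rough illustration of how the new field macros compose (names from this header, call shape as in the mt7530.c hunks above): PVC_EG_TAG_MASK selects bits 10:8 of the per-port PVC register and VLAN_ATTR_MASK bits 7:6, so both fields can be updated in a single read-modify-write.

	u32 mask = VLAN_ATTR_MASK | PVC_EG_TAG_MASK;        /* bits 7:6 and 10:8 */
	u32 val  = VLAN_ATTR(MT7530_VLAN_USER) |
		   PVC_EG_TAG(MT7530_VLAN_EG_DISABLED);

	mt7530_rmw(priv, MT7530_PVC_P(port), mask, val);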
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 43b00e8bcdcd..67c0ad3b8079 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1658,7 +1658,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (!entry.portvec)
entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED;
} else {
- entry.portvec |= BIT(port);
+ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
+ entry.portvec = BIT(port);
+ else
+ entry.portvec |= BIT(port);
+
entry.state = state;
}
@@ -2630,10 +2634,17 @@ unlock:
return err;
}
+/* prod_id for switch families which do not have a PHY model number */
+static const u16 family_prod_id_table[] = {
+ [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+ [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
+};
+
static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ u16 prod_id;
u16 val;
int err;
@@ -2644,23 +2655,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
mutex_unlock(&chip->reg_lock);
- if (reg == MII_PHYSID2) {
- /* Some internal PHYs don't have a model number. */
- if (chip->info->family != MV88E6XXX_FAMILY_6165)
- /* Then there is the 6165 family. It gets is
- * PHYs correct. But it can also have two
- * SERDES interfaces in the PHY address
- * space. And these don't have a model
- * number. But they are not PHYs, so we don't
- * want to give them something a PHY driver
- * will recognise.
- *
- * Use the mv88e6390 family model number
- * instead, for anything which really could be
- * a PHY,
- */
- if (!(val & 0x3f0))
- val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
+ /* Some internal PHYs don't have a model number. */
+ if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
+ chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
+ prod_id = family_prod_id_table[chip->info->family];
+ if (prod_id)
+ val |= prod_id >> 4;
}
return err ? err : val;
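A note on the 0x3f0 test above: in the MII PHYSID2 register, bits 9:4 hold the PHY model number and bits 3:0 the revision, so a zero model field marks the internal PHYs the lookup table compensates for. Condensed, the substitution amounts to:

	if (reg == MII_PHYSID2 && !(val & 0x3f0) &&           /* model field (bits 9:4) empty */
	    chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
		u16 prod_id = family_prod_id_table[chip->info->family];

		if (prod_id)
			val |= prod_id >> 4;                  /* align product number with the model field */
	}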
@@ -2930,7 +2930,6 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 7a6667e0b9f9..e17158139aa8 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -127,11 +127,9 @@ static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
* Offset 0x08: VTU/STU Data Register 2
* Offset 0x09: VTU/STU Data Register 3
*/
-
-static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
+ u16 *regs)
{
- u16 regs[3];
int i;
/* Read all 3 VTU/STU Data registers */
@@ -144,12 +142,45 @@ static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
return err;
}
- /* Extract MemberTag and PortState data */
+ return 0;
+}
+
+static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 regs[3];
+ int err;
+ int i;
+
+ err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
+ if (err)
+ return err;
+
+ /* Extract MemberTag data */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int member_offset = (i % 4) * 4;
- unsigned int state_offset = member_offset + 2;
entry->member[i] = (regs[i / 4] >> member_offset) & 0x3;
+ }
+
+ return 0;
+}
+
+static int mv88e6185_g1_stu_data_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 regs[3];
+ int err;
+ int i;
+
+ err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
+ if (err)
+ return err;
+
+ /* Extract PortState data */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ unsigned int state_offset = (i % 4) * 4 + 2;
+
entry->state[i] = (regs[i / 4] >> state_offset) & 0x3;
}
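The split into two helpers follows from the register packing: each of the three VTU/STU data registers carries four ports, four bits per port, with the MemberTag in the low two bits of a port's nibble and the PortState in the upper two. For a given port i:

	unsigned int member = (regs[i / 4] >> ((i % 4) * 4)) & 0x3;      /* nibble bits 1:0 */
	unsigned int state  = (regs[i / 4] >> ((i % 4) * 4 + 2)) & 0x3;  /* nibble bits 3:2 */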
@@ -322,6 +353,10 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
+ err = mv88e6185_g1_stu_data_read(chip, entry);
+ if (err)
+ return err;
+
/* VTU DBNum[3:0] are located in VTU Operation 3:0
* VTU DBNum[7:4] are located in VTU Operation 11:8
*/
@@ -347,16 +382,20 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
if (entry->valid) {
- /* Fetch (and mask) VLAN PortState data from the STU */
- err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
+ err = mv88e6185_g1_vtu_data_read(chip, entry);
if (err)
return err;
- err = mv88e6185_g1_vtu_data_read(chip, entry);
+ err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+ /* Fetch VLAN PortState data from the STU */
+ err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6185_g1_stu_data_read(chip, entry);
if (err)
return err;
}
diff --git a/drivers/net/dsa/realtek-smi.h b/drivers/net/dsa/realtek-smi.h
index 9a63b51e1d82..6f2dab7e33d6 100644
--- a/drivers/net/dsa/realtek-smi.h
+++ b/drivers/net/dsa/realtek-smi.h
@@ -25,6 +25,9 @@ struct rtl8366_mib_counter {
const char *name;
};
+/**
+ * struct rtl8366_vlan_mc - Virtual LAN member configuration
+ */
struct rtl8366_vlan_mc {
u16 vid;
u16 untag;
@@ -119,7 +122,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi);
int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
u32 untag, u32 fid);
-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
unsigned int vid);
int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index c281c488a306..dddbc86429bd 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -36,113 +36,66 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
}
EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
-int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
- u32 untag, u32 fid)
+/**
+ * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
+ * @smi: the Realtek SMI device instance
+ * @vid: the VLAN ID to look up or allocate
+ * @vlanmc: the pointer will be assigned to a pointer to a valid member config
+ * if successful
+ * @return: index of a new member config or negative error number
+ */
+static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
+ struct rtl8366_vlan_mc *vlanmc)
{
struct rtl8366_vlan_4k vlan4k;
int ret;
int i;
- /* Update the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
- if (ret)
- return ret;
-
- vlan4k.member = member;
- vlan4k.untag = untag;
- vlan4k.fid = fid;
- ret = smi->ops->set_vlan_4k(smi, &vlan4k);
- if (ret)
- return ret;
-
- /* Try to find an existing MC entry for this VID */
+ /* Try to find an existing member config entry for this VID */
for (i = 0; i < smi->num_vlan_mc; i++) {
- struct rtl8366_vlan_mc vlanmc;
-
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
+ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ i, vid);
return ret;
-
- if (vid == vlanmc.vid) {
- /* update the MC entry */
- vlanmc.member = member;
- vlanmc.untag = untag;
- vlanmc.fid = fid;
-
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- break;
}
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
-
-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
-{
- struct rtl8366_vlan_mc vlanmc;
- int ret;
- int index;
-
- ret = smi->ops->get_mc_index(smi, port, &index);
- if (ret)
- return ret;
- ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
- if (ret)
- return ret;
-
- *val = vlanmc.vid;
- return 0;
-}
-EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
-
-int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
- unsigned int vid)
-{
- struct rtl8366_vlan_mc vlanmc;
- struct rtl8366_vlan_4k vlan4k;
- int ret;
- int i;
-
- /* Try to find an existing MC entry for this VID */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- if (vid == vlanmc.vid) {
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- ret = smi->ops->set_mc_index(smi, port, i);
- return ret;
- }
+ if (vid == vlanmc->vid)
+ return i;
}
/* We have no MC entry for this VID, try to find an empty one */
for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
+ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ i, vid);
return ret;
+ }
- if (vlanmc.vid == 0 && vlanmc.member == 0) {
+ if (vlanmc->vid == 0 && vlanmc->member == 0) {
/* Update the entry from the 4K table */
ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
- if (ret)
+ if (ret) {
+ dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
+ i, vid);
return ret;
+ }
- vlanmc.vid = vid;
- vlanmc.member = vlan4k.member;
- vlanmc.untag = vlan4k.untag;
- vlanmc.fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret)
+ vlanmc->vid = vid;
+ vlanmc->member = vlan4k.member;
+ vlanmc->untag = vlan4k.untag;
+ vlanmc->fid = vlan4k.fid;
+ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ i, vid);
return ret;
+ }
- ret = smi->ops->set_mc_index(smi, port, i);
- return ret;
+ dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
+ i, vid);
+ return i;
}
}
@@ -160,24 +113,110 @@ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
if (ret)
return ret;
- vlanmc.vid = vid;
- vlanmc.member = vlan4k.member;
- vlanmc.untag = vlan4k.untag;
- vlanmc.fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret)
+ vlanmc->vid = vid;
+ vlanmc->member = vlan4k.member;
+ vlanmc->untag = vlan4k.untag;
+ vlanmc->fid = vlan4k.fid;
+ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ i, vid);
return ret;
-
- ret = smi->ops->set_mc_index(smi, port, i);
- return ret;
+ }
+ dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
+ i, vid);
+ return i;
}
}
- dev_err(smi->dev,
- "all VLAN member configurations are in use\n");
-
+ dev_err(smi->dev, "all VLAN member configurations are in use\n");
return -ENOSPC;
}
+
+int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+ u32 untag, u32 fid)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ struct rtl8366_vlan_4k vlan4k;
+ int mc;
+ int ret;
+
+ if (!smi->ops->is_vlan_valid(smi, vid))
+ return -EINVAL;
+
+ dev_dbg(smi->dev,
+ "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
+ vid, member, untag);
+
+ /* Update the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret)
+ return ret;
+
+ vlan4k.member |= member;
+ vlan4k.untag |= untag;
+ vlan4k.fid = fid;
+ ret = smi->ops->set_vlan_4k(smi, &vlan4k);
+ if (ret)
+ return ret;
+
+ dev_dbg(smi->dev,
+ "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
+ vid, vlan4k.member, vlan4k.untag);
+
+ /* Find or allocate a member config for this VID */
+ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ if (ret < 0)
+ return ret;
+ mc = ret;
+
+ /* Update the MC entry */
+ vlanmc.member |= member;
+ vlanmc.untag |= untag;
+ vlanmc.fid = fid;
+
+ /* Commit updates to the MC entry */
+ ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
+ if (ret)
+ dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
+ mc, vid);
+ else
+ dev_dbg(smi->dev,
+ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
+ vid, vlanmc.member, vlanmc.untag);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
+
+int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+ unsigned int vid)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ int mc;
+ int ret;
+
+ if (!smi->ops->is_vlan_valid(smi, vid))
+ return -EINVAL;
+
+ /* Find or allocate a member config for this VID */
+ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ if (ret < 0)
+ return ret;
+ mc = ret;
+
+ ret = smi->ops->set_mc_index(smi, port, mc);
+ if (ret) {
+ dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
+ mc, port);
+ return ret;
+ }
+
+ dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
+ port, vid, mc);
+
+ return 0;
+}
EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
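A short usage sketch of the helper's return convention, mirroring the callers above: a non-negative value is the member-config index, a negative value is an errno to propagate.

	ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
	if (ret < 0)
		return ret;        /* -ENOSPC when all MC slots are used, or a bus error */
	mc = ret;                  /* >= 0: index of the (possibly newly set up) member config */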
@@ -376,7 +415,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
if (!smi->ops->is_vlan_valid(smi, vid))
return;
- dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
+ dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+ vlan->vid_begin,
port,
untagged ? "untagged" : "tagged",
pvid ? " PVID" : "no PVID");
@@ -384,36 +424,31 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
dev_err(smi->dev, "port is DSA or CPU port\n");
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- int pvid_val = 0;
-
- dev_info(smi->dev, "add VLAN %04x\n", vid);
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
member |= BIT(port);
if (untagged)
untag |= BIT(port);
- /* To ensure that we have a valid MC entry for this VLAN,
- * initialize the port VLAN ID here.
- */
- ret = rtl8366_get_pvid(smi, port, &pvid_val);
- if (ret < 0) {
- dev_err(smi->dev, "could not lookup PVID for port %d\n",
- port);
- return;
- }
- if (pvid_val == 0) {
- ret = rtl8366_set_pvid(smi, port, vid);
- if (ret < 0)
- return;
- }
- }
+ ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to set up VLAN %04x",
+ vid);
- ret = rtl8366_set_vlan(smi, port, member, untag, 0);
- if (ret)
- dev_err(smi->dev,
- "failed to set up VLAN %04x",
- vid);
+ if (!pvid)
+ continue;
+
+ ret = rtl8366_set_pvid(smi, port, vid);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to set PVID on port %d to VLAN %04x",
+ port, vid);
+
+ if (!ret)
+ dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
+ vid, port);
+ }
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
@@ -439,13 +474,19 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
return ret;
if (vid == vlanmc.vid) {
- /* clear VLAN member configurations */
- vlanmc.vid = 0;
- vlanmc.priority = 0;
- vlanmc.member = 0;
- vlanmc.untag = 0;
- vlanmc.fid = 0;
-
+ /* Remove this port from the VLAN */
+ vlanmc.member &= ~BIT(port);
+ vlanmc.untag &= ~BIT(port);
+ /*
+ * If no ports are members of this VLAN
+ * anymore then clear the whole member
+ * config so it can be reused.
+ */
+ if (!vlanmc.member && !vlanmc.untag) {
+ vlanmc.vid = 0;
+ vlanmc.priority = 0;
+ vlanmc.fid = 0;
+ }
ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
if (ret) {
dev_err(smi->dev,
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index f4b14b6acd22..5aefd7a4696a 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -1270,7 +1270,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
if (smi->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
- if (vlan == 0 || vlan >= max)
+ if (vlan == 0 || vlan > max)
return false;
return true;
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 3143de45baaa..d249a4309da2 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -433,7 +433,7 @@ static void emac_timeout(struct net_device *dev)
/* Hardware start transmission.
* Send a packet to media from the upper layer.
*/
-static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct emac_board_info *db = netdev_priv(dev);
unsigned long channel;
@@ -441,7 +441,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
channel = db->tx_fifo_stat & 3;
if (channel == 3)
- return 1;
+ return NETDEV_TX_BUSY;
channel = (channel == 1 ? 1 : 0);
@@ -847,13 +847,13 @@ static int emac_probe(struct platform_device *pdev)
db->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(db->clk)) {
ret = PTR_ERR(db->clk);
- goto out_iounmap;
+ goto out_dispose_mapping;
}
ret = clk_prepare_enable(db->clk);
if (ret) {
dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
- goto out_iounmap;
+ goto out_dispose_mapping;
}
ret = sunxi_sram_claim(&pdev->dev);
@@ -910,6 +910,8 @@ out_release_sram:
sunxi_sram_release(&pdev->dev);
out_clk_disable_unprepare:
clk_disable_unprepare(db->clk);
+out_dispose_mapping:
+ irq_dispose_mapping(ndev->irq);
out_iounmap:
iounmap(db->membase);
out:
@@ -928,6 +930,7 @@ static int emac_remove(struct platform_device *pdev)
unregister_netdev(ndev);
sunxi_sram_release(&pdev->dev);
clk_disable_unprepare(db->clk);
+ irq_dispose_mapping(ndev->irq);
iounmap(db->membase);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 3afc0e59a2bd..d07f7f65169a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -2137,6 +2137,9 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
rss->hash_key;
int rc;
+ if (unlikely(!func))
+ return -EINVAL;
+
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_FUNCTION,
rss->hash_key_dma_addr,
@@ -2149,8 +2152,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
if (rss->hash_func)
rss->hash_func--;
- if (func)
- *func = rss->hash_func;
+ *func = rss->hash_func;
if (key)
memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 8736718b1735..9aea4cf19d0c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2433,16 +2433,9 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
goto err_mmio_read_less;
}
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
if (rc) {
- dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
- goto err_mmio_read_less;
- }
-
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
- if (rc) {
- dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
- rc);
+ dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
goto err_mmio_read_less;
}
@@ -2647,16 +2640,14 @@ static void ena_fw_reset_device(struct work_struct *work)
{
struct ena_adapter *adapter =
container_of(work, struct ena_adapter, reset_task);
- struct pci_dev *pdev = adapter->pdev;
- if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
- dev_err(&pdev->dev,
- "device reset schedule while reset bit is off\n");
- return;
- }
rtnl_lock();
- ena_destroy_device(adapter, false);
- ena_restore_device(adapter);
+
+ if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+ ena_destroy_device(adapter, false);
+ ena_restore_device(adapter);
+ }
+
rtnl_unlock();
}
@@ -2738,7 +2729,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
}
u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.missed_tx = missed_tx;
+ tx_ring->tx_stats.missed_tx += missed_tx;
u64_stats_update_end(&tx_ring->syncp);
return rc;
@@ -3185,6 +3176,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
+ if (rc) {
+ dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
+ goto err_disable_device;
+ }
+
pci_set_master(pdev);
ena_dev = vzalloc(sizeof(*ena_dev));
@@ -3392,8 +3389,11 @@ static void ena_remove(struct pci_dev *pdev)
netdev->rx_cpu_rmap = NULL;
}
#endif /* CONFIG_RFS_ACCEL */
- del_timer_sync(&adapter->timer_service);
+ /* Make sure timer and reset routine won't be called after
+ * freeing device resources.
+ */
+ del_timer_sync(&adapter->timer_service);
cancel_work_sync(&adapter->reset_task);
unregister_netdev(netdev);
@@ -3543,6 +3543,9 @@ static void ena_keep_alive_wd(void *adapter_data,
rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
u64_stats_update_begin(&adapter->syncp);
+ /* These stats are accumulated by the device, so the counters indicate
+ * all drops since last reset.
+ */
adapter->dev_stats.rx_drops = rx_drops;
u64_stats_update_end(&adapter->syncp);
}
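The stats changes above follow the usual u64_stats writer pattern, which lets 32-bit readers detect and retry a torn 64-bit update; roughly:

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.missed_tx += missed_tx;   /* accumulate rather than overwrite */
	u64_stats_update_end(&tx_ring->syncp);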
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f5ad12c10934..da84660ceae1 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1548,8 +1548,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- ioaddr = pci_resource_start(pdev, 0);
- if (!ioaddr) {
+ if (!pci_resource_len(pdev, 0)) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("card has no PCI IO resources, aborting\n");
err = -ENODEV;
@@ -1562,6 +1561,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
pr_err("architecture does not support 32bit PCI busmaster DMA\n");
goto err_disable_dev;
}
+
+ ioaddr = pci_resource_start(pdev, 0);
if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("io address range already allocated\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index b40d4377cc71..b2cd3bdba9f8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1279,10 +1279,18 @@
#define MDIO_PMA_10GBR_FECCTRL 0x00ab
#endif
+#ifndef MDIO_PMA_RX_CTRL1
+#define MDIO_PMA_RX_CTRL1 0x8051
+#endif
+
#ifndef MDIO_PCS_DIG_CTRL
#define MDIO_PCS_DIG_CTRL 0x8000
#endif
+#ifndef MDIO_PCS_DIGITAL_STAT
+#define MDIO_PCS_DIGITAL_STAT 0x8010
+#endif
+
#ifndef MDIO_AN_XNP
#define MDIO_AN_XNP 0x0016
#endif
@@ -1358,6 +1366,8 @@
#define XGBE_KR_TRAINING_ENABLE BIT(1)
#define XGBE_PCS_CL37_BP BIT(12)
+#define XGBE_PCS_PSEQ_STATE_MASK 0x1c
+#define XGBE_PCS_PSEQ_STATE_POWER_GOOD 0x10
#define XGBE_AN_CL37_INT_CMPLT BIT(0)
#define XGBE_AN_CL37_INT_MASK 0x01
@@ -1375,6 +1385,10 @@
#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
#define XGBE_PMA_CDR_TRACK_EN_ON 0x01
+#define XGBE_PMA_RX_RST_0_MASK BIT(4)
+#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
+#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
+
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index d96a84a62d78..80cf6af822f7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -515,7 +515,7 @@ static void xgbe_isr_task(unsigned long data)
xgbe_disable_rx_tx_ints(pdata);
/* Turn on polling */
- __napi_schedule_irqoff(&pdata->napi);
+ __napi_schedule(&pdata->napi);
}
} else {
/* Don't clear Rx/Tx status if doing per channel DMA
@@ -1444,6 +1444,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
return;
netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(pdata->netdev);
xgbe_stop_timers(pdata);
flush_workqueue(pdata->dev_workqueue);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 8a3a60bb2688..156a0bc8ab01 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1345,7 +1345,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
&an_restart);
if (an_restart) {
xgbe_phy_config_aneg(pdata);
- return;
+ goto adjust_link;
}
if (pdata->phy.link) {
@@ -1396,7 +1396,6 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
pdata->phy_if.phy_impl.stop(pdata);
pdata->phy.link = 0;
- netif_carrier_off(pdata->netdev);
xgbe_phy_adjust_link(pdata);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 3ceb4f95ca7c..54753c8a6a9d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -912,6 +912,9 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
if ((phy_id & 0xfffffff0) != 0x03625d10)
return false;
+ /* Reset PHY - wait for self-clearing reset bit to clear */
+ genphy_soft_reset(phy_data->phydev);
+
/* Disable RGMII mode */
phy_write(phy_data->phydev, 0x18, 0x7007);
reg = phy_read(phy_data->phydev, 0x18);
@@ -1942,6 +1945,27 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
xgbe_phy_put_comm_ownership(pdata);
}
+static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PCS, MDIO_PCS_DIGITAL_STAT,
+ XGBE_PCS_PSEQ_STATE_MASK);
+ if (reg == XGBE_PCS_PSEQ_STATE_POWER_GOOD) {
+ /* Mailbox command timed out, reset of RX block is required.
+ * This can be done by asserting the reset bit and waiting for
+ * its completion.
+ */
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
+ XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_ON);
+ ndelay(20);
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
+ XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_OFF);
+ usleep_range(40, 50);
+ netif_err(pdata, link, pdata->netdev, "firmware mailbox reset performed\n");
+ }
+}
+
static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
unsigned int cmd, unsigned int sub_cmd)
{
@@ -1949,9 +1973,11 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
unsigned int wait;
/* Log if a previous command did not complete */
- if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) {
netif_dbg(pdata, link, pdata->netdev,
"firmware mailbox not ready for command\n");
+ xgbe_phy_rx_reset(pdata);
+ }
/* Construct the command */
XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd);
@@ -1973,6 +1999,9 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
netif_dbg(pdata, link, pdata->netdev,
"firmware mailbox command did not complete\n");
+
+ /* Reset on error */
+ xgbe_phy_rx_reset(pdata);
}
static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
@@ -2569,6 +2598,14 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
if (reg & MDIO_STAT1_LSTATUS)
return 1;
+ if (pdata->phy.autoneg == AUTONEG_ENABLE &&
+ phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) {
+ if (!test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
+ netif_carrier_off(pdata->netdev);
+ *an_restart = 1;
+ }
+ }
+
/* No link, attempt a receiver reset cycle */
if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
phy_data->rrc_count = 0;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 47bcbcf58048..0c93a552b921 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -181,9 +181,9 @@
#define XGBE_DMA_SYS_AWCR 0x30303030
/* DMA cache settings - PCI device */
-#define XGBE_DMA_PCI_ARCR 0x00000003
-#define XGBE_DMA_PCI_AWCR 0x13131313
-#define XGBE_DMA_PCI_AWARCR 0x00000313
+#define XGBE_DMA_PCI_ARCR 0x000f0f0f
+#define XGBE_DMA_PCI_AWCR 0x0f0f0f0f
+#define XGBE_DMA_PCI_AWARCR 0x00000f0f
/* DMA channel interrupt modes */
#define XGBE_IRQ_MODE_EDGE 0
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 6a8e2567f2bd..ab6ce85540b8 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1181,7 +1181,7 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea)
int i;
unsigned short data;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 3; i++)
{
reset_and_select_srom(dev);
data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e3ae29e523f0..daf841ae337d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -50,8 +50,10 @@ static int aq_ndev_open(struct net_device *ndev)
if (err < 0)
goto err_exit;
err = aq_nic_start(aq_nic);
- if (err < 0)
+ if (err < 0) {
+ aq_nic_stop(aq_nic);
goto err_exit;
+ }
err_exit:
if (err < 0)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 15dcfb6704e5..adac5df0d6b4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -620,6 +620,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
u32 *regs_buff = p;
int err = 0;
+ if (unlikely(!self->aq_hw_ops->hw_get_regs))
+ return -EOPNOTSUPP;
+
regs->version = 1;
err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
@@ -634,6 +637,9 @@ err_exit:
int aq_nic_get_regs_count(struct aq_nic_s *self)
{
+ if (unlikely(!self->aq_hw_ops->hw_get_regs))
+ return 0;
+
return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 750007513f9d..43dbfb228b0e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -60,7 +60,7 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
{ AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },
- { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, },
{ AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
{ AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
{ AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index dab5891b9714..d48595470ec8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -774,7 +774,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
int err = 0;
if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
- err = EBADRQC;
+ err = -EBADRQC;
goto err_exit;
}
for (self->aq_nic_cfg->mc_list_count = 0U;
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 0187dbf3b87d..54cdafdd067d 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -153,6 +153,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
if (IS_ERR(data->reset_gpio)) {
error = PTR_ERR(data->reset_gpio);
dev_err(priv->dev, "Failed to request gpio: %d\n", error);
+ mdiobus_free(bus);
return error;
}
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 6d3221134927..d83ad06bf199 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1250,8 +1250,12 @@ out_disable_adv_intr:
static void __alx_stop(struct alx_priv *alx)
{
- alx_halt(alx);
alx_free_irq(alx);
+
+ cancel_work_sync(&alx->link_check_wk);
+ cancel_work_sync(&alx->reset_wk);
+
+ alx_halt(alx);
alx_free_rings(alx);
alx_free_napis(alx);
}
@@ -1861,9 +1865,6 @@ static void alx_remove(struct pci_dev *pdev)
struct alx_priv *alx = pci_get_drvdata(pdev);
struct alx_hw *hw = &alx->hw;
- cancel_work_sync(&alx->link_check_wk);
- cancel_work_sync(&alx->reset_wk);
-
/* restore permanent mac address */
alx_set_macaddr(hw, hw->perm_addr);
@@ -1901,13 +1902,16 @@ static int alx_resume(struct device *dev)
if (!netif_running(alx->dev))
return 0;
- netif_device_attach(alx->dev);
rtnl_lock();
err = __alx_open(alx, true);
rtnl_unlock();
+ if (err)
+ return err;
- return err;
+ netif_device_attach(alx->dev);
+
+ return 0;
}
static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 88f8d31e4c83..7aeb2805fec4 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2389,7 +2389,8 @@ static int b44_init_one(struct ssb_device *sdev,
goto err_out_free_dev;
}
- if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
+ err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
+ if (err) {
dev_err(sdev->dev,
"Required 30BIT DMA mask unsupported by the system\n");
goto err_out_powerdown;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 3fdf135bad56..0c69becc3c17 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -677,7 +677,8 @@ static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
dma_addr_t mapping;
/* Allocate a new SKB for a new packet */
- skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+ skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
+ GFP_ATOMIC | __GFP_NOWARN);
if (!skb) {
priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
@@ -2440,8 +2441,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
sizeof(struct bcm_sysport_tx_ring),
GFP_KERNEL);
- if (!priv->tx_rings)
- return -ENOMEM;
+ if (!priv->tx_rings) {
+ ret = -ENOMEM;
+ goto err_free_netdev;
+ }
priv->is_lite = params->is_lite;
priv->num_rx_desc_words = params->num_rx_desc_words;
@@ -2504,6 +2507,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
/* HW supported features, none enabled by default */
dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->max_mtu = UMAC_MAX_MTU_SIZE;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 122fdb80a789..9993f1162ac6 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8253,9 +8253,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
BNX2_WR(bp, PCI_COMMAND, reg);
} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
!(bp->flags & BNX2_FLAG_PCIX)) {
-
dev_err(&pdev->dev,
"5706 A1 can only be used in a PCIX bus, aborting\n");
+ rc = -EPERM;
goto err_out_unmap;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f008c91d4566..6033970fb667 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6298,9 +6298,10 @@ void bnxt_tx_disable(struct bnxt *bp)
txr->dev_state = BNXT_DEV_STATE_CLOSING;
}
}
+ /* Drop carrier first to prevent TX timeout */
+ netif_carrier_off(bp->dev);
/* Stop all TX queues */
netif_tx_disable(bp->dev);
- netif_carrier_off(bp->dev);
}
void bnxt_tx_enable(struct bnxt *bp)
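The reordering above relies on the netdev TX watchdog only reporting stuck queues while the carrier is up, so dropping the carrier before freezing the queues avoids a spurious TX timeout during shutdown. In outline:

	netif_carrier_off(bp->dev);    /* watchdog stops monitoring this device */
	netif_tx_disable(bp->dev);     /* now safe to stop the TX queues */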
@@ -6326,6 +6327,11 @@ static void bnxt_report_link(struct bnxt *bp)
u16 fec;
netif_carrier_on(bp->dev);
+ speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ if (speed == SPEED_UNKNOWN) {
+ netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
+ return;
+ }
if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
duplex = "full";
else
@@ -6338,7 +6344,6 @@ static void bnxt_report_link(struct bnxt *bp)
flow_ctrl = "ON - receive";
else
flow_ctrl = "none";
- speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
speed, duplex, flow_ctrl);
if (bp->flags & BNXT_FLAG_EEE_CAP)
@@ -6836,16 +6841,19 @@ static ssize_t bnxt_show_temp(struct device *dev,
struct hwrm_temp_monitor_query_input req = {0};
struct hwrm_temp_monitor_query_output *resp;
struct bnxt *bp = dev_get_drvdata(dev);
- u32 temp = 0;
+ u32 len = 0;
+ int rc;
resp = bp->hwrm_cmd_resp_addr;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
- if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
- temp = resp->temp * 1000; /* display millidegree */
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
mutex_unlock(&bp->hwrm_cmd_lock);
-
- return sprintf(buf, "%u\n", temp);
+ if (rc)
+ return rc;
+ return len;
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
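The reworked bnxt_show_temp() above follows the standard sysfs ->show() contract: return a negative errno on failure, otherwise the number of bytes placed in buf. A minimal sketch of that contract; example_show() and read_temp_mdeg() are illustrative names only, not part of the driver:

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		int temp = read_temp_mdeg(dev);     /* hypothetical helper: millidegrees or -errno */

		if (temp < 0)
			return temp;                /* error is returned to the reader */
		return sprintf(buf, "%d\n", temp);  /* byte count on success */
	}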
@@ -6865,7 +6873,16 @@ static void bnxt_hwmon_close(struct bnxt *bp)
static void bnxt_hwmon_open(struct bnxt *bp)
{
+ struct hwrm_temp_monitor_query_input req = {0};
struct pci_dev *pdev = bp->pdev;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == -EACCES || rc == -EOPNOTSUPP) {
+ bnxt_hwmon_close(bp);
+ return;
+ }
bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
DRV_MODULE_NAME, bp,
@@ -7024,15 +7041,15 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
}
- bnxt_enable_napi(bp);
- bnxt_debug_dev_init(bp);
-
rc = bnxt_init_nic(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
- goto open_err;
+ goto open_err_irq;
}
+ bnxt_enable_napi(bp);
+ bnxt_debug_dev_init(bp);
+
if (link_re_init) {
mutex_lock(&bp->link_lock);
rc = bnxt_update_phy_setting(bp);
@@ -7063,10 +7080,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_vf_reps_open(bp);
return 0;
-open_err:
- bnxt_debug_dev_exit(bp);
- bnxt_disable_napi(bp);
-
open_err_irq:
bnxt_del_napi(bp);
@@ -7177,7 +7190,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
- if (bp->bnapi)
+ if (bp->bnapi && irq_re_init)
bnxt_get_ring_stats(bp, &bp->net_stats_prev);
if (irq_re_init) {
bnxt_free_irq(bp);
@@ -7562,6 +7575,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct bnxt *bp = netdev_priv(dev);
+ netdev_features_t vlan_features;
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
@@ -7578,12 +7592,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
/* Both CTAG and STAG VLAN acceleration on the RX side have to be
* turned on or off together.
*/
- if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
- (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+ vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX)) {
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_RX);
- else
+ else if (vlan_features)
features |= NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_RX;
}
@@ -8008,7 +8024,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
dev_err(&pdev->dev, "System does not support DMA, aborting\n");
- goto init_err_disable;
+ rc = -EIO;
+ goto init_err_release;
}
pci_set_master(pdev);
@@ -9107,6 +9124,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
create_singlethread_workqueue("bnxt_pf_wq");
if (!bnxt_pf_wq) {
dev_err(&pdev->dev, "Unable to create workqueue.\n");
+ rc = -ENOMEM;
goto init_err_pci_clean;
}
}
@@ -9125,6 +9143,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(long)pci_resource_start(pdev, 0), dev->dev_addr);
pcie_print_link_status(pdev);
+ pci_save_state(pdev);
return 0;
init_err_cleanup_tc:
@@ -9286,6 +9305,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
"Cannot re-enable PCI device after reset.\n");
} else {
pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
err = bnxt_hwrm_func_reset(bp);
if (!err && netif_running(netdev))
@@ -9297,8 +9318,11 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
}
}
- if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
- dev_close(netdev);
+ if (result != PCI_ERS_RESULT_RECOVERED) {
+ if (netif_running(netdev))
+ dev_close(netdev);
+ pci_disable_device(pdev);
+ }
rtnl_unlock();
@@ -9309,7 +9333,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
err); /* non-fatal, continue */
}
- return PCI_ERS_RESULT_RECOVERED;
+ return result;
}
/**
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 585f5aef0a45..f3f5484c43e4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -839,7 +839,6 @@ struct bnxt_vf_info {
#define BNXT_VF_LINK_FORCED 0x4
#define BNXT_VF_LINK_UP 0x8
#define BNXT_VF_TRUST 0x10
- u32 func_flags; /* func cfg flags */
u32 min_tx_rate;
u32 max_tx_rate;
void *hwrm_cmd_req_addr;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 047024717d65..511240e8246f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -471,7 +471,7 @@ static void bnxt_get_channels(struct net_device *dev,
int max_tx_sch_inputs;
/* Get the most up-to-date max_tx_sch_inputs. */
- if (BNXT_NEW_RM(bp))
+ if (netif_running(dev) && BNXT_NEW_RM(bp))
bnxt_hwrm_func_resc_qcaps(bp, false);
max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
@@ -1369,9 +1369,12 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (!BNXT_SINGLE_PF(bp))
return -EOPNOTSUPP;
+ mutex_lock(&bp->link_lock);
if (epause->autoneg) {
- if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
- return -EINVAL;
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ rc = -EINVAL;
+ goto pause_exit;
+ }
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
if (bp->hwrm_spec_code >= 0x10201)
@@ -1394,6 +1397,9 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (netif_running(dev))
rc = bnxt_hwrm_set_pause(bp);
+
+pause_exit:
+ mutex_unlock(&bp->link_lock);
return rc;
}
@@ -1874,6 +1880,9 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
if (rc != 0)
return rc;
+ if (!dir_entries || !entry_length)
+ return -EIO;
+
/* Insert 2 bytes of directory info (count and size of entries) */
if (len < 2)
return -EINVAL;
@@ -2107,8 +2116,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
struct bnxt *bp = netdev_priv(dev);
struct ethtool_eee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
- u32 advertising =
- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ u32 advertising;
int rc = 0;
if (!BNXT_SINGLE_PF(bp))
@@ -2117,19 +2125,23 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
if (!(bp->flags & BNXT_FLAG_EEE_CAP))
return -EOPNOTSUPP;
+ mutex_lock(&bp->link_lock);
+ advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
if (!edata->eee_enabled)
goto eee_ok;
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
netdev_warn(dev, "EEE requires autoneg\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto eee_exit;
}
if (edata->tx_lpi_enabled) {
if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
bp->lpi_tmr_lo, bp->lpi_tmr_hi);
- return -EINVAL;
+ rc = -EINVAL;
+ goto eee_exit;
} else if (!bp->lpi_tmr_hi) {
edata->tx_lpi_timer = eee->tx_lpi_timer;
}
@@ -2139,7 +2151,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
} else if (edata->advertised & ~advertising) {
netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
edata->advertised, advertising);
- return -EINVAL;
+ rc = -EINVAL;
+ goto eee_exit;
}
eee->advertised = edata->advertised;
@@ -2151,6 +2164,8 @@ eee_ok:
if (netif_running(dev))
rc = bnxt_hwrm_set_link_setting(bp, false, true);
+eee_exit:
+ mutex_unlock(&bp->link_lock);
return rc;
}
@@ -2285,7 +2300,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
/* Read A2 portion of the EEPROM */
if (length) {
start -= ETH_MODULE_SFF_8436_LEN;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0,
start, length, data);
}
return rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 3962f6fd543c..ff53e597938a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -99,11 +99,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
if (old_setting == setting)
return 0;
- func_flags = vf->func_flags;
if (setting)
- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
else
- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
/*TODO: if the driver supports VLAN filter on guest VLAN,
* the spoof check should also include vlan anti-spoofing
*/
@@ -112,7 +111,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
req.flags = cpu_to_le32(func_flags);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
- vf->func_flags = func_flags;
if (setting)
vf->flags |= BNXT_VF_SPOOFCHK;
else
@@ -197,7 +195,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
memcpy(vf->mac_addr, mac, ETH_ALEN);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -235,7 +232,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
req.dflt_vlan = cpu_to_le16(vlan_tag);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -274,7 +270,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
- req.flags = cpu_to_le32(vf->func_flags);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
req.max_bw = cpu_to_le32(max_tx_rate);
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
@@ -370,6 +365,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp)
}
}
+ bp->pf.active_vfs = 0;
kfree(bp->pf.vf);
bp->pf.vf = NULL;
}
@@ -755,7 +751,6 @@ void bnxt_sriov_disable(struct bnxt *bp)
bnxt_free_vf_resources(bp);
- bp->pf.active_vfs = 0;
/* Reclaim all resources for the PF. */
rtnl_lock();
bnxt_restore_pf_fw_resources(bp);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 736a6a5fbd98..c3e824f5e50e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -72,6 +72,9 @@
#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
TOTAL_DESC * DMA_DESC_SIZE)
+/* Forward declarations */
+static void bcmgenet_set_rx_mode(struct net_device *dev);
+
static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
/* MIPS chips strapped for BE will automagically configure the
@@ -998,6 +1001,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
if (netif_running(dev))
bcmgenet_update_mib_counters(priv);
+ dev->netdev_ops->ndo_get_stats(dev);
+
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
const struct bcmgenet_stats *s;
char *p;
@@ -1588,11 +1593,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
- if (skb_padto(skb, ETH_ZLEN)) {
- ret = NETDEV_TX_OK;
- goto out;
- }
-
/* Retain how many bytes will be sent on the wire, without TSB inserted
* by transmit checksum offload
*/
@@ -1641,6 +1641,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
len_stat = (size << DMA_BUFLENGTH_SHIFT) |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
+ /* Note: if we ever change from DMA_TX_APPEND_CRC below we
+ * will need to restore software padding of "runt" packets
+ */
if (!i) {
len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -1697,7 +1700,8 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
dma_addr_t mapping;
/* Allocate a new Rx skb */
- skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
+ skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
+ GFP_ATOMIC | __GFP_NOWARN);
if (!skb) {
priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, priv->dev,
@@ -2856,6 +2860,7 @@ static void bcmgenet_netif_start(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
+ bcmgenet_set_rx_mode(dev);
bcmgenet_enable_rx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3211,6 +3216,7 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
dev->stats.rx_packets = rx_packets;
dev->stats.rx_errors = rx_errors;
dev->stats.rx_missed_errors = rx_errors;
+ dev->stats.rx_dropped = rx_dropped;
return &dev->stats;
}
@@ -3587,8 +3593,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
err = register_netdev(dev);
- if (err)
+ if (err) {
+ bcmgenet_mii_exit(dev);
goto err;
+ }
return err;
@@ -3612,36 +3620,6 @@ static int bcmgenet_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int bcmgenet_suspend(struct device *d)
-{
- struct net_device *dev = dev_get_drvdata(d);
- struct bcmgenet_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- if (!netif_running(dev))
- return 0;
-
- netif_device_detach(dev);
-
- bcmgenet_netif_stop(dev);
-
- if (!device_may_wakeup(d))
- phy_suspend(dev->phydev);
-
- /* Prepare the device for Wake-on-LAN and switch to the slow clock */
- if (device_may_wakeup(d) && priv->wolopts) {
- ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
- clk_prepare_enable(priv->clk_wol);
- } else if (priv->internal_phy) {
- ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
- }
-
- /* Turn off the clocks */
- clk_disable_unprepare(priv->clk);
-
- return ret;
-}
-
static int bcmgenet_resume(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
@@ -3720,6 +3698,39 @@ out_clk_disable:
clk_disable_unprepare(priv->clk);
return ret;
}
+
+static int bcmgenet_suspend(struct device *d)
+{
+ struct net_device *dev = dev_get_drvdata(d);
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_detach(dev);
+
+ bcmgenet_netif_stop(dev);
+
+ if (!device_may_wakeup(d))
+ phy_suspend(dev->phydev);
+
+ /* Prepare the device for Wake-on-LAN and switch to the slow clock */
+ if (device_may_wakeup(d) && priv->wolopts) {
+ ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
+ clk_prepare_enable(priv->clk_wol);
+ } else if (priv->internal_phy) {
+ ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+ }
+
+ /* Turn off the clocks */
+ clk_disable_unprepare(priv->clk);
+
+ if (ret)
+ bcmgenet_resume(d);
+
+ return ret;
+}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 2fbd027f0148..57582efa362d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -186,6 +186,8 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
}
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+ if (!(reg & MPD_EN))
+ return; /* already powered up so skip the rest */
reg &= ~MPD_EN;
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a12962702611..6fcf9646d141 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7219,8 +7219,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp)
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
- cancel_work_sync(&tp->reset_task);
- tg3_flag_clear(tp, RESET_TASK_PENDING);
+ if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
+ cancel_work_sync(&tp->reset_task);
tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
@@ -11213,18 +11213,27 @@ static void tg3_reset_task(struct work_struct *work)
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
err = tg3_init_hw(tp, true);
- if (err)
+ if (err) {
+ tg3_full_unlock(tp);
+ tp->irq_sync = 0;
+ tg3_napi_enable(tp);
+ /* Clear this flag so that tg3_reset_task_cancel() will not
+ * call cancel_work_sync() and wait forever.
+ */
+ tg3_flag_clear(tp, RESET_TASK_PENDING);
+ dev_close(tp->dev);
goto out;
+ }
tg3_netif_start(tp);
-out:
tg3_full_unlock(tp);
if (!err)
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
+out:
rtnl_unlock();
}
@@ -18229,8 +18238,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
- /* We probably don't have netdev yet */
- if (!netdev || !netif_running(netdev))
+ /* Could be second call or maybe we don't have netdev yet */
+ if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
goto done;
/* We needn't recover from permanent error */
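
tg3_reset_task_cancel() above now calls cancel_work_sync() only when it atomically clears the pending flag, so it cannot block waiting for work that was never scheduled or was already handled by the reset task itself. A rough userspace analogue of that test-and-clear pattern, using C11 atomics instead of the kernel's bitops:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool reset_pending;	/* stands in for RESET_TASK_PENDING */

static void wait_for_reset_task(void)
{
	/* stand-in for cancel_work_sync() blocking on the queued work */
	puts("waiting for reset task to finish");
}

static void reset_task_cancel(void)
{
	/* only wait if this caller is the one that observed the flag set */
	if (atomic_exchange(&reset_pending, false))
		wait_for_reset_task();
}

int main(void)
{
	atomic_store(&reset_pending, true);
	reset_task_cancel();	/* flag was set: waits */
	reset_task_cancel();	/* flag already clear: returns at once */
	return 0;
}
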
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ea5f32ea308a..1e25c3b5f563 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3290,7 +3290,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
int err, mtu;
struct bnad *bnad = netdev_priv(netdev);
- u32 rx_count = 0, frame, new_frame;
+ u32 frame, new_frame;
mutex_lock(&bnad->conf_mutex);
@@ -3306,12 +3306,9 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
/* only when transition is over 4K */
if ((frame <= 4096 && new_frame > 4096) ||
(frame > 4096 && new_frame <= 4096))
- rx_count = bnad_reinit_rx(bnad);
+ bnad_reinit_rx(bnad);
}
- /* rx_count > 0 - new rx created
- * - Linux set err = 0 and return
- */
err = bnad_mtu_set(bnad, new_frame);
if (err)
err = -EBUSY;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d1ff317f3b18..0374a1ba1010 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1704,7 +1704,8 @@ static inline int macb_clear_csum(struct sk_buff *skb)
static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
- bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
+ bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
+ skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);
@@ -4060,7 +4061,7 @@ static int macb_probe(struct platform_device *pdev)
bp->wol = 0;
if (of_get_property(np, "magic-packet", NULL))
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
- device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+ device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
spin_lock_init(&bp->lock);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 9f4f3c1d5043..55fe80ca10d3 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1167,7 +1167,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
CN23XX_PCIE_SRIOV_FDL_MASK);
} else {
- ret = EINVAL;
+ ret = -EINVAL;
/* Under some virtual environments, extended PCI regs are
* inaccessible, in which case the above read will have failed.
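
The cn23xx hunk changes `ret = EINVAL` to `ret = -EINVAL`, following the kernel convention that failures are reported as negative errno values so callers can test `ret < 0` (or simply `ret`). A tiny sketch of the convention, with a hypothetical helper:

#include <errno.h>
#include <stdio.h>

static int read_pf_num(int reg_ok)
{
	if (!reg_ok)
		return -EINVAL;	/* negative errno on failure */
	return 0;		/* zero on success */
}

int main(void)
{
	int ret = read_pf_num(0);

	if (ret < 0)
		printf("failed: %d\n", ret);	/* typically -22 on Linux */
	return 0;
}
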
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
index e6d4ad99cc38..3f1c189646f4 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
@@ -521,7 +521,7 @@
#define CN23XX_BAR1_INDEX_OFFSET 3
#define CN23XX_PEM_BAR1_INDEX_REG(port, idx) \
- (CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \
+ (CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \
((idx) << CN23XX_BAR1_INDEX_OFFSET))
/*############################ DPI #########################*/
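
The CN23XX_PEM_BAR1_INDEX_REG() change casts port to u64 before shifting, so the shift is performed in 64-bit arithmetic instead of truncating in a 32-bit intermediate. An illustrative sketch with made-up values and shift amounts (the real CN23XX_PEM_OFFSET is not shown here):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t port = 0xFF;

	/* 32-bit shift: bits above bit 31 are discarded before widening */
	uint64_t lost = port << 28;		/* 0x00000000f0000000 */

	/* widen first, as the patch does with (u64)port */
	uint64_t kept = (uint64_t)port << 28;	/* 0x0000000ff0000000 */

	printf("%" PRIx64 " %" PRIx64 "\n", lost, kept);
	return 0;
}
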
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
index b248966837b4..7aad40b2aa73 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
@@ -412,7 +412,7 @@
| CN6XXX_INTR_M0UNWI_ERR \
| CN6XXX_INTR_M1UPB0_ERR \
| CN6XXX_INTR_M1UPWI_ERR \
- | CN6XXX_INTR_M1UPB0_ERR \
+ | CN6XXX_INTR_M1UNB0_ERR \
| CN6XXX_INTR_M1UNWI_ERR \
| CN6XXX_INTR_INSTR_DB_OF_ERR \
| CN6XXX_INTR_SLIST_DB_OF_ERR \
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 0957e735cdc4..cf4c92bdd9b8 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -235,6 +235,11 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
/* Put it in the ring. */
p->rx_ring[p->rx_next_fill] = re.d64;
+ /* Make sure there is no reorder of filling the ring and ringing
+ * the bell
+ */
+ wmb();
+
dma_sync_single_for_device(p->dev, p->rx_ring_handle,
ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
DMA_BIDIRECTIONAL);
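
The octeon_mgmt hunk inserts wmb() so the descriptor store into rx_ring[] is ordered before the subsequent DMA sync and doorbell write. Outside the kernel, the producer-side ordering idea can be expressed with a C11 release fence; a rough analogue with a hypothetical ring and doorbell variable (a real MMIO doorbell additionally needs the kernel's I/O accessors, which this sketch does not model):

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 64

static uint64_t rx_ring[RING_SIZE];
static _Atomic uint32_t doorbell;	/* stands in for the bell register */

static void ring_fill(int idx, uint64_t desc)
{
	rx_ring[idx] = desc;

	/* make the descriptor store visible before the bell is rung,
	 * analogous to the wmb() added in the patch */
	atomic_thread_fence(memory_order_release);

	atomic_store_explicit(&doorbell, 1, memory_order_relaxed);
}

int main(void)
{
	ring_fill(0, 0xabcdULL);
	return 0;
}
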
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index dca02b35c231..99eea9e6a8ea 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -2015,11 +2015,11 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
/* Save message data locally to prevent them from
* being overwritten by next ndo_set_rx_mode call().
*/
- spin_lock(&nic->rx_mode_wq_lock);
+ spin_lock_bh(&nic->rx_mode_wq_lock);
mode = vf_work->mode;
mc = vf_work->mc;
vf_work->mc = NULL;
- spin_unlock(&nic->rx_mode_wq_lock);
+ spin_unlock_bh(&nic->rx_mode_wq_lock);
__nicvf_set_rx_mode_task(mode, mc, nic);
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 9a4cfa61ed93..d9bcbe469ab9 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -779,7 +779,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
mbx.rq.qs_num = qs->vnic_id;
mbx.rq.rq_num = qidx;
- mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
+ mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) |
(rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
(rq->cont_qs_rbdr_idx << 8) |
(rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 20b6e1b3f5e3..5c26d50fe0e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -3176,6 +3176,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
GFP_KERNEL | __GFP_COMP);
if (!avail) {
CH_ALERT(adapter, "free list queue 0 initialization failed\n");
+ ret = -ENOMEM;
goto err;
}
if (avail < q->fl[0].size)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index b766362031c3..544c88f19f24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -1065,9 +1065,9 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
}
}
-static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
- struct cudbg_error *cudbg_err,
- u8 mem_type)
+static int cudbg_mem_region_size(struct cudbg_init *pdbg_init,
+ struct cudbg_error *cudbg_err,
+ u8 mem_type, unsigned long *region_size)
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_meminfo mem_info;
@@ -1076,15 +1076,23 @@ static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
rc = cudbg_fill_meminfo(padap, &mem_info);
- if (rc)
+ if (rc) {
+ cudbg_err->sys_err = rc;
return rc;
+ }
cudbg_t4_fwcache(pdbg_init, cudbg_err);
rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
- if (rc)
+ if (rc) {
+ cudbg_err->sys_err = rc;
return rc;
+ }
+
+ if (region_size)
+ *region_size = mem_info.avail[mc_idx].limit -
+ mem_info.avail[mc_idx].base;
- return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+ return 0;
}
static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
@@ -1092,7 +1100,12 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err,
u8 mem_type)
{
- unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type);
+ unsigned long size = 0;
+ int rc;
+
+ rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
+ if (rc)
+ return rc;
return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
cudbg_err);
@@ -1387,11 +1400,25 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer temp_buff = { 0 };
struct sge_qbase_reg_field *sge_qbase;
struct ireg_buf *ch_sge_dbg;
+ u8 padap_running = 0;
int i, rc;
+ u32 size;
- rc = cudbg_get_buff(pdbg_init, dbg_buff,
- sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
- &temp_buff);
+ /* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can
+ * lead to SGE missing doorbells under heavy traffic. So, only
+ * collect them when adapter is idle.
+ */
+ for_each_port(padap, i) {
+ padap_running = netif_running(padap->port[i]);
+ if (padap_running)
+ break;
+ }
+
+ size = sizeof(*ch_sge_dbg) * 2;
+ if (!padap_running)
+ size += sizeof(*sge_qbase);
+
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1413,7 +1440,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
ch_sge_dbg++;
}
- if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 &&
+ !padap_running) {
sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
/* 1 addr reg SGE_QBASE_INDEX and 4 data reg
* SGE_QBASE_MAP[0-3]
@@ -1974,7 +2002,6 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
u8 mem_type[CTXT_INGRESS + 1] = { 0 };
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_ch_cntxt *buff;
- u64 *dst_off, *src_off;
u8 *ctx_buf;
u8 i, k;
int rc;
@@ -2043,8 +2070,11 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
}
for (j = 0; j < max_ctx_qid; j++) {
+ __be64 *dst_off;
+ u64 *src_off;
+
src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
- dst_off = (u64 *)buff->data;
+ dst_off = (__be64 *)buff->data;
/* The data is stored in 64-bit cpu order. Convert it
* to big endian before parsing.
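
cudbg_mem_region_size() above is reworked to return an int status and hand the size back through an out-parameter, so an error can no longer be confused with a valid size. A small sketch of that signature style, with hypothetical names and values:

#include <errno.h>
#include <stdio.h>

/* old style: the return value is the size, errors are ambiguous */
static unsigned long region_size_old(int ok)
{
	return ok ? 4096UL : 0UL;	/* is 0 an error or an empty region? */
}

/* new style: status in the return value, size via out-parameter */
static int region_size_new(int ok, unsigned long *size)
{
	if (!ok)
		return -EINVAL;
	if (size)
		*size = 4096UL;
	return 0;
}

int main(void)
{
	unsigned long size = 0;

	if (!region_size_new(1, &size))
		printf("size=%lu\n", size);
	(void)region_size_old;
	return 0;
}
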
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 4af6e6ffc5df..3caf8bd1e00e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2730,7 +2730,7 @@ do { \
seq_printf(seq, "%-12s", s); \
for (i = 0; i < n; ++i) \
seq_printf(seq, " %16" fmt_spec, v); \
- seq_putc(seq, '\n'); \
+ seq_putc(seq, '\n'); \
} while (0)
#define S(s, v) S3("s", s, v)
#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 7dddb9e748b8..9160b44c68bb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -145,13 +145,13 @@ static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
int err;
/* do a set-tcb for smac-sel and CWR bit.. */
- err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
- if (err)
- goto smac_err;
-
err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
TCB_SMAC_SEL_V(f->smt->idx), 1);
+ if (err)
+ goto smac_err;
+
+ err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
if (!err)
return 0;
@@ -608,6 +608,7 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
+ FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE) |
FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
@@ -625,7 +626,8 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
- fwr->smac_sel = 0;
+ if (f->fs.newsmac)
+ fwr->smac_sel = f->smt->idx;
fwr->rx_chan_rx_rpl_iq =
htons(FW_FILTER_WR_RX_CHAN_V(0) |
FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
@@ -810,16 +812,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family)
struct in_addr *addr;
addr = (struct in_addr *)ipmask;
- if (addr->s_addr == 0xffffffff)
+ if (addr->s_addr == htonl(0xffffffff))
return true;
} else if (family == AF_INET6) {
struct in6_addr *addr6;
addr6 = (struct in6_addr *)ipmask;
- if (addr6->s6_addr32[0] == 0xffffffff &&
- addr6->s6_addr32[1] == 0xffffffff &&
- addr6->s6_addr32[2] == 0xffffffff &&
- addr6->s6_addr32[3] == 0xffffffff)
+ if (addr6->s6_addr32[0] == htonl(0xffffffff) &&
+ addr6->s6_addr32[1] == htonl(0xffffffff) &&
+ addr6->s6_addr32[2] == htonl(0xffffffff) &&
+ addr6->s6_addr32[3] == htonl(0xffffffff))
return true;
}
return false;
@@ -1019,11 +1021,8 @@ static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
RX_CHANNEL_F |
- CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
- (f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
- ((f->fs.dirsteerhash) << 1)) |
- CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+ ((f->fs.dirsteerhash) << 1)));
}
static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
@@ -1059,11 +1058,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
RX_CHANNEL_F |
- CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
- (f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
- ((f->fs.dirsteerhash) << 1)) |
- CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+ ((f->fs.dirsteerhash) << 1)));
}
static int cxgb4_set_hash_filter(struct net_device *dev,
@@ -1591,13 +1587,16 @@ out:
static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
struct filter_entry *f)
{
- if (f->fs.hitcnts)
+ if (f->fs.hitcnts) {
set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
- TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) |
+ TCB_TIMESTAMP_V(TCB_TIMESTAMP_M),
+ TCB_TIMESTAMP_V(0ULL),
+ 1);
+ set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W,
TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
- TCB_TIMESTAMP_V(0ULL) |
TCB_RTT_TS_RECENT_AGE_V(0ULL),
1);
+ }
if (f->fs.newdmac)
set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
@@ -1719,6 +1718,20 @@ void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
}
return;
}
+ switch (f->fs.action) {
+ case FILTER_PASS:
+ if (f->fs.dirsteer)
+ set_tcb_tflag(adap, f, tid,
+ TF_DIRECT_STEER_S, 1, 1);
+ break;
+ case FILTER_DROP:
+ set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1);
+ break;
+ case FILTER_SWITCH:
+ set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1);
+ break;
+ }
+
break;
default:
@@ -1778,22 +1791,11 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
if (ctx)
ctx->result = 0;
} else if (ret == FW_FILTER_WR_FLT_ADDED) {
- int err = 0;
-
- if (f->fs.newsmac)
- err = configure_filter_smac(adap, f);
-
- if (!err) {
- f->pending = 0; /* async setup completed */
- f->valid = 1;
- if (ctx) {
- ctx->result = 0;
- ctx->tid = idx;
- }
- } else {
- clear_filter(adap, f);
- if (ctx)
- ctx->result = err;
+ f->pending = 0; /* async setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->result = 0;
+ ctx->tid = idx;
}
} else {
/* Something went wrong. Issue a warning about the
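
is_addr_all_mask() now compares the stored mask words against htonl(0xffffffff) rather than a bare host-order constant; for the all-ones pattern the bytes are the same either way, so this is mainly about using the correct big-endian type and making the byte order explicit. A short userspace sketch of byte-order-aware mask checking, assuming the mask is kept in network byte order:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdio.h>

/* assumed layout: the mask is stored in network byte order, as on the wire */
static bool is_all_ones_mask(const struct in_addr *mask)
{
	return mask->s_addr == htonl(0xffffffff);
}

int main(void)
{
	struct in_addr m;

	inet_pton(AF_INET, "255.255.255.255", &m);
	printf("%d\n", is_all_ones_mask(&m));	/* 1 */

	inet_pton(AF_INET, "255.255.255.0", &m);
	printf("%d\n", is_all_ones_mask(&m));	/* 0 */
	return 0;
}
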
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c81d6c330548..9d1d77125826 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2281,8 +2281,6 @@ static int cxgb_up(struct adapter *adap)
#if IS_ENABLED(CONFIG_IPV6)
update_clip(adap);
#endif
- /* Initialize hash mac addr list*/
- INIT_LIST_HEAD(&adap->mac_hlist);
return err;
irq_err:
@@ -2304,6 +2302,7 @@ static void cxgb_down(struct adapter *adapter)
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
+
adapter->flags &= ~FULL_INIT_DONE;
}
@@ -5602,6 +5601,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(is_t5(adapter->params.chip) ? STATMODE_V(0) :
T6_STATMODE_V(0)));
+ /* Initialize hash mac addr list */
+ INIT_LIST_HEAD(&adapter->mac_hlist);
+
for_each_port(adapter, i) {
netdev = alloc_etherdev_mq(sizeof(struct port_info),
MAX_ETH_QSETS);
@@ -5876,6 +5878,7 @@ fw_attach_fail:
static void remove_one(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
+ struct hash_mac_addr *entry, *tmp;
if (!adapter) {
pci_release_regions(pdev);
@@ -5923,6 +5926,12 @@ static void remove_one(struct pci_dev *pdev)
if (adapter->num_uld || adapter->num_ofld_uld)
t4_uld_mem_free(adapter);
free_some_resources(adapter);
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
+ list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
#if IS_ENABLED(CONFIG_IPV6)
t4_cleanup_clip_tbl(adapter);
#endif
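
remove_one() above frees adapter->mac_hlist with list_for_each_entry_safe(), which caches the next pointer before each entry is freed. The same idea in plain C over a hypothetical singly linked list:

#include <stdlib.h>

struct mac_entry {
	unsigned char addr[6];
	struct mac_entry *next;
};

static void free_mac_list(struct mac_entry *head)
{
	struct mac_entry *entry, *tmp;

	/* grab ->next before freeing, like list_for_each_entry_safe() */
	for (entry = head; entry; entry = tmp) {
		tmp = entry->next;
		free(entry);
	}
}

int main(void)
{
	struct mac_entry *a = calloc(1, sizeof(*a));
	struct mac_entry *b = calloc(1, sizeof(*b));

	if (a && b) {
		a->next = b;
		b->next = NULL;
		free_mac_list(a);
	} else {
		free(a);
		free(b);
	}
	return 0;
}
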
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 9f9d6cae39d5..ff7e58a8c90f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -246,6 +246,9 @@ static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta)
FW_PTP_CMD_PORTID_V(0));
c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
c.u.ts.sc = FW_PTP_SC_ADJ_FTIME;
+ c.u.ts.sign = (delta < 0) ? 1 : 0;
+ if (delta < 0)
+ delta = -delta;
c.u.ts.tm = cpu_to_be64(delta);
err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
@@ -308,32 +311,17 @@ static int cxgb4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
*/
static int cxgb4_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
- struct adapter *adapter = (struct adapter *)container_of(ptp,
- struct adapter, ptp_clock_info);
- struct fw_ptp_cmd c;
+ struct adapter *adapter = container_of(ptp, struct adapter,
+ ptp_clock_info);
u64 ns;
- int err;
- memset(&c, 0, sizeof(c));
- c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_READ_F |
- FW_PTP_CMD_PORTID_V(0));
- c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
- c.u.ts.sc = FW_PTP_SC_GET_TIME;
-
- err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), &c);
- if (err < 0) {
- dev_err(adapter->pdev_dev,
- "PTP: %s error %d\n", __func__, -err);
- return err;
- }
+ ns = t4_read_reg(adapter, T5_PORT_REG(0, MAC_PORT_PTP_SUM_LO_A));
+ ns |= (u64)t4_read_reg(adapter,
+ T5_PORT_REG(0, MAC_PORT_PTP_SUM_HI_A)) << 32;
/* convert to timespec*/
- ns = be64_to_cpu(c.u.ts.tm);
*ts = ns_to_timespec64(ns);
-
- return err;
+ return 0;
}
/**
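
cxgb4_ptp_fineadjtime() now splits the signed delta into a sign flag and a non-negative magnitude before building the firmware command. A standalone sketch of that split, with a hypothetical command layout (the real fw_ptp_cmd fields differ):

#include <stdint.h>
#include <stdio.h>

struct ptp_adj_cmd {		/* hypothetical firmware command layout */
	uint8_t  sign;		/* 1 = negative adjustment */
	uint64_t magnitude;	/* always non-negative */
};

static void fill_adj_cmd(struct ptp_adj_cmd *c, int64_t delta)
{
	c->sign = (delta < 0) ? 1 : 0;
	if (delta < 0)
		delta = -delta;	/* fine for the small deltas used here */
	c->magnitude = (uint64_t)delta;
}

int main(void)
{
	struct ptp_adj_cmd c;

	fill_adj_cmd(&c, -12345);
	printf("sign=%u mag=%llu\n", (unsigned)c.sign,
	       (unsigned long long)c.magnitude);
	return 0;
}
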
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index c7d2b4dc7568..9fd0acd1cbd1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -47,7 +47,7 @@ static int fill_match_fields(struct adapter *adap,
bool next_header)
{
unsigned int i, j;
- u32 val, mask;
+ __be32 val, mask;
int off, err;
bool found;
@@ -216,7 +216,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
const struct cxgb4_next_header *next;
bool found = false;
unsigned int i, j;
- u32 val, mask;
+ __be32 val, mask;
int off;
if (t->table[link_uhtid - 1].link_handle) {
@@ -230,10 +230,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
/* Try to find matches that allow jumps to next header. */
for (i = 0; next[i].jump; i++) {
- if (next[i].offoff != cls->knode.sel->offoff ||
- next[i].shift != cls->knode.sel->offshift ||
- next[i].mask != cls->knode.sel->offmask ||
- next[i].offset != cls->knode.sel->off)
+ if (next[i].sel.offoff != cls->knode.sel->offoff ||
+ next[i].sel.offshift != cls->knode.sel->offshift ||
+ next[i].sel.offmask != cls->knode.sel->offmask ||
+ next[i].sel.off != cls->knode.sel->off)
continue;
/* Found a possible candidate. Find a key that
@@ -245,9 +245,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
val = cls->knode.sel->keys[j].val;
mask = cls->knode.sel->keys[j].mask;
- if (next[i].match_off == off &&
- next[i].match_val == val &&
- next[i].match_mask == mask) {
+ if (next[i].key.off == off &&
+ next[i].key.val == val &&
+ next[i].key.mask == mask) {
found = true;
break;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
index a4b99edcc339..141085e159e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -38,12 +38,12 @@
struct cxgb4_match_field {
int off; /* Offset from the beginning of the header to match */
/* Fill the value/mask pair in the spec if matched */
- int (*val)(struct ch_filter_specification *f, u32 val, u32 mask);
+ int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask);
};
/* IPv4 match fields */
static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.tos = (ntohl(val) >> 16) & 0x000000FF;
f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF;
@@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
u32 mask_val;
u8 frag_val;
@@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.proto = (ntohl(val) >> 16) & 0x000000FF;
f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF;
@@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[0], &val, sizeof(u32));
memcpy(&f->mask.fip[0], &mask, sizeof(u32));
@@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[0], &val, sizeof(u32));
memcpy(&f->mask.lip[0], &mask, sizeof(u32));
@@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = {
/* IPv6 match fields */
static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.tos = (ntohl(val) >> 20) & 0x000000FF;
f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF;
@@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.proto = (ntohl(val) >> 8) & 0x000000FF;
f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF;
@@ -129,7 +129,7 @@ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[0], &val, sizeof(u32));
memcpy(&f->mask.fip[0], &mask, sizeof(u32));
@@ -138,7 +138,7 @@ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[4], &val, sizeof(u32));
memcpy(&f->mask.fip[4], &mask, sizeof(u32));
@@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[8], &val, sizeof(u32));
memcpy(&f->mask.fip[8], &mask, sizeof(u32));
@@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[12], &val, sizeof(u32));
memcpy(&f->mask.fip[12], &mask, sizeof(u32));
@@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[0], &val, sizeof(u32));
memcpy(&f->mask.lip[0], &mask, sizeof(u32));
@@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[4], &val, sizeof(u32));
memcpy(&f->mask.lip[4], &mask, sizeof(u32));
@@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[8], &val, sizeof(u32));
memcpy(&f->mask.lip[8], &mask, sizeof(u32));
@@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[12], &val, sizeof(u32));
memcpy(&f->mask.lip[12], &mask, sizeof(u32));
@@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = {
/* TCP/UDP match */
static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.fport = ntohl(val) >> 16;
f->mask.fport = ntohl(mask) >> 16;
@@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = {
};
struct cxgb4_next_header {
- unsigned int offset; /* Offset to next header */
- /* offset, shift, and mask added to offset above
+ /* Offset, shift, and mask added to beginning of the header
* to get to next header. Useful when using a header
* field's value to jump to next header such as IHL field
* in IPv4 header.
*/
- unsigned int offoff;
- u32 shift;
- u32 mask;
- /* match criteria to make this jump */
- unsigned int match_off;
- u32 match_val;
- u32 match_mask;
+ struct tc_u32_sel sel;
+ struct tc_u32_key key;
/* location of jump to make */
const struct cxgb4_match_field *jump;
};
@@ -258,26 +252,74 @@ struct cxgb4_next_header {
* IPv4 header.
*/
static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = {
- { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
- .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00,
- .jump = cxgb4_tcp_fields },
- { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
- .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00,
- .jump = cxgb4_udp_fields },
- { .jump = NULL }
+ {
+ /* TCP Jump */
+ .sel = {
+ .off = 0,
+ .offoff = 0,
+ .offshift = 6,
+ .offmask = cpu_to_be16(0x0f00),
+ },
+ .key = {
+ .off = 8,
+ .val = cpu_to_be32(0x00060000),
+ .mask = cpu_to_be32(0x00ff0000),
+ },
+ .jump = cxgb4_tcp_fields,
+ },
+ {
+ /* UDP Jump */
+ .sel = {
+ .off = 0,
+ .offoff = 0,
+ .offshift = 6,
+ .offmask = cpu_to_be16(0x0f00),
+ },
+ .key = {
+ .off = 8,
+ .val = cpu_to_be32(0x00110000),
+ .mask = cpu_to_be32(0x00ff0000),
+ },
+ .jump = cxgb4_udp_fields,
+ },
+ { .jump = NULL },
};
/* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header
* to get to transport layer header.
*/
static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = {
- { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
- .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000,
- .jump = cxgb4_tcp_fields },
- { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
- .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000,
- .jump = cxgb4_udp_fields },
- { .jump = NULL }
+ {
+ /* TCP Jump */
+ .sel = {
+ .off = 40,
+ .offoff = 0,
+ .offshift = 0,
+ .offmask = 0,
+ },
+ .key = {
+ .off = 4,
+ .val = cpu_to_be32(0x00000600),
+ .mask = cpu_to_be32(0x0000ff00),
+ },
+ .jump = cxgb4_tcp_fields,
+ },
+ {
+ /* UDP Jump */
+ .sel = {
+ .off = 40,
+ .offoff = 0,
+ .offshift = 0,
+ .offmask = 0,
+ },
+ .key = {
+ .off = 4,
+ .val = cpu_to_be32(0x00001100),
+ .mask = cpu_to_be32(0x0000ff00),
+ },
+ .jump = cxgb4_udp_fields,
+ },
+ { .jump = NULL },
};
struct cxgb4_link {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index de9ad311dacd..ea0758de8ac8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -44,6 +44,9 @@
#define MAX_ULD_QSETS 16
+/* ulp_mem_io + ulptx_idata + payload + padding */
+#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
+
/* CPL message priority levels */
enum {
CPL_PRIORITY_DATA = 0, /* data messages */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 986277744611..08f4780e7fe7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -508,40 +508,19 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
EXPORT_SYMBOL(cxgb4_select_ntuple);
/*
- * Called when address resolution fails for an L2T entry to handle packets
- * on the arpq head. If a packet specifies a failure handler it is invoked,
- * otherwise the packet is sent to the device.
- */
-static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
-{
- struct sk_buff *skb;
-
- while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
- const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
-
- spin_unlock(&e->lock);
- if (cb->arp_err_handler)
- cb->arp_err_handler(cb->handle, skb);
- else
- t4_ofld_send(adap, skb);
- spin_lock(&e->lock);
- }
-}
-
-/*
* Called when the host's neighbor layer makes a change to some entry that is
* loaded into the HW L2 table.
*/
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
- struct l2t_entry *e;
- struct sk_buff_head *arpq = NULL;
- struct l2t_data *d = adap->l2t;
unsigned int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *) neigh->primary_key;
- int ifidx = neigh->dev->ifindex;
- int hash = addr_hash(d, addr, addr_len, ifidx);
+ int hash, ifidx = neigh->dev->ifindex;
+ struct sk_buff_head *arpq = NULL;
+ struct l2t_data *d = adap->l2t;
+ struct l2t_entry *e;
+ hash = addr_hash(d, addr, addr_len, ifidx);
read_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (!addreq(e, addr) && e->ifindex == ifidx) {
@@ -574,8 +553,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
write_l2e(adap, e, 0);
}
- if (arpq)
- handle_failed_resolution(adap, e);
+ if (arpq) {
+ struct sk_buff *skb;
+
+ /* Called when address resolution fails for an L2T
+ * entry to handle packets on the arpq head. If a
+ * packet specifies a failure handler it is invoked,
+ * otherwise the packet is sent to the device.
+ */
+ while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
+ const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+ spin_unlock(&e->lock);
+ if (cb->arp_err_handler)
+ cb->arp_err_handler(cb->handle, skb);
+ else
+ t4_ofld_send(adap, skb);
+ spin_lock(&e->lock);
+ }
+ }
spin_unlock_bh(&e->lock);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 6807bc3a44fb..0a2d10a000ca 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2084,17 +2084,22 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
* @skb: the packet
*
* Returns true if a packet can be sent as an offload WR with immediate
- * data. We currently use the same limit as for Ethernet packets.
+ * data.
+ * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field.
+ * However, FW_ULPTX_WR commands have a 256 byte immediate only
+ * payload limit.
*/
static inline int is_ofld_imm(const struct sk_buff *skb)
{
struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
- if (opcode == FW_CRYPTO_LOOKASIDE_WR)
+ if (unlikely(opcode == FW_ULPTX_WR))
+ return skb->len <= MAX_IMM_ULPTX_WR_LEN;
+ else if (opcode == FW_CRYPTO_LOOKASIDE_WR)
return skb->len <= SGE_MAX_WR_LEN;
else
- return skb->len <= MAX_IMM_TX_PKT_LEN;
+ return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
}
/**
@@ -2367,6 +2372,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
txq_info = adap->sge.uld_txq_info[tx_uld_type];
if (unlikely(!txq_info)) {
WARN_ON(true);
+ kfree_skb(skb);
return NET_XMIT_DROP;
}
@@ -2742,7 +2748,7 @@ static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
hwtstamps = skb_hwtstamps(skb);
memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
+ hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
return RX_PTP_PKT_SUC;
}
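
t4_systim_to_hwstamp() switches from casting the buffer to u64 * to get_unaligned_be64(), which copes with unaligned data and performs the big-endian conversion. A portable userspace equivalent that assembles the value byte by byte:

#include <stdint.h>
#include <stdio.h>

/* byte-wise big-endian read: safe for any alignment and any host order */
static uint64_t read_be64(const unsigned char *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	unsigned char buf[9] = { 0, 0x01, 0x02, 0x03, 0x04,
				 0x05, 0x06, 0x07, 0x08 };

	/* &buf[1] is deliberately misaligned for a 64-bit load */
	printf("%llx\n", (unsigned long long)read_be64(&buf[1]));
	return 0;
}
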
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 8350c0c9b89d..812f4b743d97 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2093,7 +2093,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1190, 0x1194,
0x11a0, 0x11a4,
0x11b0, 0x11b4,
- 0x11fc, 0x1274,
+ 0x11fc, 0x123c,
+ 0x1254, 0x1274,
0x1280, 0x133c,
0x1800, 0x18fc,
0x3000, 0x302c,
@@ -3499,7 +3500,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
drv_fw = &fw_info->fw_hdr;
/* Read the header of the firmware on the card */
- ret = -t4_read_flash(adap, FLASH_FW_START,
+ ret = t4_read_flash(adap, FLASH_FW_START,
sizeof(*card_fw) / sizeof(uint32_t),
(uint32_t *)card_fw, 1);
if (ret == 0) {
@@ -3528,8 +3529,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
should_install_fs_fw(adap, card_fw_usable,
be32_to_cpu(fs_fw->fw_ver),
be32_to_cpu(card_fw->fw_ver))) {
- ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
- fw_size, 0);
+ ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
+ fw_size, 0);
if (ret != 0) {
dev_err(adap->pdev_dev,
"failed to install firmware: %d\n", ret);
@@ -3560,7 +3561,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
- ret = EINVAL;
+ ret = -EINVAL;
goto bye;
}
@@ -3748,7 +3749,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
&param, &val);
- if (ret < 0)
+ if (ret)
return ret;
*phy_fw_ver = val;
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index eb222d40ddbf..a64eb6ac5c76 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1896,6 +1896,9 @@
#define MAC_PORT_CFG2_A 0x818
+#define MAC_PORT_PTP_SUM_LO_A 0x990
+#define MAC_PORT_PTP_SUM_HI_A 0x994
+
#define MPS_CMN_CTL_A 0x9000
#define COUNTPAUSEMCRX_S 5
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 3297ce025e8b..6ddc2baa7481 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -42,6 +42,10 @@
#define TCB_T_FLAGS_W 1
+#define TF_DROP_S 22
+#define TF_DIRECT_STEER_S 23
+#define TF_LPBK_S 59
+
#define TF_CCTRL_ECE_S 60
#define TF_CCTRL_CWR_S 61
#define TF_CCTRL_RFR_S 62
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 972dc7bd721d..15029a5e62b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -723,9 +723,6 @@ static int adapter_up(struct adapter *adapter)
if (adapter->flags & USING_MSIX)
name_msix_vecs(adapter);
- /* Initialize hash mac addr list*/
- INIT_LIST_HEAD(&adapter->mac_hlist);
-
adapter->flags |= FULL_INIT_DONE;
}
@@ -3038,6 +3035,9 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
if (err)
goto err_unmap_bar;
+ /* Initialize hash mac addr list */
+ INIT_LIST_HEAD(&adapter->mac_hlist);
+
/*
* Allocate our "adapter ports" and stitch everything together.
*/
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 0dd64acd2a3f..08cac1bfacaf 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -171,6 +171,7 @@ struct enic {
u16 num_vfs;
#endif
spinlock_t enic_api_lock;
+ bool enic_api_busy;
struct enic_port_profile *pp;
/* work queue cache line section */
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
index b161f24522b8..b028ea2dec2b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_api.c
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
@@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
struct vnic_dev *vdev = enic->vdev;
spin_lock(&enic->enic_api_lock);
+ while (enic->enic_api_busy) {
+ spin_unlock(&enic->enic_api_lock);
+ cpu_relax();
+ spin_lock(&enic->enic_api_lock);
+ }
+
spin_lock_bh(&enic->devcmd_lock);
vnic_dev_cmd_proxy_by_index_start(vdev, vf);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 026a3bd71204..bfe0e820956c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -803,7 +803,7 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
return err;
}
-static inline void enic_queue_wq_skb(struct enic *enic,
+static inline int enic_queue_wq_skb(struct enic *enic,
struct vnic_wq *wq, struct sk_buff *skb)
{
unsigned int mss = skb_shinfo(skb)->gso_size;
@@ -849,6 +849,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
wq->to_use = buf->next;
dev_kfree_skb(skb);
}
+ return err;
}
/* netif_tx_lock held, process context with BHs disabled, or BH */
@@ -892,7 +893,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- enic_queue_wq_skb(enic, wq, skb);
+ if (enic_queue_wq_skb(enic, wq, skb))
+ goto error;
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
netif_tx_stop_queue(txq);
@@ -900,6 +902,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (!skb->xmit_more || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
+error:
spin_unlock(&enic->wq_lock[txq_map]);
return NETDEV_TX_OK;
@@ -2142,8 +2145,6 @@ static int enic_dev_wait(struct vnic_dev *vdev,
int done;
int err;
- BUG_ON(in_interrupt());
-
err = start(vdev, arg);
if (err)
return err;
@@ -2331,6 +2332,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
rss_hash_bits, rss_base_cpu, rss_enable);
}
+static void enic_set_api_busy(struct enic *enic, bool busy)
+{
+ spin_lock(&enic->enic_api_lock);
+ enic->enic_api_busy = busy;
+ spin_unlock(&enic->enic_api_lock);
+}
+
static void enic_reset(struct work_struct *work)
{
struct enic *enic = container_of(work, struct enic, reset);
@@ -2340,7 +2348,9 @@ static void enic_reset(struct work_struct *work)
rtnl_lock();
- spin_lock(&enic->enic_api_lock);
+ /* Stop any activity from infiniband */
+ enic_set_api_busy(enic, true);
+
enic_stop(enic->netdev);
enic_dev_soft_reset(enic);
enic_reset_addr_lists(enic);
@@ -2348,7 +2358,10 @@ static void enic_reset(struct work_struct *work)
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev);
- spin_unlock(&enic->enic_api_lock);
+
+ /* Allow infiniband to fiddle with the device again */
+ enic_set_api_busy(enic, false);
+
call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock();
@@ -2360,7 +2373,9 @@ static void enic_tx_hang_reset(struct work_struct *work)
rtnl_lock();
- spin_lock(&enic->enic_api_lock);
+ /* Stop any activity from infiniband */
+ enic_set_api_busy(enic, true);
+
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
@@ -2369,7 +2384,10 @@ static void enic_tx_hang_reset(struct work_struct *work)
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev);
- spin_unlock(&enic->enic_api_lock);
+
+ /* Allow infiniband to fiddle with the device again */
+ enic_set_api_busy(enic, false);
+
call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock();
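
enic_reset() and enic_tx_hang_reset() no longer hold enic_api_lock across the whole reset; they take it briefly to flip enic_api_busy, and enic_api_devcmd_proxy_by_index() spins, dropping the lock each iteration, until the flag clears. A rough pthread sketch of that short-lock/long-busy-flag structure, with hypothetical names and no real concurrency exercised:

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t api_lock = PTHREAD_MUTEX_INITIALIZER;
static bool api_busy;

static void set_api_busy(bool busy)
{
	pthread_mutex_lock(&api_lock);	/* lock held only for the flag flip */
	api_busy = busy;
	pthread_mutex_unlock(&api_lock);
}

static void api_call(void)
{
	pthread_mutex_lock(&api_lock);
	while (api_busy) {
		/* drop the lock while waiting so the reset path can clear
		 * the flag, mirroring the spin_unlock/cpu_relax loop */
		pthread_mutex_unlock(&api_lock);
		sched_yield();
		pthread_mutex_lock(&api_lock);
	}
	/* ... issue the device command under the lock ... */
	pthread_mutex_unlock(&api_lock);
}

static void reset_device(void)
{
	set_api_busy(true);
	/* ... long stop/reset/reopen sequence runs without the lock ... */
	set_api_busy(false);
}

int main(void)
{
	reset_device();
	api_call();
	return 0;
}
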
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 01a212097836..5242687060b4 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2392,7 +2392,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
- netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
+ netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM);
if (!netdev) {
dev_err(dev, "Can't allocate ethernet device #%d\n", id);
return -ENOMEM;
@@ -2451,7 +2451,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(port->reset)) {
dev_err(dev, "no reset\n");
- return PTR_ERR(port->reset);
+ ret = PTR_ERR(port->reset);
+ goto unprepare;
}
reset_control_reset(port->reset);
usleep_range(100, 500);
@@ -2507,23 +2508,24 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port_names[port->id],
port);
if (ret)
- return ret;
+ goto unprepare;
ret = register_netdev(netdev);
- if (!ret) {
+ if (ret)
+ goto unprepare;
+
+ netdev_info(netdev,
+ "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
+ port->irq, &dmares->start,
+ &gmacres->start);
+ ret = gmac_setup_phy(netdev);
+ if (ret)
netdev_info(netdev,
- "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
- port->irq, &dmares->start,
- &gmacres->start);
- ret = gmac_setup_phy(netdev);
- if (ret)
- netdev_info(netdev,
- "PHY init failed, deferring to ifup time\n");
- return 0;
- }
+ "PHY init failed, deferring to ifup time\n");
+ return 0;
- port->netdev = NULL;
- free_netdev(netdev);
+unprepare:
+ clk_disable_unprepare(port->pclk);
return ret;
}
@@ -2532,7 +2534,6 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
gemini_port_remove(port);
- free_netdev(port->netdev);
return 0;
}
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 50222b7b81f3..8b07890b0b23 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -143,6 +143,8 @@ struct board_info {
u32 wake_state;
int ip_summed;
+
+ struct regulator *power_supply;
};
/* debug code */
@@ -1460,7 +1462,7 @@ dm9000_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "failed to request reset gpio %d: %d\n",
reset_gpios, ret);
- return -ENODEV;
+ goto out_regulator_disable;
}
/* According to manual PWRST# Low Period Min 1ms */
@@ -1472,14 +1474,18 @@ dm9000_probe(struct platform_device *pdev)
if (!pdata) {
pdata = dm9000_parse_dt(&pdev->dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto out_regulator_disable;
+ }
}
/* Init network device */
ndev = alloc_etherdev(sizeof(struct board_info));
- if (!ndev)
- return -ENOMEM;
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto out_regulator_disable;
+ }
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1490,6 +1496,8 @@ dm9000_probe(struct platform_device *pdev)
db->dev = &pdev->dev;
db->ndev = ndev;
+ if (!IS_ERR(power))
+ db->power_supply = power;
spin_lock_init(&db->lock);
mutex_init(&db->addr_lock);
@@ -1716,6 +1724,10 @@ out:
dm9000_release_board(pdev, db);
free_netdev(ndev);
+out_regulator_disable:
+ if (!IS_ERR(power))
+ regulator_disable(power);
+
return ret;
}
@@ -1775,10 +1787,13 @@ static int
dm9000_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct board_info *dm = to_dm9000_board(ndev);
unregister_netdev(ndev);
- dm9000_release_board(pdev, netdev_priv(ndev));
+ dm9000_release_board(pdev, dm);
free_netdev(ndev); /* free device structure */
+ if (dm->power_supply)
+ regulator_disable(dm->power_supply);
dev_dbg(&pdev->dev, "released and freed device\n");
return 0;
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 13430f75496c..b312cd9bce16 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -91,7 +91,7 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi
#define DSL CONFIG_DE2104X_DSL
#endif
-#define DE_RX_RING_SIZE 64
+#define DE_RX_RING_SIZE 128
#define DE_TX_RING_SIZE 64
#define DE_RING_BYTES \
((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index f16853c3c851..c813e6f2b371 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -4927,11 +4927,11 @@ mii_get_oui(u_char phyaddr, u_long ioaddr)
u_char breg[2];
} a;
int i, r2, r3, ret=0;*/
- int r2, r3;
+ int r2;
/* Read r2 and r3 */
r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
- r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
+ mii_rd(MII_ID1, phyaddr, ioaddr);
/* SEEQ and Cypress way * /
/ * Shuffle r2 and r3 * /
a.reg=0;
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index dcf21a36a9cf..011604787b8e 100644
--- a/drivers/net/ethernet/dec/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -319,13 +319,8 @@ void tulip_select_media(struct net_device *dev, int startup)
break;
}
case 5: case 6: {
- u16 setup[5];
-
new_csr6 = 0; /* FIXME */
- for (i = 0; i < 5; i++)
- setup[i] = get_u16(&p[i*2 + 1]);
-
if (startup && mtable->has_reset) {
struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
unsigned char *rst = rleaf->leafdata;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 60da0499ad66..a2280c4be0f5 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1213,7 +1213,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free2;
+ goto free3;
}
ret = ethoc_mdio_probe(netdev);
@@ -1245,6 +1245,7 @@ error2:
netif_napi_del(&priv->napi);
error:
mdiobus_unregister(priv->mdio);
+free3:
mdiobus_free(priv->mdio);
free2:
clk_disable_unprepare(priv->clk);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index e4fc38cbe853..964407deca35 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1333,6 +1333,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
*/
if (unlikely(priv->need_mac_restart)) {
ftgmac100_start_hw(priv);
+ priv->need_mac_restart = false;
/* Re-enable "bad" interrupts */
iowrite32(FTGMAC100_INT_BAD,
@@ -1884,6 +1885,8 @@ static int ftgmac100_probe(struct platform_device *pdev)
return 0;
err_ncsi_dev:
+ if (priv->ndev)
+ ncsi_unregister_dev(priv->ndev);
err_register_netdev:
ftgmac100_destroy_mdio(netdev);
err_setup_mdio:
@@ -1904,6 +1907,8 @@ static int ftgmac100_remove(struct platform_device *pdev)
netdev = platform_get_drvdata(pdev);
priv = netdev_priv(netdev);
+ if (priv->ndev)
+ ncsi_unregister_dev(priv->ndev);
unregister_netdev(netdev);
clk_disable_unprepare(priv->clk);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index a580a3dcbe59..e9f4326a0afa 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -76,6 +76,7 @@ config UCC_GETH
depends on QUICC_ENGINE
select FSL_PQ_MDIO
select PHYLIB
+ select FIXED_PHY
---help---
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
@@ -89,6 +90,7 @@ config GIANFAR
depends on HAS_DMA
select FSL_PQ_MDIO
select PHYLIB
+ select FIXED_PHY
select CRC32
---help---
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
index a654736237a9..8fec41e57178 100644
--- a/drivers/net/ethernet/freescale/dpaa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -2,6 +2,7 @@ menuconfig FSL_DPAA_ETH
tristate "DPAA Ethernet"
depends on FSL_DPAA && FSL_FMAN
select PHYLIB
+ select FIXED_PHY
select FSL_FMAN_MAC
---help---
Data Path Acceleration Architecture Ethernet driver,
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index bf80855dd0dd..d06a89e99872 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -488,6 +488,12 @@ struct fec_enet_priv_rx_q {
struct sk_buff *rx_skbuff[RX_RING_SIZE];
};
+struct fec_stop_mode_gpr {
+ struct regmap *gpr;
+ u8 reg;
+ u8 bit;
+};
+
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
* tx_bd_base always point to the base of the buffer descriptors. The
* cur_rx and cur_tx point to the currently available buffer.
@@ -563,6 +569,7 @@ struct fec_enet_private {
int hwts_tx_en;
struct delayed_work time_keep;
struct regulator *reg_phy;
+ struct fec_stop_mode_gpr stop_gpr;
unsigned int tx_align;
unsigned int rx_align;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9142992ccd5a..6b9eada1feb2 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -62,6 +62,8 @@
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>
#include <asm/cacheflush.h>
@@ -84,6 +86,56 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
#define FEC_ENET_OPD_V 0xFFF0
#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
+struct fec_devinfo {
+ u32 quirks;
+ u8 stop_gpr_reg;
+ u8 stop_gpr_bit;
+};
+
+static const struct fec_devinfo fec_imx25_info = {
+ .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+ FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx27_info = {
+ .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx28_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+ FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx6q_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+ FEC_QUIRK_HAS_RACC,
+ .stop_gpr_reg = 0x34,
+ .stop_gpr_bit = 27,
+};
+
+static const struct fec_devinfo fec_mvf600_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+};
+
+static const struct fec_devinfo fec_imx6x_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+};
+
+static const struct fec_devinfo fec_imx6ul_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
+ FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_COALESCE,
+};
+
static struct platform_device_id fec_devtype[] = {
{
/* keep it for coldfire */
@@ -91,39 +143,25 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
- FEC_QUIRK_HAS_FRREG,
+ .driver_data = (kernel_ulong_t)&fec_imx25_info,
}, {
.name = "imx27-fec",
- .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+ .driver_data = (kernel_ulong_t)&fec_imx27_info,
}, {
.name = "imx28-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
- FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG,
+ .driver_data = (kernel_ulong_t)&fec_imx28_info,
}, {
.name = "imx6q-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC,
+ .driver_data = (kernel_ulong_t)&fec_imx6q_info,
}, {
.name = "mvf600-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+ .driver_data = (kernel_ulong_t)&fec_mvf600_info,
}, {
.name = "imx6sx-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
- FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+ .driver_data = (kernel_ulong_t)&fec_imx6x_info,
}, {
.name = "imx6ul-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
- FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_COALESCE,
+ .driver_data = (kernel_ulong_t)&fec_imx6ul_info,
}, {
/* sentinel */
}
@@ -1089,11 +1127,28 @@ fec_restart(struct net_device *ndev)
}
+static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
+{
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+ struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
+
+ if (stop_gpr->gpr) {
+ if (enabled)
+ regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+ BIT(stop_gpr->bit),
+ BIT(stop_gpr->bit));
+ else
+ regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+ BIT(stop_gpr->bit), 0);
+ } else if (pdata && pdata->sleep_mode_enable) {
+ pdata->sleep_mode_enable(enabled);
+ }
+}
+
static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
u32 val;
@@ -1122,9 +1177,7 @@ fec_stop(struct net_device *ndev)
val = readl(fep->hwp + FEC_ECNTRL);
val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
writel(val, fep->hwp + FEC_ECNTRL);
-
- if (pdata && pdata->sleep_mode_enable)
- pdata->sleep_mode_enable(true);
+ fec_enet_stop_mode(fep, true);
}
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
@@ -1844,6 +1897,27 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return ret;
}
+static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = ndev->phydev;
+
+ if (phy_dev) {
+ phy_reset_after_clk_enable(phy_dev);
+ } else if (fep->phy_node) {
+ /*
+ * If the PHY still is not bound to the MAC, but there is
+ * OF PHY node and a matching PHY device instance already,
+ * use the OF PHY node to obtain the PHY device instance,
+ * and then use that PHY device instance when triggering
+ * the PHY reset.
+ */
+ phy_dev = of_phy_find_device(fep->phy_node);
+ phy_reset_after_clk_enable(phy_dev);
+ put_device(&phy_dev->mdio.dev);
+ }
+}
+
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1870,7 +1944,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
if (ret)
goto failed_clk_ref;
- phy_reset_after_clk_enable(ndev->phydev);
+ fec_enet_phy_reset_after_clk_enable(ndev);
} else {
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
@@ -2876,16 +2950,16 @@ fec_enet_open(struct net_device *ndev)
/* Init MAC prior to mii bus probe */
fec_restart(ndev);
- /* Probe and connect to PHY when open the interface */
- ret = fec_enet_mii_probe(ndev);
- if (ret)
- goto err_enet_mii_probe;
-
/* Call phy_reset_after_clk_enable() again if it failed during
* phy_reset_after_clk_enable() before because the PHY wasn't probed.
*/
if (reset_again)
- phy_reset_after_clk_enable(ndev->phydev);
+ fec_enet_phy_reset_after_clk_enable(ndev);
+
+ /* Probe and connect to PHY when open the interface */
+ ret = fec_enet_mii_probe(ndev);
+ if (ret)
+ goto err_enet_mii_probe;
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
@@ -3147,7 +3221,9 @@ static int fec_enet_init(struct net_device *ndev)
return ret;
}
- fec_enet_alloc_queue(ndev);
+ ret = fec_enet_alloc_queue(ndev);
+ if (ret)
+ return ret;
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
@@ -3155,7 +3231,8 @@ static int fec_enet_init(struct net_device *ndev)
cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
GFP_KERNEL);
if (!cbd_base) {
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_queue_mem;
}
memset(cbd_base, 0, bd_size);
@@ -3235,6 +3312,10 @@ static int fec_enet_init(struct net_device *ndev)
fec_enet_update_ethtool_stats(ndev);
return 0;
+
+free_queue_mem:
+ fec_enet_free_queue(ndev);
+ return ret;
}
#ifdef CONFIG_OF
@@ -3347,6 +3428,37 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev)
return irq_cnt;
}
+static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
+ struct fec_devinfo *dev_info,
+ struct device_node *np)
+{
+ struct device_node *gpr_np;
+ int ret = 0;
+
+ if (!dev_info)
+ return 0;
+
+ gpr_np = of_parse_phandle(np, "gpr", 0);
+ if (!gpr_np)
+ return 0;
+
+ fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
+ if (IS_ERR(fep->stop_gpr.gpr)) {
+ dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
+ ret = PTR_ERR(fep->stop_gpr.gpr);
+ fep->stop_gpr.gpr = NULL;
+ goto out;
+ }
+
+ fep->stop_gpr.reg = dev_info->stop_gpr_reg;
+ fep->stop_gpr.bit = dev_info->stop_gpr_bit;
+
+out:
+ of_node_put(gpr_np);
+
+ return ret;
+}
+
static int
fec_probe(struct platform_device *pdev)
{
@@ -3362,6 +3474,7 @@ fec_probe(struct platform_device *pdev)
int num_rx_qs;
char irq_name[8];
int irq_cnt;
+ struct fec_devinfo *dev_info;
fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
@@ -3379,7 +3492,9 @@ fec_probe(struct platform_device *pdev)
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
pdev->id_entry = of_id->data;
- fep->quirks = pdev->id_entry->driver_data;
+ dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
+ if (dev_info)
+ fep->quirks = dev_info->quirks;
fep->netdev = ndev;
fep->num_rx_queues = num_rx_qs;
@@ -3414,6 +3529,10 @@ fec_probe(struct platform_device *pdev)
if (of_get_property(np, "fsl,magic-packet", NULL))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
+ ret = fec_enet_init_stop_mode(fep, dev_info, np);
+ if (ret)
+ goto failed_stop_mode;
+
phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!phy_node && of_phy_is_fixed_link(np)) {
ret = of_phy_register_fixed_link(np);
@@ -3568,11 +3687,11 @@ failed_mii_init:
failed_irq:
failed_init:
fec_ptp_stop(pdev);
- if (fep->reg_phy)
- regulator_disable(fep->reg_phy);
failed_reset:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
failed_clk_ahb:
@@ -3583,6 +3702,7 @@ failed_clk:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(phy_node);
+failed_stop_mode:
failed_phy:
dev_id--;
failed_ioremap:
@@ -3660,7 +3780,6 @@ static int __maybe_unused fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
int ret;
int val;
@@ -3678,8 +3797,8 @@ static int __maybe_unused fec_resume(struct device *dev)
goto failed_clk;
}
if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
- if (pdata && pdata->sleep_mode_enable)
- pdata->sleep_mode_enable(false);
+ fec_enet_stop_mode(fep, false);
+
val = readl(fep->hwp + FEC_ECNTRL);
val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
writel(val, fep->hwp + FEC_ECNTRL);
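Note on the fec_main.c hunks above: the platform-data sleep callback is replaced by a stop-request bit in a General Purpose Register reached through a syscon regmap. The following is a minimal, illustrative sketch of that pattern under assumed names (my_stop_ctrl, the "gpr" phandle, reg/bit values); it is not part of the patch itself.

#include <linux/types.h>
#include <linux/err.h>
#include <linux/bits.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

struct my_stop_ctrl {
	struct regmap *gpr;	/* syscon regmap, may stay NULL */
	u8 reg;			/* GPR register offset */
	u8 bit;			/* stop-request bit within that register */
};

/* Look up the "gpr" phandle and remember which bit requests stop mode. */
static int my_stop_ctrl_init(struct device_node *np, struct my_stop_ctrl *sc,
			     u8 reg, u8 bit)
{
	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);

	if (!gpr_np)
		return 0;	/* optional: callers fall back to legacy hooks */

	sc->gpr = syscon_node_to_regmap(gpr_np);
	of_node_put(gpr_np);
	if (IS_ERR(sc->gpr)) {
		int err = PTR_ERR(sc->gpr);

		sc->gpr = NULL;
		return err;
	}

	sc->reg = reg;
	sc->bit = bit;
	return 0;
}

/* Assert or deassert the stop-request bit. */
static void my_stop_ctrl_set(struct my_stop_ctrl *sc, bool enabled)
{
	if (!sc->gpr)
		return;

	regmap_update_bits(sc->gpr, sc->reg, BIT(sc->bit),
			   enabled ? BIT(sc->bit) : 0);
}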
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7e892b1cbd3d..09a762eb4f09 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -382,9 +382,16 @@ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
u64 ns;
unsigned long flags;
+ mutex_lock(&adapter->ptp_clk_mutex);
+ /* Check the ptp clock */
+ if (!adapter->ptp_clk_on) {
+ mutex_unlock(&adapter->ptp_clk_mutex);
+ return -EINVAL;
+ }
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_read(&adapter->tc);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+ mutex_unlock(&adapter->ptp_clk_mutex);
*ts = ns_to_timespec64(ns);
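The fec_ptp.c hunk guards the timecounter read with the clock-enable mutex so gettime cannot run while the PTP clock is gated. A generic sketch of that guard, using hypothetical field names (clk_mutex, clk_on, tc_lock, tc), might look like this:

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/timecounter.h>
#include <linux/time64.h>
#include <linux/errno.h>

struct my_ptp_priv {
	struct mutex clk_mutex;		/* serialises clock enable/disable */
	bool clk_on;			/* true while the PTP clock is running */
	spinlock_t tc_lock;		/* protects the timecounter */
	struct timecounter tc;
};

static int my_ptp_gettime(struct my_ptp_priv *priv, struct timespec64 *ts)
{
	unsigned long flags;
	u64 ns;

	mutex_lock(&priv->clk_mutex);
	if (!priv->clk_on) {
		/* clock gated: reading the counter would return garbage */
		mutex_unlock(&priv->clk_mutex);
		return -EINVAL;
	}

	spin_lock_irqsave(&priv->tc_lock, flags);
	ns = timecounter_read(&priv->tc);
	spin_unlock_irqrestore(&priv->tc_lock, flags);
	mutex_unlock(&priv->clk_mutex);

	*ts = ns_to_timespec64(ns);
	return 0;
}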
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 21d8023535ae..eba7e54ecf85 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1396,8 +1396,7 @@ static void enable_time_stamp(struct fman *fman)
{
struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
u16 fm_clk_freq = fman->state->fm_clk_freq;
- u32 tmp, intgr, ts_freq;
- u64 frac;
+ u32 tmp, intgr, ts_freq, frac;
ts_freq = (u32)(1 << fman->state->count1_micro_bit);
/* configure timestamp so that bit 8 will count 1 microsecond
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 1ca543ac8f2c..d2de9ea80c43 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1205,7 +1205,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
list_for_each(pos,
&dtsec->multicast_addr_hash->lsts[bucket]) {
hash_entry = ETH_HASH_ENTRY_OBJ(pos);
- if (hash_entry->addr == addr) {
+ if (hash_entry && hash_entry->addr == addr) {
list_del_init(&hash_entry->node);
kfree(hash_entry);
break;
@@ -1218,7 +1218,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
list_for_each(pos,
&dtsec->unicast_addr_hash->lsts[bucket]) {
hash_entry = ETH_HASH_ENTRY_OBJ(pos);
- if (hash_entry->addr == addr) {
+ if (hash_entry && hash_entry->addr == addr) {
list_del_init(&hash_entry->node);
kfree(hash_entry);
break;
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index dd6d0526f6c1..19f327efdaff 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -252,7 +252,7 @@ static inline struct eth_hash_t *alloc_hash_table(u16 size)
struct eth_hash_t *hash;
/* Allocate address hash table */
- hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL);
+ hash = kmalloc(sizeof(*hash), GFP_KERNEL);
if (!hash)
return NULL;
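The fman_mac.h change replaces a kmalloc_array() of pointer-sized slots with a single allocation of the structure that actually holds the list heads. A self-contained sketch of the corrected pattern, with an assumed my_eth_hash layout, is shown below.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>

#define MY_HASH_BUCKETS	64

struct my_eth_hash {
	u16 size;
	struct list_head lsts[MY_HASH_BUCKETS];
};

static struct my_eth_hash *my_alloc_hash_table(void)
{
	struct my_eth_hash *hash;
	int i;

	/* allocate the structure itself, not an array of pointers */
	hash = kmalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	hash->size = MY_HASH_BUCKETS;
	for (i = 0; i < hash->size; i++)
		INIT_LIST_HEAD(&hash->lsts[i]);

	return hash;
}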
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index e1901874c19f..9088b4f4b4b8 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -856,7 +856,6 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
tmp = ioread32be(&regs->command_config);
tmp &= ~CMD_CFG_PFC_MODE;
- priority = 0;
iowrite32be(tmp, &regs->command_config);
@@ -986,7 +985,7 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) {
hash_entry = ETH_HASH_ENTRY_OBJ(pos);
- if (hash_entry->addr == addr) {
+ if (hash_entry && hash_entry->addr == addr) {
list_del_init(&hash_entry->node);
kfree(hash_entry);
break;
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ee82ee1384eb..47f6fee1f396 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1756,6 +1756,7 @@ static int fman_port_probe(struct platform_device *of_dev)
struct fman_port *port;
struct fman *fman;
struct device_node *fm_node, *port_node;
+ struct platform_device *fm_pdev;
struct resource res;
struct resource *dev_res;
u32 val;
@@ -1780,8 +1781,14 @@ static int fman_port_probe(struct platform_device *of_dev)
goto return_err;
}
- fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev);
+ fm_pdev = of_find_device_by_node(fm_node);
of_node_put(fm_node);
+ if (!fm_pdev) {
+ err = -EINVAL;
+ goto return_err;
+ }
+
+ fman = dev_get_drvdata(&fm_pdev->dev);
if (!fman) {
err = -EINVAL;
goto return_err;
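The fman_port.c hunk stops dereferencing the result of of_find_device_by_node() before checking it. The sketch below shows the lookup pattern in isolation with invented names; the put_device() call reflects the reference the lookup takes and is added here for completeness rather than copied from the patch.

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/device.h>

/* The caller keeps ownership of parent_np; we only borrow it for the lookup. */
static void *my_get_parent_drvdata(struct device_node *parent_np)
{
	struct platform_device *pdev;
	void *drvdata;

	pdev = of_find_device_by_node(parent_np);	/* takes a device reference */
	if (!pdev)
		return NULL;		/* parent device not probed (yet) */

	drvdata = dev_get_drvdata(&pdev->dev);
	put_device(&pdev->dev);		/* drop the reference from the lookup */

	return drvdata;			/* may still be NULL while probing */
}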
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index f75b9c11b2d2..ac5a281e0ec3 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -630,7 +630,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) {
hash_entry = ETH_HASH_ENTRY_OBJ(pos);
- if (hash_entry->addr == addr) {
+ if (hash_entry && hash_entry->addr == addr) {
list_del_init(&hash_entry->node);
kfree(hash_entry);
break;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index c8e5d889bd81..21de56345503 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
};
module_platform_driver(fs_enet_bb_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 1582d82483ec..4e6a9c5d8af5 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
};
module_platform_driver(fs_enet_fec_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index cf2d1e846a69..4feab06b7ad7 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -485,7 +485,11 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
- eth_mac_addr(dev, p);
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (ret)
+ return ret;
gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
@@ -844,8 +848,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
continue;
err = gfar_parse_group(child, priv, model);
- if (err)
+ if (err) {
+ of_node_put(child);
goto err_grp_init;
+ }
}
} else { /* SQ_SG_MODE */
err = gfar_parse_group(np, priv, model);
@@ -1386,7 +1392,7 @@ static int gfar_probe(struct platform_device *ofdev)
if (dev->features & NETIF_F_IP_CSUM ||
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- dev->needed_headroom = GMAC_FCB_LEN;
+ dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* Initializing some of the rx/tx queue level parameters */
for (i = 0; i < priv->num_tx_queues; i++) {
@@ -2368,20 +2374,12 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* make space for additional header when fcb is needed */
- if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
- struct sk_buff *skb_new;
-
- skb_new = skb_realloc_headroom(skb, fcb_len);
- if (!skb_new) {
+ if (fcb_len) {
+ if (unlikely(skb_cow_head(skb, fcb_len))) {
dev->stats.tx_errors++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
-
- if (skb->sk)
- skb_set_owner_w(skb_new, skb->sk);
- dev_consume_skb_any(skb);
- skb = skb_new;
}
/* total number of fragments in the SKB */
@@ -2946,6 +2944,10 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
if (lstatus & BD_LFLAG(RXBD_LAST))
size -= skb->len;
+ WARN(size < 0, "gianfar: rx fragment size underflow");
+ if (size < 0)
+ return false;
+
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
rxb->page_offset + RXBUF_ALIGNMENT,
size, GFAR_RXB_TRUESIZE);
@@ -3107,6 +3109,17 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
if (lstatus & BD_LFLAG(RXBD_EMPTY))
break;
+ /* lost RXBD_LAST descriptor due to overrun */
+ if (skb &&
+ (lstatus & BD_LFLAG(RXBD_FIRST))) {
+ /* discard faulty buffer */
+ dev_kfree_skb(skb);
+ skb = NULL;
+ rx_queue->stats.rx_dropped++;
+
+ /* can continue normally */
+ }
+
/* order rx buffer descriptor reads */
rmb();
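The gianfar xmit change swaps the open-coded skb_realloc_headroom()/skb_set_owner_w() dance for skb_cow_head(), which reallocates the header in place only when the existing headroom is too small. A minimal sketch of that usage, with an assumed MY_FCB_LEN:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

#define MY_FCB_LEN	8	/* assumed frame-control-block size */

static netdev_tx_t my_xmit_prepare(struct sk_buff *skb, struct net_device *dev)
{
	/* Ensure MY_FCB_LEN bytes of writable headroom; skb_cow_head()
	 * only copies the skb head when it really has to, and keeps
	 * skb ownership intact, so no skb_set_owner_w() is needed.
	 */
	if (unlikely(skb_cow_head(skb, MY_FCB_LEN))) {
		dev->stats.tx_errors++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;	/* drop, do not requeue */
	}

	/* headroom is now available for the hardware header */
	skb_push(skb, MY_FCB_LEN);
	return NETDEV_TX_OK;
}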
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index a5bf02ae4bc5..d43173917ce4 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -45,6 +45,7 @@
#include <soc/fsl/qe/ucc.h>
#include <soc/fsl/qe/ucc_fast.h>
#include <asm/machdep.h>
+#include <net/sch_generic.h>
#include "ucc_geth.h"
@@ -1551,11 +1552,8 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
static void ugeth_quiesce(struct ucc_geth_private *ugeth)
{
- /* Prevent any further xmits, plus detach the device. */
- netif_device_detach(ugeth->ndev);
-
- /* Wait for any current xmits to finish. */
- netif_tx_disable(ugeth->ndev);
+ /* Prevent any further xmits */
+ netif_tx_stop_all_queues(ugeth->ndev);
/* Disable the interrupt to avoid NAPI rescheduling. */
disable_irq(ugeth->ug_info->uf_info.irq);
@@ -1568,7 +1566,10 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
{
napi_enable(&ugeth->napi);
enable_irq(ugeth->ug_info->uf_info.irq);
- netif_device_attach(ugeth->ndev);
+
+ /* allow to xmit again */
+ netif_tx_wake_all_queues(ugeth->ndev);
+ __netdev_watchdog_up(ugeth->ndev);
}
/* Called every time the controller might need to be made
@@ -3901,6 +3902,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
dev->mtu = 1500;
+ dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
ugeth->phy_interface = phy_interface;
@@ -3946,12 +3948,12 @@ static int ucc_geth_remove(struct platform_device* ofdev)
struct device_node *np = ofdev->dev.of_node;
unregister_netdev(dev);
- free_netdev(dev);
ucc_geth_memclean(ugeth);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(ugeth->ug_info->tbi_node);
of_node_put(ugeth->ug_info->phy_node);
+ free_netdev(dev);
return 0;
}
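The ucc_geth change quiesces the transmit path with netif_tx_stop_all_queues() instead of detaching the whole device, and rearms the tx watchdog with __netdev_watchdog_up() when waking the queues again. A stripped-down version of that pair, assuming a driver-private structure named my_priv:

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <net/sch_generic.h>	/* __netdev_watchdog_up() */

struct my_priv {
	struct net_device *ndev;
	struct napi_struct napi;
	unsigned int irq;
};

static void my_quiesce(struct my_priv *priv)
{
	/* stop new transmits but keep the device "present" */
	netif_tx_stop_all_queues(priv->ndev);

	/* no interrupts / NAPI rescheduling while we reconfigure */
	disable_irq(priv->irq);
	napi_disable(&priv->napi);
}

static void my_activate(struct my_priv *priv)
{
	napi_enable(&priv->napi);
	enable_irq(priv->irq);

	/* allow transmits again and restart the tx watchdog timer */
	netif_tx_wake_all_queues(priv->ndev);
	__netdev_watchdog_up(priv->ndev);
}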
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 5da19b440a6a..bf25e49d4fe3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -580,7 +580,14 @@ struct ucc_geth_tx_global_pram {
u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
u32 tqptr; /* a base pointer to the Tx Queues Memory
Region */
- u8 res2[0x80 - 0x74];
+ u8 res2[0x78 - 0x74];
+ u64 snums_en;
+ u32 l2l3baseptr; /* top byte consists of a few other bit fields */
+
+ u16 mtu[8];
+ u8 res3[0xa8 - 0x94];
+ u32 wrrtablebase; /* top byte is reserved */
+ u8 res4[0xc0 - 0xac];
} __packed;
/* structure representing Extended Filtering Global Parameters in PRAM */
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index a69cd19a55ae..b8fc9bbeca2c 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -547,6 +547,11 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
return -1;
base = ioremap(link->resource[2]->start, resource_size(link->resource[2]));
+ if (!base) {
+ pcmcia_release_window(link, link->resource[2]);
+ return -1;
+ }
+
pcmcia_map_mem_page(link, link->resource[2], 0);
/*
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 024b08fafd3b..b7fe3e849872 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1677,8 +1677,10 @@ static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
for (j = 0; j < fetch_num; j++) {
/* alloc one skb and init */
skb = hns_assemble_skb(ndev);
- if (!skb)
+ if (!skb) {
+ ret = -ENOMEM;
goto out;
+ }
rd = &tx_ring_data(priv, skb->queue_mapping);
hns_nic_net_xmit_hw(ndev, skb, rd);
@@ -2297,8 +2299,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
priv->enet_ver = AE_VERSION_1;
else if (acpi_dev_found(hns_enet_acpi_match[1].id))
priv->enet_ver = AE_VERSION_2;
- else
- return -ENXIO;
+ else {
+ ret = -ENXIO;
+ goto out_read_prop_fail;
+ }
/* try to find port-idx-in-ae first */
ret = acpi_node_get_property_reference(dev->fwnode,
@@ -2314,7 +2318,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
priv->fwnode = args.fwnode;
} else {
dev_err(dev, "cannot read cfg data from OF or acpi\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto out_read_prop_fail;
}
ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 1fa0cd527ead..f453cebf758c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -419,6 +419,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
/* for mutl buffer*/
new_skb = skb_copy(skb, GFP_ATOMIC);
dev_kfree_skb_any(skb);
+ if (!new_skb) {
+ netdev_err(ndev, "skb alloc failed\n");
+ return;
+ }
skb = new_skb;
check_ok = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index be9dc08ccf67..4f56ffcdfc93 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -102,7 +102,7 @@ struct hclgevf_mbx_arq_ring {
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
- (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#define hclge_mbx_head_ptr_move_arq(arq) \
- (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#endif
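The hclge_mbx.h fix wraps the ARQ head/tail by the number of ring entries rather than by the size of one message. As a generic illustration with made-up names, advancing a ring index looks like:

#include <linux/types.h>

#define MY_RING_ENTRIES	1024U	/* number of slots in the ring */

struct my_ring {
	u32 head;
	u32 tail;
};

/* Advance an index; the modulus must be the entry count. Using the size of
 * one entry instead makes the index wrap at the wrong point and land in the
 * wrong slot.
 */
static inline void my_ring_ptr_move(u32 *idx)
{
	*idx = (*idx + 1) % MY_RING_ENTRIES;
}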
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 3eb8b85f6afb..19165a3548bf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -702,8 +702,6 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
return false;
- skb_checksum_help(skb);
-
return true;
}
@@ -764,8 +762,7 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
- skb_checksum_help(skb);
- return 0;
+ return skb_checksum_help(skb);
}
l3.hdr = skb_inner_network_header(skb);
@@ -796,7 +793,7 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
- break;
+ return skb_checksum_help(skb);
hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hnae3_set_field(*type_cs_vlan_tso,
@@ -821,8 +818,7 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
- skb_checksum_help(skb);
- return 0;
+ return skb_checksum_help(skb);
}
return 0;
@@ -2639,7 +2635,6 @@ static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
- struct hnae3_ring_chain_node vector_ring_chain;
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
int ret = 0;
@@ -2669,6 +2664,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
}
for (i = 0; i < priv->vector_num; i++) {
+ struct hnae3_ring_chain_node vector_ring_chain;
+
tqp_vector = &priv->tqp_vector[i];
tqp_vector->rx_group.total_bytes = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 0c34ea122358..1cb6f95f3a94 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -151,18 +151,21 @@ static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
{
struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
unsigned char *packet = skb->data;
+ u32 len = skb_headlen(skb);
u32 i;
- for (i = 0; i < skb->len; i++)
+ len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE);
+
+ for (i = 0; i < len; i++)
if (packet[i] != (unsigned char)(i & 0xff))
break;
/* The packet is correctly received */
- if (i == skb->len)
+ if (i == HNS3_NIC_LB_TEST_PACKET_SIZE)
tqp_vector->rx_group.total_packets++;
else
print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
- skb->data, skb->len, true);
+ skb->data, len, true);
dev_kfree_skb_any(skb);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d575dd9a329d..16ab000454f9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -5182,12 +5182,19 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
+ struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
int ret;
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
+ queue_id);
+ return;
+ }
+
queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 03491e8ebb73..d0fa344f0a84 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -235,6 +235,8 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return;
+ phy_loopback(phydev, false);
+
phy_start(phydev);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 4d09ea786b35..ee715bf785ad 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -398,7 +398,8 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
spin_unlock_bh(&cmdq->cmdq_lock);
- if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) {
+ if (!wait_for_completion_timeout(&done,
+ msecs_to_jiffies(CMDQ_TIMEOUT))) {
spin_lock_bh(&cmdq->cmdq_lock);
if (cmdq->errcode[curr_prod_idx] == &errcode)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 9deec13d98e9..4c91c8ceac5f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -370,50 +370,6 @@ static int wait_for_db_state(struct hinic_hwdev *hwdev)
return -EFAULT;
}
-static int wait_for_io_stopped(struct hinic_hwdev *hwdev)
-{
- struct hinic_cmd_io_status cmd_io_status;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- unsigned long end;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT);
- do {
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_IO_STATUS_GET,
- &cmd_io_status, sizeof(cmd_io_status),
- &cmd_io_status, &out_size,
- HINIC_MGMT_MSG_SYNC);
- if ((err) || (out_size != sizeof(cmd_io_status))) {
- dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n",
- err);
- return err;
- }
-
- if (cmd_io_status.status == IO_STOPPED) {
- dev_info(&pdev->dev, "IO stopped\n");
- return 0;
- }
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n");
- return -ETIMEDOUT;
-}
-
/**
* clear_io_resource - set the IO resources as not active in the NIC
* @hwdev: the NIC HW device
@@ -433,11 +389,8 @@ static int clear_io_resources(struct hinic_hwdev *hwdev)
return -EINVAL;
}
- err = wait_for_io_stopped(hwdev);
- if (err) {
- dev_err(&pdev->dev, "IO has not stopped yet\n");
- return err;
- }
+	/* sleep 100 ms to wait for the firmware to stop I/O */
+ msleep(100);
cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index 278dc13f3dae..0e40d647093c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -52,7 +52,9 @@
#define MSG_NOT_RESP 0xFFFF
-#define MGMT_MSG_TIMEOUT 1000
+#define MGMT_MSG_TIMEOUT 5000
+
+#define SET_FUNC_PORT_MGMT_TIMEOUT 25000
#define mgmt_to_pfhwdev(pf_mgmt) \
container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
@@ -247,12 +249,13 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
u8 *buf_in, u16 in_size,
u8 *buf_out, u16 *out_size,
enum mgmt_direction_type direction,
- u16 resp_msg_id)
+ u16 resp_msg_id, u32 timeout)
{
struct hinic_hwif *hwif = pf_to_mgmt->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_recv_msg *recv_msg;
struct completion *recv_done;
+ unsigned long timeo;
u16 msg_id;
int err;
@@ -276,7 +279,9 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
goto unlock_sync_msg;
}
- if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) {
+ timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
+
+ if (!wait_for_completion_timeout(recv_done, timeo)) {
dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
err = -ETIMEDOUT;
goto unlock_sync_msg;
@@ -350,6 +355,7 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
{
struct hinic_hwif *hwif = pf_to_mgmt->hwif;
struct pci_dev *pdev = hwif->pdev;
+ u32 timeout = 0;
if (sync != HINIC_MGMT_MSG_SYNC) {
dev_err(&pdev->dev, "Invalid MGMT msg type\n");
@@ -361,9 +367,12 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
return -EINVAL;
}
+ if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
+ timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
+
return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
buf_out, out_size, MGMT_DIRECT_SEND,
- MSG_NOT_RESP);
+ MSG_NOT_RESP, timeout);
}
/**
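Both hinic hunks above pass the management-message timeout through msecs_to_jiffies(), since wait_for_completion_timeout() expects jiffies, and allow a per-command override. A small, self-contained sketch of that pattern with assumed timeout values:

#include <linux/types.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define MY_DEFAULT_TIMEOUT_MS	5000U
#define MY_SLOW_CMD_TIMEOUT_MS	25000U

/* Wait for a response, converting milliseconds to jiffies; a timeout of 0
 * selects the default. Returns 0 on success or -ETIMEDOUT.
 */
static int my_wait_for_reply(struct completion *done, u32 timeout_ms)
{
	unsigned long timeo;

	timeo = msecs_to_jiffies(timeout_ms ? timeout_ms :
				 MY_DEFAULT_TIMEOUT_MS);

	if (!wait_for_completion_timeout(done, timeo))
		return -ETIMEDOUT;

	return 0;
}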
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 2352046971a4..23de5fa3d885 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -475,7 +475,6 @@ static int hinic_close(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
unsigned int flags;
- int err;
down(&nic_dev->mgmt_lock);
@@ -489,20 +488,9 @@ static int hinic_close(struct net_device *netdev)
up(&nic_dev->mgmt_lock);
- err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to set func port state\n");
- nic_dev->flags |= (flags & HINIC_INTF_UP);
- return err;
- }
+ hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
- err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
- if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to set port state\n");
- nic_dev->flags |= (flags & HINIC_INTF_UP);
- return err;
- }
+ hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
free_rxqs(nic_dev);
free_txqs(nic_dev);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 40ad1e503255..75a1915d95aa 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1330,6 +1330,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
int offset = ibmveth_rxq_frame_offset(adapter);
int csum_good = ibmveth_rxq_csum_good(adapter);
int lrg_pkt = ibmveth_rxq_large_packet(adapter);
+ __sum16 iph_check = 0;
skb = ibmveth_rxq_get_buffer(adapter);
@@ -1366,16 +1367,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);
- if (csum_good) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- ibmveth_rx_csum_helper(skb, adapter);
+ /* PHYP without PLSO support places a -1 in the ip
+ * checksum for large send frames.
+ */
+ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ iph_check = iph->check;
}
- if (length > netdev->mtu + ETH_HLEN) {
+ if ((length > netdev->mtu + ETH_HLEN) ||
+ lrg_pkt || iph_check == 0xffff) {
ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
adapter->rx_large_packets++;
}
+ if (csum_good) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ ibmveth_rx_csum_helper(skb, adapter);
+ }
+
napi_gro_receive(napi, skb); /* send it up */
netdev->stats.rx_packets++;
@@ -1695,7 +1706,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev->min_mtu = IBMVETH_MIN_MTU;
- netdev->max_mtu = ETH_MAX_MTU;
+ netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 8a1916443235..9f72cd3b1d24 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -212,8 +212,13 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
if (!ltb->buff)
return;
+ /* VIOS automatically unmaps the long term buffer at remote
+ * end for the following resets:
+ * FAILOVER, MOBILITY, TIMEOUT.
+ */
if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
- adapter->reset_reason != VNIC_RESET_MOBILITY)
+ adapter->reset_reason != VNIC_RESET_MOBILITY &&
+ adapter->reset_reason != VNIC_RESET_TIMEOUT)
send_request_unmap(adapter, ltb->map_id);
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
@@ -792,12 +797,13 @@ static int ibmvnic_login(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unsigned long timeout = msecs_to_jiffies(30000);
int retry_count = 0;
+ int retries = 10;
bool retry;
int rc;
do {
retry = false;
- if (retry_count > IBMVNIC_MAX_QUEUES) {
+ if (retry_count > retries) {
netdev_warn(netdev, "Login attempts exceeded\n");
return -1;
}
@@ -812,11 +818,23 @@ static int ibmvnic_login(struct net_device *netdev)
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
- netdev_warn(netdev, "Login timed out\n");
- return -1;
+ netdev_warn(netdev, "Login timed out, retrying...\n");
+ retry = true;
+ adapter->init_done_rc = 0;
+ retry_count++;
+ continue;
}
- if (adapter->init_done_rc == PARTIALSUCCESS) {
+ if (adapter->init_done_rc == ABORTED) {
+ netdev_warn(netdev, "Login aborted, retrying...\n");
+ retry = true;
+ adapter->init_done_rc = 0;
+ retry_count++;
+ /* FW or device may be busy, so
+ * wait a bit before retrying login
+ */
+ msleep(500);
+ } else if (adapter->init_done_rc == PARTIALSUCCESS) {
retry_count++;
release_sub_crqs(adapter, 1);
@@ -1074,19 +1092,13 @@ static int __ibmvnic_open(struct net_device *netdev)
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_disable(&adapter->napi[i]);
+ ibmvnic_napi_disable(adapter);
release_resources(adapter);
return rc;
}
netif_tx_start_all_queues(netdev);
- if (prev_state == VNIC_CLOSED) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
- }
-
adapter->state = VNIC_OPEN;
return rc;
}
@@ -1244,10 +1256,8 @@ static int __ibmvnic_close(struct net_device *netdev)
adapter->state = VNIC_CLOSING;
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- if (rc)
- return rc;
adapter->state = VNIC_CLOSED;
- return 0;
+ return rc;
}
static int ibmvnic_close(struct net_device *netdev)
@@ -1510,6 +1520,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
skb_copy_from_linear_data(skb, dst, skb->len);
}
+	/* post changes to long_term_buff *dst before VIOS accesses it */
+ dma_wmb();
+
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
@@ -1742,7 +1755,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
u64 old_num_rx_queues, old_num_tx_queues;
u64 old_num_rx_slots, old_num_tx_slots;
struct net_device *netdev = adapter->netdev;
- int i, rc;
+ int rc;
netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
rwi->reset_reason);
@@ -1860,13 +1873,11 @@ static int do_reset(struct ibmvnic_adapter *adapter,
/* refresh device's multicast list */
ibmvnic_set_multi(netdev);
- /* kick napi */
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
-
if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
- adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
+ adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) {
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
+ call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
+ }
netif_carrier_on(netdev);
@@ -2143,6 +2154,12 @@ restart_poll:
if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
break;
+ /* The queue entry at the current index is peeked at above
+ * to determine that there is a valid descriptor awaiting
+ * processing. We want to be sure that the current slot
+ * holds a valid descriptor before reading its contents.
+ */
+ dma_rmb();
next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
rx_buff =
(struct ibmvnic_rx_buff *)be64_to_cpu(next->
@@ -2167,6 +2184,8 @@ restart_poll:
offset = be16_to_cpu(next->rx_comp.off_frame_data);
flags = next->rx_comp.flags;
skb = rx_buff->skb;
+ /* load long_term_buff before copying to skb */
+ dma_rmb();
skb_copy_to_linear_data(skb, rx_buff->data + offset,
length);
@@ -2547,6 +2566,9 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
int i, rc;
+ if (!adapter->tx_scrq || !adapter->rx_scrq)
+ return -EINVAL;
+
for (i = 0; i < adapter->req_tx_queues; i++) {
netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
@@ -2731,10 +2753,12 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
if (adapter->resetting &&
adapter->reset_reason == VNIC_RESET_MOBILITY) {
- struct irq_desc *desc = irq_to_desc(scrq->irq);
- struct irq_chip *chip = irq_desc_get_chip(desc);
+ u64 val = (0xff000000) | scrq->hw_irq;
- chip->irq_eoi(&desc->irq_data);
+ rc = plpar_hcall_norets(H_EOI, val);
+ if (rc)
+ dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+ val, rc);
}
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
@@ -2760,13 +2784,18 @@ restart_loop:
unsigned int pool = scrq->pool_index;
int num_entries = 0;
+ /* The queue entry at the current index is peeked at above
+ * to determine that there is a valid descriptor awaiting
+ * processing. We want to be sure that the current slot
+ * holds a valid descriptor before reading its contents.
+ */
+ dma_rmb();
+
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
- if (next->tx_comp.rcs[i]) {
+ if (next->tx_comp.rcs[i])
dev_err(dev, "tx error %x\n",
next->tx_comp.rcs[i]);
- continue;
- }
index = be32_to_cpu(next->tx_comp.correlators[i]);
if (index & IBMVNIC_TSO_POOL_MASK) {
tx_pool = &adapter->tso_pool[pool];
@@ -2911,7 +2940,7 @@ req_rx_irq_failed:
req_tx_irq_failed:
for (j = 0; j < i; j++) {
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
- irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ irq_dispose_mapping(adapter->tx_scrq[j]->irq);
}
release_sub_crqs(adapter, 1);
return rc;
@@ -3156,6 +3185,11 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
}
spin_unlock_irqrestore(&scrq->lock, flags);
+ /* Ensure that the entire buffer descriptor has been
+ * loaded before reading its contents
+ */
+ dma_rmb();
+
return entry;
}
@@ -4293,12 +4327,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
break;
}
- dev_info(dev, "Partner protocol version is %d\n",
- crq->version_exchange_rsp.version);
- if (be16_to_cpu(crq->version_exchange_rsp.version) <
- ibmvnic_version)
- ibmvnic_version =
+ ibmvnic_version =
be16_to_cpu(crq->version_exchange_rsp.version);
+ dev_info(dev, "Partner protocol version is %d\n",
+ ibmvnic_version);
send_cap_queries(adapter);
break;
case QUERY_CAPABILITY_RSP:
@@ -4400,6 +4432,12 @@ static void ibmvnic_tasklet(void *data)
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
+ */
+ dma_rmb();
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
}
@@ -4446,6 +4484,9 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
/* Clean out the queue */
+ if (!crq->msgs)
+ return -EINVAL;
+
memset(crq->msgs, 0, PAGE_SIZE);
crq->cur = 0;
crq->active = false;
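Several ibmvnic hunks insert dma_rmb()/dma_wmb() around descriptor and long-term-buffer accesses; the comments in the patch explain why. The sketch below shows the general consume/produce ordering with a hypothetical descriptor layout, purely as an illustration of barrier placement, not the driver's actual structures.

#include <linux/types.h>
#include <linux/string.h>
#include <asm/barrier.h>

struct my_desc {
	u8  valid;		/* set by the device when the entry is ready */
	u8  pad[7];
	__be64 data;
};

/* Consumer side: only read the payload after 'valid' is observed, and order
 * the payload load after that check with dma_rmb().
 */
static bool my_consume(struct my_desc *desc, __be64 *out)
{
	if (!desc->valid)
		return false;

	dma_rmb();		/* don't read descriptor contents before 'valid' */
	*out = desc->data;
	return true;
}

/* Producer side: make the buffer contents visible before the ownership flip
 * that lets the device look at them.
 */
static void my_produce(void *buf, const void *src, size_t len,
		       struct my_desc *desc)
{
	memcpy(buf, src, len);
	dma_wmb();		/* buffer writes complete before the handoff */
	desc->valid = 1;	/* device may now consume the buffer */
}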
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 2110d5f2da19..195108858f38 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -542,8 +542,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter)
WARN_ON(in_interrupt());
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
- e1000_down(adapter);
- e1000_up(adapter);
+
+ /* only run the task if not already down */
+ if (!test_bit(__E1000_DOWN, &adapter->flags)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ }
+
clear_bit(__E1000_RESETTING, &adapter->flags);
}
@@ -1433,10 +1438,15 @@ int e1000_close(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
int count = E1000_CHECK_RESET_COUNT;
- while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
usleep_range(10000, 20000);
- WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ WARN_ON(count < 0);
+
+ /* signal that we're down so that the reset task will no longer run */
+ set_bit(__E1000_DOWN, &adapter->flags);
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+
e1000_down(adapter);
e1000_power_down_phy(adapter);
e1000_free_irq(adapter);
@@ -3144,8 +3154,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
if (skb->data_len && hdr_len == len) {
switch (hw->mac_type) {
+ case e1000_82544: {
unsigned int pull_size;
- case e1000_82544:
+
/* Make sure we have room to chop off 4 bytes,
* and that the end alignment will work out to
* this hardware's requirements
@@ -3166,6 +3177,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
}
len = skb_headlen(skb);
break;
+ }
default:
/* do nothing */
break;
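The e1000 hunks make the close path claim the __E1000_RESETTING bit itself and mark the adapter down so the reset task cannot race with teardown. A generic sketch of that claim-then-mark ordering with invented flag names:

#include <linux/bitops.h>
#include <linux/delay.h>

#define MY_DOWN		0
#define MY_RESETTING	1

static void my_close(unsigned long *flags)
{
	int count = 50;

	/* take the RESETTING bit ourselves instead of merely waiting for it */
	while (test_and_set_bit(MY_RESETTING, flags) && count--)
		usleep_range(10000, 20000);

	/* signal that we are down so a pending reset task becomes a no-op,
	 * then release the bit we just claimed
	 */
	set_bit(MY_DOWN, flags);
	clear_bit(MY_RESETTING, flags);

	/* ... proceed with the actual teardown ... */
}

static void my_reset_task(unsigned long *flags)
{
	if (test_bit(MY_DOWN, flags))
		return;		/* interface already closed, nothing to reset */

	/* ... perform the reset ... */
}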
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index b9309302c29e..16653e94009e 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -899,6 +899,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
} else {
data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+ if (ret_val)
+ return ret_val;
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index c760dc72c520..c5a119daa7f3 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -574,7 +574,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
#define er32(reg) __er32(hw, E1000_##reg)
-s32 __ew32_prepare(struct e1000_hw *hw);
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a7b5a47ab83d..6bbe7afdf30c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -119,14 +119,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
* has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
* and try again a number of times.
**/
-s32 __ew32_prepare(struct e1000_hw *hw)
+static void __ew32_prepare(struct e1000_hw *hw)
{
s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
udelay(50);
-
- return i;
}
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
@@ -607,11 +605,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
- s32 ret_val = __ew32_prepare(hw);
+ __ew32_prepare(hw);
writel(i, rx_ring->tail);
- if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
+ if (unlikely(i != readl(rx_ring->tail))) {
u32 rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
@@ -624,11 +622,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
- s32 ret_val = __ew32_prepare(hw);
+ __ew32_prepare(hw);
writel(i, tx_ring->tail);
- if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
+ if (unlikely(i != readl(tx_ring->tail))) {
u32 tctl = er32(TCTL);
ew32(TCTL, tctl & ~E1000_TCTL_EN);
@@ -5251,6 +5249,10 @@ static void e1000_watchdog_task(struct work_struct *work)
/* oops */
break;
}
+ if (hw->mac.type == e1000_pch_spt) {
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ }
}
/* enable transmits in the hardware, need to do this
@@ -5920,15 +5922,19 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
+ rtnl_lock();
/* don't run the task if already down */
- if (test_bit(__E1000_DOWN, &adapter->state))
+ if (test_bit(__E1000_DOWN, &adapter->state)) {
+ rtnl_unlock();
return;
+ }
if (!(adapter->flags & FLAG_RESTART_NOW)) {
e1000e_dump(adapter);
e_err("Reset adapter unexpectedly\n");
}
e1000e_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -6306,11 +6312,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 ctrl, ctrl_ext, rctl, status;
- /* Runtime suspend should only enable wakeup for link changes */
- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+ u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
+ /* Runtime suspend should only enable wakeup for link changes */
+ if (runtime)
+ wufc = E1000_WUFC_LNKC;
+ else if (device_may_wakeup(&pdev->dev))
+ wufc = adapter->wol;
+ else
+ wufc = 0;
+
status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -6367,7 +6379,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
} else if (hw->mac.type >= e1000_pch_lpt) {
- if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
/* ULP does not support wake from unicast, multicast
* or broadcast.
*/
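In e1000e the reset task now runs under rtnl_lock() and bails out early if the adapter is already down. Shown below is the shape of that pattern with hypothetical state bits; it is a sketch, not the driver code.

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

struct my_adapter {
	unsigned long state;
#define MY_STATE_DOWN	0
	struct work_struct reset_task;
};

static void my_reset_task(struct work_struct *work)
{
	struct my_adapter *adapter =
		container_of(work, struct my_adapter, reset_task);

	rtnl_lock();
	/* don't touch the hardware if the interface was taken down
	 * between the schedule and now
	 */
	if (test_bit(MY_STATE_DOWN, &adapter->state)) {
		rtnl_unlock();
		return;
	}

	/* ... reinitialise the device while holding the RTNL ... */

	rtnl_unlock();
}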
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index f84e2c2d02c0..3c921dfc2056 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -127,6 +127,7 @@ enum i40e_state_t {
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
__I40E_PF_RESET_REQUESTED,
+ __I40E_PF_RESET_AND_REBUILD_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_REQUESTED,
@@ -147,11 +148,15 @@ enum i40e_state_t {
__I40E_CLIENT_SERVICE_REQUESTED,
__I40E_CLIENT_L2_CHANGE,
__I40E_CLIENT_RESET,
+ __I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
+ __I40E_VFS_RELEASING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
+#define I40E_PF_RESET_AND_REBUILD_FLAG \
+ BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
/* VSI state flags */
enum i40e_vsi_state_t {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 80e3eec6134e..a5e5e7e14e6c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1206,7 +1206,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
#define I40E_AQC_SET_VSI_DEFAULT 0x08
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
-#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
+#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 5f3b8b9ff511..c1832a848714 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -377,6 +377,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state);
i40e_client_del_instance(pf);
+ return;
}
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index eb0ae6ab01e2..e75b4c4872c0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1971,6 +1971,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
}
/**
+ * i40e_is_aq_api_ver_ge
+ * @aq: pointer to AdminQ info containing HW API version to compare
+ * @maj: API major value
+ * @min: API minor value
+ *
+ * Assert whether current HW API version is greater/equal than provided.
+ **/
+static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
+ u16 min)
+{
+ return (aq->api_maj_ver > maj ||
+ (aq->api_maj_ver == maj && aq->api_min_ver >= min));
+}
+
+/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -2095,18 +2110,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (rx_only_promisc &&
- (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1)))
- flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+ if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
}
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
- if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1))
- cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ cmd->valid_flags |=
+ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
cmd->seid = cpu_to_le16(seid);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2203,11 +2216,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
- if (enable)
+ if (enable) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
+ }
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ cmd->valid_flags |=
+ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
cmd->seid = cpu_to_le16(seid);
cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
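The i40e_common.c hunks replace repeated open-coded AdminQ version comparisons with a small helper. Written against an assumed pair of major/minor fields, the isolated comparison is just:

#include <linux/types.h>

struct my_api_ver {
	u16 maj;
	u16 min;
};

/* True when the reported API version is >= maj.min. Comparing the major
 * number first avoids the classic bug of (maj >= X && min >= Y), which
 * wrongly rejects e.g. 2.0 when asked for 1.5.
 */
static bool my_api_ver_ge(const struct my_api_ver *v, u16 maj, u16 min)
{
	return v->maj > maj || (v->maj == maj && v->min >= min);
}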
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index a6b0f605a7d8..9148d93c5c63 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -796,6 +796,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
/* Set flow control settings */
ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
switch (hw->fc.requested_mode) {
case I40E_FC_FULL:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 23b31b2ff5cc..fa0e7582159f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -42,6 +42,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
@@ -446,11 +448,15 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
i40e_get_netdev_stats_struct_tx(ring, stats);
if (i40e_enabled_xdp_vsi(vsi)) {
- ring++;
+ ring = READ_ONCE(vsi->xdp_rings[i]);
+ if (!ring)
+ continue;
i40e_get_netdev_stats_struct_tx(ring, stats);
}
- ring++;
+ ring = READ_ONCE(vsi->rx_rings[i]);
+ if (!ring)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
packets = ring->stats.packets;
@@ -793,6 +799,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
p = READ_ONCE(vsi->tx_rings[q]);
+ if (!p)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
@@ -806,8 +814,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
- /* Rx queue is part of the same block as Tx queue */
- p = &p[1];
+ /* locate Rx ring */
+ p = READ_ONCE(vsi->rx_rings[q]);
+ if (!p)
+ continue;
+
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
packets = p->stats.packets;
@@ -2566,7 +2577,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
return;
- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
+ if (test_bit(__I40E_VF_DISABLE, pf->state)) {
set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
return;
}
@@ -2584,7 +2595,6 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
}
}
}
- clear_bit(__I40E_VF_DISABLE, pf->state);
}
/**
@@ -3886,8 +3896,16 @@ static irqreturn_t i40e_intr(int irq, void *data)
}
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
- ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
- set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ /* disable any further VFLR event notifications */
+ if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
+ u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+ reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ } else {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ }
}
if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
@@ -6988,6 +7006,8 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
if (filter->flags >= ARRAY_SIZE(flag_table))
return I40E_ERR_CONFIG;
+ memset(&cld_filter, 0, sizeof(cld_filter));
+
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter);
@@ -7051,10 +7071,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
/* adding filter using src_port/src_ip is not supported at this stage */
- if (filter->src_port || filter->src_ipv4 ||
+ if (filter->src_port ||
+ (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
!ipv6_addr_any(&filter->ip.v6.src_ip6))
return -EOPNOTSUPP;
+ memset(&cld_filter, 0, sizeof(cld_filter));
+
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter.element);
@@ -7078,7 +7101,7 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
}
- } else if (filter->dst_ipv4 ||
+ } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
!ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
cld_filter.element.flags =
cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
@@ -7912,6 +7935,14 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf, lock_acquired);
+ } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+ /* Request a PF Reset
+ *
+ * Resets PF and reinitializes PFs VSI.
+ */
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, true, lock_acquired);
+
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
@@ -9372,7 +9403,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
- u8 set_fc_aq_fail = 0;
i40e_status ret;
u32 val;
int v;
@@ -9453,13 +9483,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- /* make sure our flow control settings are restored */
- ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
- if (ret)
- dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
* need to rebuild the switch model in the HW.
@@ -10196,10 +10219,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
if (vsi->tx_rings && vsi->tx_rings[0]) {
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
- vsi->tx_rings[i] = NULL;
- vsi->rx_rings[i] = NULL;
+ WRITE_ONCE(vsi->tx_rings[i], NULL);
+ WRITE_ONCE(vsi->rx_rings[i], NULL);
if (vsi->xdp_rings)
- vsi->xdp_rings[i] = NULL;
+ WRITE_ONCE(vsi->xdp_rings[i], NULL);
}
}
}
@@ -10233,7 +10256,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
ring->itr_setting = pf->tx_itr_default;
- vsi->tx_rings[i] = ring++;
+ WRITE_ONCE(vsi->tx_rings[i], ring++);
if (!i40e_enabled_xdp_vsi(vsi))
goto setup_rx;
@@ -10251,7 +10274,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
set_ring_xdp(ring);
ring->itr_setting = pf->tx_itr_default;
- vsi->xdp_rings[i] = ring++;
+ WRITE_ONCE(vsi->xdp_rings[i], ring++);
setup_rx:
ring->queue_index = i;
@@ -10264,7 +10287,7 @@ setup_rx:
ring->size = 0;
ring->dcb_tc = 0;
ring->itr_setting = pf->rx_itr_default;
- vsi->rx_rings[i] = ring;
+ WRITE_ONCE(vsi->rx_rings[i], ring);
}
return 0;
@@ -11080,6 +11103,8 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
struct i40e_aqc_configure_partition_bw_data bw_data;
i40e_status status;
+ memset(&bw_data, 0, sizeof(bw_data));
+
/* Set the valid bit for this PF */
bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
@@ -11186,6 +11211,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
{
int err = 0;
int size;
+ u16 pow;
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
@@ -11204,6 +11230,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
pf->hw.func_caps.num_tx_qp);
+
+ /* find the next higher power-of-2 of num cpus */
+ pow = roundup_pow_of_two(num_online_cpus());
+ pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
+
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
pf->alloc_rss_size = min_t(int, pf->rss_size_max,
@@ -13596,7 +13627,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
u32 val;
u32 i;
- u8 set_fc_aq_fail;
err = pci_enable_device_mem(pdev);
if (err)
@@ -13876,24 +13906,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
- /* Make sure flow control is set according to current settings */
- err = i40e_set_fc(hw, &set_fc_aq_fail, true);
- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
- dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on get_phy_cap\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
- dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on set_phy_config\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
- dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on get_link_info\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
-
/* if FDIR VSI was set up, start it now */
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
@@ -13950,6 +13962,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_info(&pdev->dev,
"setup of misc vector failed: %d\n", err);
+ i40e_cloud_filter_exit(pf);
+ i40e_fdir_teardown(pf);
goto err_vsis;
}
}
@@ -14143,6 +14157,14 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
+ while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ usleep_range(1000, 2000);
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
+ i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ }
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
@@ -14156,11 +14178,6 @@ static void i40e_remove(struct pci_dev *pdev)
*/
i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
- i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
- }
-
i40e_fdir_teardown(pf);
/* If there is a switch structure or any orphans, remove them.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b5042d1a63c0..9ccbcd88bf1e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3070,13 +3070,16 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
l4_proto = ip.v4->protocol;
} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+ int ret;
+
tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
- if (l4.hdr != exthdr)
- ipv6_skip_exthdr(skb, exthdr - skb->data,
- &l4_proto, &frag_off);
+ ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ if (ret < 0)
+ return -1;
}
/* define outer transport */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 6a677fd540d6..3c1533c627fd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -137,6 +137,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
**/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
+ struct i40e_pf *pf = vf->pf;
int i;
i40e_vc_notify_vf_reset(vf);
@@ -147,6 +148,11 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
* ensure a reset.
*/
for (i = 0; i < 20; i++) {
+ /* If the PF is releasing its VFs, resetting a VF is impossible,
+ * so bail out here.
+ */
+ if (test_bit(__I40E_VFS_RELEASING, pf->state))
+ return;
if (i40e_reset_vf(vf, false))
return;
usleep_range(10000, 20000);
@@ -181,7 +187,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
* check for the valid queue id
**/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
- u8 qid)
+ u16 qid)
{
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
@@ -196,7 +202,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
*
* check for the valid vector id
**/
-static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
struct i40e_pf *pf = vf->pf;
@@ -441,14 +447,28 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
u32 v_idx, i, reg_idx, reg;
u32 next_q_idx, next_q_type;
u32 msix_vf, size;
+ int ret = 0;
+
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+
+ if (qvlist_info->num_vectors > msix_vf) {
+ dev_warn(&pf->pdev->dev,
+ "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
+ qvlist_info->num_vectors,
+ msix_vf);
+ ret = -EINVAL;
+ goto err_out;
+ }
size = sizeof(struct virtchnl_iwarp_qvlist_info) +
(sizeof(struct virtchnl_iwarp_qv_info) *
(qvlist_info->num_vectors - 1));
+ kfree(vf->qvlist_info);
vf->qvlist_info = kzalloc(size, GFP_KERNEL);
- if (!vf->qvlist_info)
- return -ENOMEM;
-
+ if (!vf->qvlist_info) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -459,8 +479,10 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
v_idx = qv_info->v_idx;
/* Validate vector id belongs to this vf */
- if (!i40e_vc_isvalid_vector_id(vf, v_idx))
- goto err;
+ if (!i40e_vc_isvalid_vector_id(vf, v_idx)) {
+ ret = -EINVAL;
+ goto err_free;
+ }
vf->qvlist_info->qv_info[i] = *qv_info;
@@ -502,10 +524,11 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
}
return 0;
-err:
+err_free:
kfree(vf->qvlist_info);
vf->qvlist_info = NULL;
- return -EINVAL;
+err_out:
+ return ret;
}
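The checks added above bound a VF-supplied vector count before it feeds an allocation size, and free any previously stored qvlist_info so a repeated request does not leak the old buffer. A hedged, generic sketch of that shape (invented struct and limit names, using the modern overflow-safe struct_size() helper rather than the driver's sizeof arithmetic):

    #include <linux/types.h>
    #include <linux/slab.h>
    #include <linux/overflow.h>

    struct vec_list {
            u32 num;
            u32 vec[];                      /* flexible array */
    };

    static struct vec_list *rebuild_vec_list(struct vec_list **slot,
                                             u32 num, u32 max)
    {
            struct vec_list *new;

            if (num > max)                  /* reject untrusted counts first */
                    return NULL;

            kfree(*slot);                   /* drop any previous allocation */
            new = kzalloc(struct_size(new, vec, num), GFP_KERNEL);
            if (new)
                    new->num = num;
            *slot = new;
            return new;
    }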
/**
@@ -1187,7 +1210,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * Returns true if the VF is reset, false otherwise.
+ * Returns true if the VF is currently in reset, was reset successfully,
+ * or VF resets are disabled; returns false otherwise.
**/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
@@ -1197,11 +1221,14 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
u32 reg;
int i;
+ if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
+ return true;
+
/* If the VFs have been disabled, this means something else is
* resetting the VF, so we shouldn't continue.
*/
if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
- return false;
+ return true;
i40e_trigger_vf_reset(vf, flr);
@@ -1360,11 +1387,22 @@ void i40e_free_vfs(struct i40e_pf *pf)
if (!pf->vf)
return;
+
+ set_bit(__I40E_VFS_RELEASING, pf->state);
while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
usleep_range(1000, 2000);
i40e_notify_client_of_vf_enable(pf, 0);
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
/* Amortize wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) {
if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
@@ -1380,15 +1418,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
}
- /* Disable IOV before freeing resources. This lets any VF drivers
- * running in the host get themselves cleaned up before we yank
- * the carpet out from underneath their feet.
- */
- if (!pci_vfs_assigned(pf->pdev))
- pci_disable_sriov(pf->pdev);
- else
- dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
-
/* free up VF resources */
tmp = pf->num_alloc_vfs;
pf->num_alloc_vfs = 0;
@@ -1417,6 +1446,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
}
}
clear_bit(__I40E_VF_DISABLE, pf->state);
+ clear_bit(__I40E_VFS_RELEASING, pf->state);
}
#ifdef CONFIG_PCI_IOV
@@ -1546,7 +1576,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
return i40e_pci_sriov_enable(pdev, num_vfs);
}
@@ -1554,7 +1584,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
return -EINVAL;
@@ -3335,7 +3365,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
- goto err;
+ goto err_out;
}
if (!vf->adq_enabled) {
@@ -3343,15 +3373,15 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
"VF %d: ADq is not enabled, can't apply cloud filter\n",
vf->vf_id);
aq_ret = I40E_ERR_PARAM;
- goto err;
+ goto err_out;
}
if (i40e_validate_cloud_filter(vf, vcf)) {
dev_info(&pf->pdev->dev,
"VF %d: Invalid input/s, can't apply cloud filter\n",
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
- goto err;
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
}
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
@@ -3412,13 +3442,17 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
"VF %d: Failed to add cloud filter, err %s aq_err %s\n",
vf->vf_id, i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- goto err;
+ goto err_free;
}
INIT_HLIST_NODE(&cfilter->cloud_node);
hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
+ /* ownership passed to the list; clear so the error path won't free it */
+ cfilter = NULL;
vf->num_cloud_filters++;
-err:
+err_free:
+ kfree(cfilter);
+err_out:
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
aq_ret);
}
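The reworked error handling above relies on two facts: once the filter is added to the VF's list, the local pointer is cleared so the list owns the allocation, and kfree(NULL) is a no-op, which lets the shared err_free label free unconditionally. A small hedged sketch of that ownership-transfer idiom (kernel C, invented names):

    #include <linux/slab.h>
    #include <linux/list.h>
    #include <linux/errno.h>

    struct filter {
            struct list_head node;
            int key;
    };

    static int add_filter(struct list_head *head, int key)
    {
            struct filter *f = kzalloc(sizeof(*f), GFP_KERNEL);
            int ret = 0;

            if (!f)
                    return -ENOMEM;

            if (key < 0) {                  /* validation failure path */
                    ret = -EINVAL;
                    goto err_free;
            }

            f->key = key;
            list_add(&f->node, head);
            f = NULL;                       /* the list owns it now */
    err_free:
            kfree(f);                       /* kfree(NULL) is a no-op */
            return ret;
    }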
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 0f2cdb06e6ef..7960f0e4d872 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -30,8 +30,8 @@ enum ice_ctl_q {
ICE_CTL_Q_ADMIN,
};
-/* Control Queue timeout settings - max delay 250ms */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
+/* Control Queue timeout settings - max delay 1s */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
struct ice_ctl_q_ring {
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 50954e444985..2e17625e6c35 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -143,7 +143,8 @@ static int igb_get_link_ksettings(struct net_device *netdev,
u32 speed;
u32 supported, advertising;
- status = rd32(E1000_STATUS);
+ status = pm_runtime_suspended(&adapter->pdev->dev) ?
+ 0 : rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
supported = (SUPPORTED_10baseT_Half |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 36db874f3c92..d85eb80d8249 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6226,9 +6226,18 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
+ rtnl_lock();
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IGB_DOWN, &adapter->state) ||
+ test_bit(__IGB_RESETTING, &adapter->state)) {
+ rtnl_unlock();
+ return;
+ }
+
igb_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 0bd1294ba517..39c5e6fdb72c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2243,7 +2243,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
}
/* Configure pause time (2 TCs per register) */
- reg = hw->fc.pause_time * 0x00010001;
+ reg = hw->fc.pause_time * 0x00010001U;
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index ccd852ad62a4..d50c5b55da18 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
}
/* alloc the udl from per cpu ddp pool */
- ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
+ ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index d361f570ca37..952630cb882c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -923,7 +923,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring->queue_index = txr_idx;
/* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
+ WRITE_ONCE(adapter->tx_ring[txr_idx], ring);
/* update count and index */
txr_count--;
@@ -950,7 +950,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
set_ring_xdp(ring);
/* assign ring to adapter */
- adapter->xdp_ring[xdp_idx] = ring;
+ WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
/* update count and index */
xdp_count--;
@@ -993,7 +993,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring->queue_index = rxr_idx;
/* assign ring to adapter */
- adapter->rx_ring[rxr_idx] = ring;
+ WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);
/* update count and index */
rxr_count--;
@@ -1022,13 +1022,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
ixgbe_for_each_ring(ring, q_vector->tx) {
if (ring_is_xdp(ring))
- adapter->xdp_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
else
- adapter->tx_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
}
ixgbe_for_each_ring(ring, q_vector->rx)
- adapter->rx_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
adapter->q_vector[v_idx] = NULL;
napi_hash_del(&q_vector->napi);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8177276500f5..8fcd3ffb43e0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1943,7 +1943,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -1954,7 +1955,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
/* The last offset is a bit aggressive in that we assume the
@@ -2019,11 +2020,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff **skb,
- const unsigned int size)
+ const unsigned int size,
+ int *rx_buffer_pgcnt)
{
struct ixgbe_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
prefetchw(rx_buffer->page);
*skb = rx_buffer->skb;
@@ -2053,9 +2061,10 @@ skip_sync:
static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ int rx_buffer_pgcnt)
{
- if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+ if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -2258,7 +2267,8 @@ static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
rx_buffer->page_offset ^= truesize;
#else
unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
SKB_DATA_ALIGN(size);
rx_buffer->page_offset += truesize;
@@ -2298,6 +2308,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
+ int rx_buffer_pgcnt;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
@@ -2317,7 +2328,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
dma_rmb();
- rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+ rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
@@ -2359,7 +2370,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
break;
}
- ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+ ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
cleaned_count++;
/* place incomplete frames back on ring for completion */
@@ -7004,7 +7015,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
+
+ if (!rx_ring)
+ continue;
non_eop_descs += rx_ring->rx_stats.non_eop_descs;
alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
@@ -7025,15 +7039,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
packets = 0;
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
+
+ if (!tx_ring)
+ continue;
restart_queue += tx_ring->tx_stats.restart_queue;
tx_busy += tx_ring->tx_stats.tx_busy;
bytes += tx_ring->stats.bytes;
packets += tx_ring->stats.packets;
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+ struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
+ if (!xdp_ring)
+ continue;
restart_queue += xdp_ring->tx_stats.restart_queue;
tx_busy += xdp_ring->tx_stats.tx_busy;
bytes += xdp_ring->stats.bytes;
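The READ_ONCE() loads here pair with the WRITE_ONCE() stores added to the ring-assignment and ring-teardown paths earlier in this series (the i40e and ixgbe_lib.c hunks above): the stats path walks the ring pointer arrays without holding the locks that protect reconfiguration, so the pointers must be published and consumed with single-copy-atomic accesses and checked for NULL. A minimal hedged sketch of the pattern (kernel C, invented names):

    #include <linux/compiler.h>

    struct ring { unsigned long packets; };

    struct adapter {
            struct ring *rings[8];
            int num_rings;
    };

    /* writer side: publish or retire a ring pointer with a single store */
    static void publish_ring(struct adapter *ad, int i, struct ring *r)
    {
            WRITE_ONCE(ad->rings[i], r);
    }

    /* lockless reader side: tolerate a concurrently retired ring */
    static unsigned long sum_packets(struct adapter *ad)
    {
            unsigned long total = 0;
            int i;

            for (i = 0; i < ad->num_rings; i++) {
                    struct ring *r = READ_ONCE(ad->rings[i]);

                    if (!r)
                            continue;
                    total += r->packets;
            }
            return total;
    }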
@@ -9468,8 +9487,10 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
input->sw_idx, queue);
- if (!err)
- ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+ if (err)
+ goto err_out_w_lock;
+
+ ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
spin_unlock(&adapter->fdir_perfect_lock);
if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index f6ffd9fb2079..8aaf856771d7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -467,12 +467,16 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return err;
}
-static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- int max_frame = msgbuf[1];
u32 max_frs;
+ if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+ e_err(drv, "VF max_frame %d out of range\n", max_frame);
+ return -EINVAL;
+ }
+
/*
* For 82599EB we have to keep all PFs and VFs operating with
* the same max_frame value in order to avoid sending an oversize
@@ -532,12 +536,6 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
}
}
- /* MTU < 68 is an error and causes problems on some kernels */
- if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
- e_err(drv, "VF max_frame %d out of range\n", max_frame);
- return -EINVAL;
- }
-
/* pull current max frame size from hardware */
max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
max_frs &= IXGBE_MHADD_MFS_MASK;
@@ -1240,7 +1238,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_LPE:
- retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
+ retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf);
break;
case IXGBE_VF_SET_MACVLAN:
retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index ae195f8adff5..9f804e2aba35 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -219,7 +219,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&lp->lock, flags);
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
}
@@ -1113,7 +1113,7 @@ out:
return rc;
probe_err_register:
- kfree(lp->td_ring);
+ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
probe_err_td_ring:
iounmap(lp->tx_dma_regs);
probe_err_dma_tx:
@@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev)
iounmap(lp->eth_regs);
iounmap(lp->rx_dma_regs);
iounmap(lp->tx_dma_regs);
+ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
unregister_netdev(bif->dev);
free_netdev(bif->dev);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index ee7857298361..cf7e10fbab0e 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -319,15 +319,25 @@ static int orion_mdio_probe(struct platform_device *pdev)
init_waitqueue_head(&dev->smi_busy_wait);
- for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
- dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
- if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
+ if (pdev->dev.of_node) {
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
+ if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_clk;
+ }
+ if (IS_ERR(dev->clk[i]))
+ break;
+ clk_prepare_enable(dev->clk[i]);
+ }
+ } else {
+ dev->clk[0] = clk_get(&pdev->dev, NULL);
+ if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto out_clk;
}
- if (IS_ERR(dev->clk[i]))
- break;
- clk_prepare_enable(dev->clk[i]);
+ if (!IS_ERR(dev->clk[0]))
+ clk_prepare_enable(dev->clk[0]);
}
dev->err_interrupt = platform_get_irq(pdev, 0);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 30a16cf796c7..fda5dd8c71eb 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3022,7 +3022,9 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
}
/* Setup XPS mapping */
- if (txq_number > 1)
+ if (pp->neta_armada3700)
+ cpu = 0;
+ else if (txq_number > 1)
cpu = txq->id % num_present_cpus();
else
cpu = pp->rxq_def % num_present_cpus();
@@ -3667,6 +3669,11 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
node_online);
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+ /* Armada 3700's per-cpu interrupt for mvneta is broken: all interrupts
+ * are routed to CPU 0, so we don't need any of the cpu-hotplug support.
+ */
+ if (pp->neta_armada3700)
+ return 0;
spin_lock(&pp->lock);
/*
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 04bee450eb3d..bc5cfe062b10 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -954,7 +954,7 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
- val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
+ val |= GENCONF_CTRL0_PORT0_RGMII;
else if (port->gop_id == 3)
val |= GENCONF_CTRL0_PORT1_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
@@ -3363,6 +3363,7 @@ static int mvpp2_open(struct net_device *dev)
if (!valid) {
netdev_err(port->dev,
"invalid configuration: no dt or link IRQ");
+ err = -ENOENT;
goto err_free_irq;
}
@@ -4265,8 +4266,6 @@ static void mvpp2_phylink_validate(struct net_device *dev,
phylink_set(mask, Autoneg);
phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
switch (state->interface) {
case PHY_INTERFACE_MODE_10GKR:
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 5692c6087bbb..dd590086fe6a 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
/* Clear entry invalidation bit */
pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
- /* Write tcam index - indirect access */
- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
-
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
+ /* Write tcam index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
+
return 0;
}
@@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
return -EINVAL;
}
+/* Drop flow control pause frames */
+static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+{
+ unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
+ struct mvpp2_prs_entry pe;
+ unsigned int len;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* For all ports - drop flow control frames */
+ pe.index = MVPP2_PE_FC_DROP;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ /* Set match on DA */
+ len = ETH_ALEN;
+ while (len--)
+ mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
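For reference, the parser entry above matches the IEEE 802.3x PAUSE destination address 01:80:C2:00:00:01 on all ports; such frames are link-local flow control and should be consumed by the MAC rather than delivered up the stack. A small hedged sketch of the same classification in plain C (a software check, not the TCAM/SRAM mechanism used above; PAUSE frames also carry the MAC Control EtherType 0x8808):

    #include <stdbool.h>
    #include <string.h>

    #define ETH_ALEN 6

    static const unsigned char pause_da[ETH_ALEN] = {
            0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
    };

    /* frame starts with the destination MAC; bytes 12-13 hold the EtherType */
    static bool is_pause_frame(const unsigned char *frame, unsigned int len)
    {
            if (len < ETH_ALEN * 2 + 2)
                    return false;
            if (memcmp(frame, pause_da, ETH_ALEN) != 0)
                    return false;
            return frame[12] == 0x88 && frame[13] == 0x08;
    }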
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
@@ -1162,6 +1194,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Create dummy entries for drop all and promiscuous modes */
+ mvpp2_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1647,8 +1680,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IPv6 header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* Jump to DIP of IPV6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index e22f6c85d380..4b68dd374733 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -129,7 +129,7 @@
#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
-/* reserved */
+#define MVPP2_PE_FC_DROP (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index ff2fea0f8b75..0d6a4e47e7a5 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1564,8 +1564,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
mdiobus_unregister(pep->smi_bus);
mdiobus_free(pep->smi_bus);
- unregister_netdev(dev);
cancel_work_sync(&pep->tx_timeout_task);
+ unregister_netdev(dev);
free_netdev(dev);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index d013f30019b6..2452d8ba4073 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -215,7 +215,7 @@ io_error:
static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
{
- u16 v;
+ u16 v = 0;
__gm_phy_read(hw, port, reg, &v);
return v;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 1d55f014725e..59f3dce3ab1d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1041,7 +1041,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->protocol = eth_type_trans(skb, netdev);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- RX_DMA_VID(trxd.rxd3))
+ (trxd.rxd2 & RX_DMA_VTAG))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
RX_DMA_VID(trxd.rxd3));
skb_record_rx_queue(skb, 0);
@@ -2452,6 +2452,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->irq = eth->irq[0];
eth->netdev[id]->dev.of_node = np;
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+
return 0;
free_netdev:
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 46819297fc3e..cb6b27861afa 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -285,6 +285,7 @@
#define RX_DMA_DONE BIT(31)
#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
+#define RX_DMA_VTAG BIT(15)
/* QDMA descriptor rxd3 */
#define RX_DMA_VID(_x) ((_x) & 0xfff)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index e639a365ac2d..6a005014d46a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -47,7 +47,7 @@
#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
-static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
+int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
{
int i, t;
int err = 0;
@@ -2011,8 +2011,6 @@ static int mlx4_en_set_tunable(struct net_device *dev,
return ret;
}
-#define MLX4_EEPROM_PAGE_LEN 256
-
static int mlx4_en_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
@@ -2047,7 +2045,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
break;
case MLX4_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5868ec11db1a..f3a0617733d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1384,8 +1384,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
}
priv->port_stats.tx_timeout++;
- en_dbg(DRV, priv, "Scheduling watchdog\n");
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
+ en_dbg(DRV, priv, "Scheduling port restart\n");
+ queue_work(mdev->workqueue, &priv->restart_task);
+ }
}
@@ -1739,6 +1741,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
+ clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
if (t != TX_XDP) {
tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
tx_ring->recycle_ring = NULL;
@@ -1835,6 +1838,7 @@ int mlx4_en_start_port(struct net_device *dev)
local_bh_enable();
}
+ clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -2005,7 +2009,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
static void mlx4_en_restart(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
- watchdog_task);
+ restart_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
@@ -2387,7 +2391,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (netif_running(dev)) {
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
- /* NIC is probably restarting - let watchdog task reset
+ /* NIC is probably restarting - let restart task reset
* the port */
en_dbg(DRV, priv, "Change MTU called with card down!?\n");
} else {
@@ -2396,7 +2400,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (err) {
en_err(priv, "Failed restarting port:%d\n",
priv->port);
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
+ &priv->state))
+ queue_work(mdev->workqueue, &priv->restart_task);
}
}
mutex_unlock(&mdev->state_lock);
@@ -2882,7 +2888,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
if (err) {
en_err(priv, "Failed starting port %d for XDP change\n",
priv->port);
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+ queue_work(mdev->workqueue, &priv->restart_task);
}
}
@@ -3280,7 +3287,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
- INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+ INIT_WORK(&priv->restart_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
@@ -3660,6 +3667,8 @@ int mlx4_en_reset_config(struct net_device *dev,
en_err(priv, "Failed starting port\n");
}
+ if (!err)
+ err = mlx4_en_moderation_update(priv);
out:
mutex_unlock(&mdev->state_lock);
kfree(tmp);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 45d9a5f8fa1b..f509a6ce31db 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -945,6 +945,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
bool clean_complete = true;
int done;
+ if (!budget)
+ return 0;
+
if (priv->tx_ring_num[TX_XDP]) {
xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
if (xdp_tx_cq->xdp_busy) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1857ee0f0871..6517e53da520 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -343,7 +343,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
.dma = tx_info->map0_dma,
};
- if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
+ if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
dma_unmap_page(priv->ddev, tx_info->map0_dma,
PAGE_SIZE, priv->dma_dir);
put_page(tx_info->page);
@@ -385,6 +385,35 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
+static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
+ u16 cqe_index, struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_info *tx_info;
+ struct mlx4_en_tx_desc *tx_desc;
+ u16 wqe_index;
+ int desc_size;
+
+ en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
+ ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
+ false);
+
+ wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
+ tx_info = &ring->tx_info[wqe_index];
+ desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
+ en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
+ wqe_index, desc_size);
+ tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);
+
+ if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+ return;
+
+ en_err(priv, "Scheduling port restart\n");
+ queue_work(mdev->workqueue, &priv->restart_task);
+}
+
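The helper above ends by queueing the port-restart work at most once: test_and_set_bit() on the RESTARTING flag is atomic, so concurrent error CQEs on different rings cannot schedule the restart twice, and the flag is cleared again once the port has been brought back up. A hedged sketch of that guard in isolation (generic kernel C, invented names; the driver queues onto its own workqueue rather than the system one):

    #include <linux/bitops.h>
    #include <linux/workqueue.h>

    enum { MY_STATE_RESTARTING };           /* bit number within 'state' */

    struct my_priv {
            unsigned long state;
            struct work_struct restart_task;
    };

    static void schedule_restart_once(struct my_priv *priv)
    {
            /* returns the old bit value: only the first caller queues work */
            if (test_and_set_bit(MY_STATE_RESTARTING, &priv->state))
                    return;
            schedule_work(&priv->restart_task);
    }

    static void restart_done(struct my_priv *priv)
    {
            clear_bit(MY_STATE_RESTARTING, &priv->state);
    }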
bool mlx4_en_process_tx_cq(struct net_device *dev,
struct mlx4_en_cq *cq, int napi_budget)
{
@@ -431,13 +460,10 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
dma_rmb();
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
- MLX4_CQE_OPCODE_ERROR)) {
- struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
-
- en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
- cqe_err->vendor_err_syndrome,
- cqe_err->syndrome);
- }
+ MLX4_CQE_OPCODE_ERROR))
+ if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
+ mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
+ ring);
/* Skip over last polled CQE */
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
@@ -836,6 +862,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_wqe_data_seg *data;
struct mlx4_en_tx_info *tx_info;
+ u32 __maybe_unused ring_cons;
int tx_ind;
int nr_txbb;
int desc_size;
@@ -849,7 +876,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool stop_queue;
bool inline_ok;
u8 data_offset;
- u32 ring_cons;
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 75213046563c..926407f0bbd9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1861,8 +1861,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET 0x0c0
#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
-#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
-#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
+#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x13)
+#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x17)
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
@@ -1870,7 +1870,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_DRIVER_VERSION_SZ 0x40
#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
-#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
+#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x13)
#define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18)
#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
@@ -2731,7 +2731,7 @@ void mlx4_opreq_action(struct work_struct *work)
if (err) {
mlx4_err(dev, "Failed to retrieve required operation: %d\n",
err);
- return;
+ goto out;
}
MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 650ae08c71de..8f020f26ebf5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -182,8 +182,8 @@ struct mlx4_init_hca_param {
u64 cmpt_base;
u64 mtt_base;
u64 global_caps;
- u16 log_mc_entry_sz;
- u16 log_mc_hash_sz;
+ u8 log_mc_entry_sz;
+ u8 log_mc_hash_sz;
u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */
u8 log_num_qps;
u8 log_num_srqs;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 4afe56a6eedf..8d7bb9a88967 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2539,6 +2539,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
if (!err || err == -ENOSPC) {
priv->def_counter[port] = idx;
+ err = 0;
} else if (err == -ENOENT) {
err = 0;
continue;
@@ -2589,7 +2590,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
*idx = get_param_l(&out_param);
-
+ if (WARN_ON(err == -ENOSPC))
+ err = -EINVAL;
return err;
}
return __mlx4_counter_alloc(dev, idx);
@@ -4309,12 +4311,14 @@ end:
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ mlx4_pci_disable_device(dev);
}
static const struct pci_error_handlers mlx4_err_handler = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 240f9c9ca943..3d5597d5b10d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -271,6 +271,10 @@ struct mlx4_en_page_cache {
} buf[MLX4_EN_CACHE_SIZE];
};
+enum {
+ MLX4_EN_TX_RING_STATE_RECOVERING,
+};
+
struct mlx4_en_priv;
struct mlx4_en_tx_ring {
@@ -317,6 +321,7 @@ struct mlx4_en_tx_ring {
* Only queue_stopped might be used if BQL is not properly working.
*/
unsigned long queue_stopped;
+ unsigned long state;
struct mlx4_hwq_resources sp_wqres;
struct mlx4_qp sp_qp;
struct mlx4_qp_context sp_context;
@@ -530,6 +535,10 @@ struct mlx4_en_stats_bitmap {
struct mutex mutex; /* for mutual access to stats bitmap */
};
+enum {
+ MLX4_EN_STATE_FLAG_RESTARTING,
+};
+
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -595,7 +604,7 @@ struct mlx4_en_priv {
struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct rx_mode_task;
- struct work_struct watchdog_task;
+ struct work_struct restart_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct delayed_work service_task;
@@ -643,6 +652,7 @@ struct mlx4_en_priv {
u32 pflags;
u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
u8 rss_hash_fn;
+ unsigned long state;
};
enum mlx4_en_wol {
@@ -788,6 +798,7 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
#define DEV_FEATURE_CHANGED(dev, new_features, feature) \
((dev->features & feature) ^ (new_features & feature))
+int mlx4_en_moderation_update(struct mlx4_en_priv *priv);
int mlx4_en_reset_config(struct net_device *dev,
struct hwtstamp_config ts_config,
netdev_features_t new_features);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 1a11bc0e1612..cfa0bba3940f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
goto err_out;
for (i = 0; i <= buddy->max_order; ++i) {
- s = BITS_TO_LONGS(1 << (buddy->max_order - i));
+ s = BITS_TO_LONGS(1UL << (buddy->max_order - i));
buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
if (!buddy->bits[i])
goto err_out_free;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index ba6ac31a339d..256a06b3c096 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1973,6 +1973,7 @@ EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
#define I2C_ADDR_LOW 0x50
#define I2C_ADDR_HIGH 0x51
#define I2C_PAGE_SIZE 256
+#define I2C_HIGH_PAGE_SIZE 128
/* Module Info Data */
struct mlx4_cable_info {
@@ -2026,6 +2027,88 @@ static inline const char *cable_info_mad_err_str(u16 mad_status)
return "Unknown Error";
}
+static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id)
+{
+ struct mlx4_cmd_mailbox *inbox, *outbox;
+ struct mlx4_mad_ifc *inmad, *outmad;
+ struct mlx4_cable_info *cable_info;
+ int ret;
+
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox))
+ return PTR_ERR(inbox);
+
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ mlx4_free_cmd_mailbox(dev, inbox);
+ return PTR_ERR(outbox);
+ }
+
+ inmad = (struct mlx4_mad_ifc *)(inbox->buf);
+ outmad = (struct mlx4_mad_ifc *)(outbox->buf);
+
+ inmad->method = 0x1; /* Get */
+ inmad->class_version = 0x1;
+ inmad->mgmt_class = 0x1;
+ inmad->base_version = 0x1;
+ inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
+
+ cable_info = (struct mlx4_cable_info *)inmad->data;
+ cable_info->dev_mem_address = 0;
+ cable_info->page_num = 0;
+ cable_info->i2c_addr = I2C_ADDR_LOW;
+ cable_info->size = cpu_to_be16(1);
+
+ ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (ret)
+ goto out;
+
+ if (be16_to_cpu(outmad->status)) {
+ /* Mad returned with bad status */
+ ret = be16_to_cpu(outmad->status);
+ mlx4_warn(dev,
+ "MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
+ 0xFF60, port, I2C_ADDR_LOW, 0, 1, ret,
+ cable_info_mad_err_str(ret));
+ ret = -ret;
+ goto out;
+ }
+ cable_info = (struct mlx4_cable_info *)outmad->data;
+ *module_id = cable_info->data[0];
+out:
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return ret;
+}
+
+static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+ *i2c_addr = I2C_ADDR_LOW;
+ *page_num = 0;
+
+ if (*offset < I2C_PAGE_SIZE)
+ return;
+
+ *i2c_addr = I2C_ADDR_HIGH;
+ *offset -= I2C_PAGE_SIZE;
+}
+
+static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+ /* Offsets 0-255 belong to page 0.
+ * Offsets 256-639 belong to pages 01, 02, 03.
+ * For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2
+ */
+ if (*offset < I2C_PAGE_SIZE)
+ *page_num = 0;
+ else
+ *page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
+ *i2c_addr = I2C_ADDR_LOW;
+ *offset -= *page_num * I2C_HIGH_PAGE_SIZE;
+}
+
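As the comment above works through, QSFP-class EEPROMs expose bytes 0-255 as the low page and map each further 128-byte upper page into device addresses 128-255, while SFP modules instead switch to the second I2C address (0x51) for offsets past 255. A standalone sketch of the QSFP arithmetic with a couple of worked values (plain C; the page-size constants match the ones above, everything else is illustrative):

    #include <stdio.h>

    #define I2C_PAGE_SIZE      256
    #define I2C_HIGH_PAGE_SIZE 128

    static void qsfp_params(unsigned int off, unsigned int *page,
                            unsigned int *dev_off)
    {
            if (off < I2C_PAGE_SIZE) {
                    *page = 0;
                    *dev_off = off;
                    return;
            }
            *page = 1 + (off - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
            *dev_off = off - *page * I2C_HIGH_PAGE_SIZE;
    }

    int main(void)
    {
            unsigned int offs[] = { 100, 300, 400 };
            unsigned int i, page, dev_off;

            for (i = 0; i < 3; i++) {
                    qsfp_params(offs[i], &page, &dev_off);
                    /* 100 -> page 0 @100, 300 -> page 1 @172, 400 -> page 2 @144 */
                    printf("offset %u -> page %u, device address %u\n",
                           offs[i], page, dev_off);
            }
            return 0;
    }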
/**
* mlx4_get_module_info - Read cable module eeprom data
* @dev: mlx4_dev.
@@ -2045,12 +2128,30 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
struct mlx4_cmd_mailbox *inbox, *outbox;
struct mlx4_mad_ifc *inmad, *outmad;
struct mlx4_cable_info *cable_info;
- u16 i2c_addr;
+ u8 module_id, i2c_addr, page_num;
int ret;
if (size > MODULE_INFO_MAX_READ)
size = MODULE_INFO_MAX_READ;
+ ret = mlx4_get_module_id(dev, port, &module_id);
+ if (ret)
+ return ret;
+
+ switch (module_id) {
+ case MLX4_MODULE_ID_SFP:
+ mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ case MLX4_MODULE_ID_QSFP:
+ case MLX4_MODULE_ID_QSFP_PLUS:
+ case MLX4_MODULE_ID_QSFP28:
+ mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ default:
+ mlx4_err(dev, "Module ID not recognized: %#x\n", module_id);
+ return -EINVAL;
+ }
+
inbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inbox))
return PTR_ERR(inbox);
@@ -2076,11 +2177,9 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
*/
size -= offset + size - I2C_PAGE_SIZE;
- i2c_addr = I2C_ADDR_LOW;
-
cable_info = (struct mlx4_cable_info *)inmad->data;
cable_info->dev_mem_address = cpu_to_be16(offset);
- cable_info->page_num = 0;
+ cable_info->page_num = page_num;
cable_info->i2c_addr = i2c_addr;
cable_info->size = cpu_to_be16(size);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index a4c1ed65f620..550e4893253e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4990,6 +4990,7 @@ static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule
if (!fs_rule->mirr_mbox) {
mlx4_err(dev, "rule mirroring mailbox is null\n");
+ mlx4_free_cmd_mailbox(dev, mailbox);
return -EINVAL;
}
memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a53736c26c0c..a686082762df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -835,6 +835,7 @@ static void cmd_work_handler(struct work_struct *work)
int alloc_ret;
int cmd_mode;
+ complete(&ent->handling);
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
if (!ent->page_queue) {
@@ -862,7 +863,6 @@ static void cmd_work_handler(struct work_struct *work)
}
cmd->ent_arr[ent->idx] = ent;
- set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
@@ -884,6 +884,7 @@ static void cmd_work_handler(struct work_struct *work)
if (ent->callback)
schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+ set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
/* Skip sending command to fw if internal error */
if (pci_channel_offline(dev->pdev) ||
@@ -896,6 +897,10 @@ static void cmd_work_handler(struct work_struct *work)
MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+ /* no doorbell, no need to keep the entry */
+ free_ent(cmd, ent->idx);
+ if (ent->callback)
+ free_cmd(ent);
return;
}
@@ -949,6 +954,11 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
struct mlx5_cmd *cmd = &dev->cmd;
int err;
+ if (!wait_for_completion_timeout(&ent->handling, timeout) &&
+ cancel_work_sync(&ent->work)) {
+ ent->ret = -ECANCELED;
+ goto out_err;
+ }
if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
wait_for_completion(&ent->done);
} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
@@ -956,12 +966,17 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
}
+out_err:
err = ent->ret;
if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)),
msg_to_opcode(ent->in));
+ } else if (err == -ECANCELED) {
+ mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
+ mlx5_command_str(msg_to_opcode(ent->in)),
+ msg_to_opcode(ent->in));
}
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
err, deliv_status_to_str(ent->status), ent->status);
@@ -997,6 +1012,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
ent->token = token;
ent->polling = force_polling;
+ init_completion(&ent->handling);
if (!callback)
init_completion(&ent->done);
@@ -1016,6 +1032,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
err = wait_func(dev, ent);
if (err == -ETIMEDOUT)
goto out;
+ if (err == -ECANCELED)
+ goto out_free;
ds = ent->ts2 - ent->ts1;
op = MLX5_GET(mbox_in, in->first.data, opcode);
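The cmd.c changes above add a "handling" completion that the work handler signals as soon as it starts running; the waiter gives the work a bounded time to begin and, if it never does, cancel_work_sync() pulls it back out of the queue and the command is failed with -ECANCELED instead of being waited on forever. A hedged sketch of that wait-or-cancel shape (generic kernel C, invented names; not the mlx5 command structures):

    #include <linux/workqueue.h>
    #include <linux/completion.h>
    #include <linux/errno.h>

    struct cmd_ent {
            struct work_struct work;
            struct completion handling;     /* "the handler has started" */
            struct completion done;         /* "the command has finished" */
            int ret;
    };

    static void cmd_handler(struct work_struct *work)
    {
            struct cmd_ent *ent = container_of(work, struct cmd_ent, work);

            complete(&ent->handling);       /* signal first, then do the work */
            /* ... issue the command, eventually complete(&ent->done) ... */
    }

    static void cmd_ent_init(struct cmd_ent *ent)
    {
            INIT_WORK(&ent->work, cmd_handler);
            init_completion(&ent->handling);
            init_completion(&ent->done);
            ent->ret = 0;
    }

    static int cmd_wait(struct cmd_ent *ent, unsigned long timeout)
    {
            /* If the handler never started, remove the work from the queue;
             * cancel_work_sync() returning true means it was still pending,
             * so nothing else will touch this entry.
             */
            if (!wait_for_completion_timeout(&ent->handling, timeout) &&
                cancel_work_sync(&ent->work))
                    return -ECANCELED;

            wait_for_completion(&ent->done);
            return ent->ret;
    }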
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index d4ec93bde4de..2266c09b741a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -796,7 +796,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
return NULL;
}
- tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
+ tracer = kvzalloc(sizeof(*tracer), GFP_KERNEL);
if (!tracer)
return ERR_PTR(-ENOMEM);
@@ -842,7 +842,7 @@ destroy_workqueue:
tracer->dev = NULL;
destroy_workqueue(tracer->work_queue);
free_tracer:
- kfree(tracer);
+ kvfree(tracer);
return ERR_PTR(err);
}
@@ -919,7 +919,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
mlx5_fw_tracer_destroy_log_buf(tracer);
flush_workqueue(tracer->work_queue);
destroy_workqueue(tracer->work_queue);
- kfree(tracer);
+ kvfree(tracer);
}
void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index a383276eb816..3d824c20d2a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1460,6 +1460,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
@@ -1469,7 +1470,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
return -EINVAL;
}
- mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+ if (err)
+ return err;
+
priv->channels.params.rx_cqe_compress_def = enable;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 76cc10e44080..c6eea6b6b1bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -217,6 +217,9 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
break;
}
+ if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type))
+ return 0;
+
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
@@ -397,8 +400,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- if (priv->fs.vlan.cvlan_filter_disabled &&
- !(priv->netdev->flags & IFF_PROMISC))
+ if (priv->fs.vlan.cvlan_filter_disabled)
mlx5e_add_any_vid_rules(priv);
}
@@ -415,8 +417,12 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- if (priv->fs.vlan.cvlan_filter_disabled &&
- !(priv->netdev->flags & IFF_PROMISC))
+ WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
+
+ /* must be called after DESTROY bit is set and
+ * set_rx_mode is called and flushed
+ */
+ if (priv->fs.vlan.cvlan_filter_disabled)
mlx5e_del_any_vid_rules(priv);
}
@@ -887,6 +893,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1027,6 +1034,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1306,6 +1314,7 @@ err_destroy_groups:
ft->g[ft->num_groups] = NULL;
mlx5e_destroy_groups(ft);
kvfree(in);
+ kfree(ft->g);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7e6706333fa8..51edc507b7b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -519,7 +519,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
&rq->wq_ctrl);
if (err)
- return err;
+ goto err_rq_wq_destroy;
rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
@@ -564,7 +564,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
- return err;
+ goto err_rq_wq_destroy;
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 701624a63d2f..1ab40d622ae1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -198,7 +198,7 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- if (esw->mode == SRIOV_NONE)
+ if (esw->mode != SRIOV_OFFLOADS)
return -EOPNOTSUPP;
switch (attr->id) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 044687a1f27c..9d86e49a7f44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1314,6 +1314,7 @@ out:
#ifdef CONFIG_MLX5_CORE_IPOIB
+#define MLX5_IB_GRH_SGID_OFFSET 8
#define MLX5_IB_GRH_DGID_OFFSET 24
#define MLX5_GID_SIZE 16
@@ -1327,6 +1328,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
struct net_device *netdev;
struct mlx5e_priv *priv;
char *pseudo_header;
+ u32 flags_rqpn;
u32 qpn;
u8 *dgid;
u8 g;
@@ -1347,7 +1349,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
priv = mlx5i_epriv(netdev);
tstamp = &priv->tstamp;
- g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
+ flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
+ g = (flags_rqpn >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
skb->pkt_type = PACKET_HOST;
@@ -1356,9 +1359,15 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
else
skb->pkt_type = PACKET_MULTICAST;
- /* TODO: IB/ipoib: Allow mcast packets from other VFs
- * 68996a6e760e5c74654723eeb57bf65628ae87f4
+ /* Drop packets that this interface sent, i.e. multicast packets
+ * that the HCA has replicated.
*/
+ if (g && (qpn == (flags_rqpn & 0xffffff)) &&
+ (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
+ MLX5_GID_SIZE) == 0)) {
+ skb->dev = NULL;
+ return;
+ }
skb_pull(skb, MLX5_IB_GRH_BYTES);
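
The IPoIB hunk above drops multicast frames the HCA looped back to their sender by comparing the CQE's queue pair number and the GRH source GID against the local interface identity. A minimal user-space sketch of the same check; the struct and field names below are hypothetical stand-ins for the driver's CQE and netdev fields:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define GID_SIZE 16

/* Hypothetical stand-ins for the fields the driver reads from the CQE/GRH. */
struct rx_meta {
	bool     has_grh;              /* GRH present (multicast/global route) */
	uint32_t src_qpn;              /* sender QPN, i.e. flags_rqpn & 0xffffff */
	uint8_t  src_gid[GID_SIZE];    /* source GID copied from the GRH */
};

struct local_id {
	uint32_t qpn;                  /* this interface's QPN */
	uint8_t  gid[GID_SIZE];        /* this interface's GID */
};

/* Return true when the frame is our own transmission echoed back by the HCA. */
static bool is_self_sent(const struct rx_meta *rx, const struct local_id *me)
{
	return rx->has_grh &&
	       rx->src_qpn == me->qpn &&
	       memcmp(rx->src_gid, me->gid, GID_SIZE) == 0;
}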
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index c8928ce69185..3050853774ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2217,12 +2217,11 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- int ret;
- ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
- fl6);
- if (ret < 0)
- return ret;
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6,
+ NULL);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
if (!(*out_ttl))
*out_ttl = ip6_dst_hoplimit(dst);
@@ -2428,7 +2427,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
struct ip_tunnel_key *tun_key = &e->tun_info.key;
- struct net_device *out_dev;
+ struct net_device *out_dev = NULL;
struct neighbour *n = NULL;
struct flowi6 fl6 = {};
u8 nud_state, tos, ttl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 73dce92c41c4..52d3989bb8e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -595,8 +595,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
struct mlx5e_tx_wqe_info *wi;
+ u32 nbytes = 0;
+ u16 ci, npkts = 0;
struct sk_buff *skb;
- u16 ci;
int i;
while (sq->cc != sq->pc) {
@@ -617,8 +618,11 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
}
dev_kfree_skb_any(skb);
+ npkts++;
+ nbytes += wi->num_bytes;
sq->cc += wi->num_wqebbs;
}
+ netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}
#ifdef CONFIG_MLX5_CORE_IPOIB
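
The en_tx.c change above batches packet and byte totals while draining the send queue and reports them once, keeping the completion side of the BQL accounting in step with the bytes queued on transmit. A plain-C sketch of that accumulate-then-report pattern, with a hypothetical descriptor layout and a stub standing in for netdev_tx_completed_queue():

#include <stdint.h>

/* Hypothetical ring entry; wi->num_bytes / num_wqebbs in the driver. */
struct tx_desc {
	uint32_t num_bytes;
	uint32_t num_slots;
};

/* Stand-in for netdev_tx_completed_queue(): consumes one batched report. */
static void report_completed(uint32_t pkts, uint32_t bytes)
{
	(void)pkts;
	(void)bytes;
}

/* Drain [*cc, pc) and report the totals once at the end. */
static void drain_ring(struct tx_desc *ring, uint32_t ring_mask,
		       uint32_t *cc, uint32_t pc)
{
	uint32_t pkts = 0, bytes = 0;

	while (*cc != pc) {
		struct tx_desc *d = &ring[*cc & ring_mask];

		pkts++;
		bytes += d->num_bytes;
		*cc += d->num_slots;
	}
	report_completed(pkts, bytes);
}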
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7366033cd31c..2190daace873 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1999,12 +1999,15 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
max_guarantee = evport->info.min_rate;
}
- return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+ if (max_guarantee)
+ return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+ return 0;
}
-static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
+static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
{
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ u32 divider = calculate_vports_min_rate_divider(esw);
struct mlx5_vport *evport;
u32 vport_max_rate;
u32 vport_min_rate;
@@ -2018,9 +2021,9 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
continue;
vport_min_rate = evport->info.min_rate;
vport_max_rate = evport->info.max_rate;
- bw_share = MLX5_MIN_BW_SHARE;
+ bw_share = 0;
- if (vport_min_rate)
+ if (divider)
bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
divider,
fw_max_bw_share);
@@ -2045,7 +2048,6 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
struct mlx5_vport *evport;
u32 fw_max_bw_share;
u32 previous_min_rate;
- u32 divider;
bool min_rate_supported;
bool max_rate_supported;
int err = 0;
@@ -2071,8 +2073,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
previous_min_rate = evport->info.min_rate;
evport->info.min_rate = min_rate;
- divider = calculate_vports_min_rate_divider(esw);
- err = normalize_vports_min_rate(esw, divider);
+ err = normalize_vports_min_rate(esw);
if (err) {
evport->info.min_rate = previous_min_rate;
goto unlock;
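
normalize_vports_min_rate() now derives the divider itself and programs a bandwidth share of 0 when no vport has a guarantee, only scaling rates when the divider is non-zero. A plain-C sketch of that normalization step; the clamping bounds are assumptions, the driver takes its upper limit from firmware capabilities:

#include <stdint.h>

#define MIN_BW_SHARE 1u

/* Scale a guaranteed rate into a bandwidth share in [MIN_BW_SHARE, fw_max],
 * or return 0 when nobody has a guarantee (divider == 0). */
static uint32_t rate_to_bw_share(uint32_t min_rate, uint32_t divider,
				 uint32_t fw_max)
{
	uint32_t share;

	if (!divider)
		return 0;

	share = min_rate / divider;
	if (share < MIN_BW_SHARE)
		share = MIN_BW_SHARE;
	if (share > fw_max)
		share = fw_max;
	return share;
}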
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index b16e0f45d28c..a38a0c86705a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1004,6 +1004,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
destroy_ft:
root->cmds->destroy_flow_table(root->dev, ft);
free_ft:
+ rhltable_destroy(&ft->fgs_hash);
kfree(ft);
unlock_root:
mutex_unlock(&root->chain_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 54f1a40a68ed..0fd62510fb27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -366,10 +366,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
return 0;
}
+enum {
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
+};
+
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
- return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+
+ switch (func) {
+ case PTP_PF_NONE:
+ return 0;
+ case PTP_PF_EXTTS:
+ return !(clock->pps_info.pin_caps[pin] &
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
+ case PTP_PF_PEROUT:
+ return !(clock->pps_info.pin_caps[pin] &
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
}
static const struct ptp_clock_info mlx5_ptp_clock_info = {
@@ -454,8 +475,9 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
- ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
- be64_to_cpu(eqe->data.pps.time_stamp));
+ ptp_event.timestamp =
+ mlx5_timecounter_cyc2time(clock,
+ be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
ptp_event.type = PTP_CLOCK_PPSUSR;
ptp_event.pps_times.ts_real =
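
mlx5_ptp_verify() above answers the PTP .verify() callback by testing per-pin capability bits instead of rejecting everything except PTP_PF_PHYSYNC. A compact sketch of that capability check in isolation; the bit and enum names here are illustrative, not the driver's:

#include <errno.h>
#include <stdint.h>

#define CAP_PPS_IN   (1u << 0)   /* pin can act as an external timestamp input */
#define CAP_PPS_OUT  (1u << 1)   /* pin can act as a periodic output */

enum pin_func { FUNC_NONE, FUNC_EXTTS, FUNC_PEROUT };

/* Return 0 when the requested function is supported on this pin. */
static int pin_verify(uint32_t pin_caps, enum pin_func func)
{
	switch (func) {
	case FUNC_NONE:
		return 0;
	case FUNC_EXTTS:
		return (pin_caps & CAP_PPS_IN) ? 0 : -EOPNOTSUPP;
	case FUNC_PEROUT:
		return (pin_caps & CAP_PPS_OUT) ? 0 : -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}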
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 5fac00ea6245..a2b25afa2472 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -51,6 +51,7 @@
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
+#include <linux/version.h>
#include <net/devlink.h>
#include "mlx5_core.h"
#include "fs_core.h"
@@ -211,7 +212,10 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
strncat(string, ",", remaining_size);
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
- strncat(string, DRIVER_VERSION, remaining_size);
+
+ snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
+ (u8)((LINUX_VERSION_CODE >> 16) & 0xff), (u8)((LINUX_VERSION_CODE >> 8) & 0xff),
+ (u16)(LINUX_VERSION_CODE & 0xffff));
/*Send the command*/
MLX5_SET(set_driver_version_in, in, opcode,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index e36d3e3675f9..9c3653e06886 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -331,6 +331,24 @@ out_free:
return err;
}
+static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
+ u32 npages)
+{
+ u32 pages_set = 0;
+ unsigned int n;
+
+ for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
+ MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
+ fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
+ pages_set++;
+
+ if (!--npages)
+ break;
+ }
+
+ return pages_set;
+}
+
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 *in, int in_size, u32 *out, int out_size)
{
@@ -354,8 +372,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
if (fwp->func_id != func_id)
continue;
- MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
- i++;
+ i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
}
MLX5_SET(manage_pages_out, out, output_num_entries, i);
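
fwp_fill_manage_pages_out() walks the clear bits of a firmware page's sub-page bitmask and emits one 4K address per matching chunk, stopping once the requested count is met. The address arithmetic can be illustrated in plain C; the chunk count per page is a hypothetical value here:

#include <stdint.h>
#include <stddef.h>

#define SUBPAGE_SIZE	4096u
#define SUBPAGES	16u	/* hypothetical number of 4K chunks per page */

/* Emit one address per clear bit of 'mask' (the chunks currently held by the
 * firmware in the hunk above), stopping after 'want' entries. */
static size_t fill_from_clear_bits(uint64_t base_addr, uint32_t mask,
				   uint64_t *out, size_t want)
{
	size_t filled = 0;
	uint32_t n;

	for (n = 0; n < SUBPAGES && filled < want; n++) {
		if (mask & (1u << n))
			continue;
		out[filled++] = base_addr + (uint64_t)n * SUBPAGE_SIZE;
	}
	return filled;
}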
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 2e6df5804b35..049ca4ba49de 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -439,7 +439,8 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
if (trans->core->fw_flash_in_progress)
timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
- queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
+ queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
+ timeout << trans->retries);
}
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
@@ -488,6 +489,9 @@ static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
err = mlxsw_emad_transmit(trans->core, trans);
if (err == 0)
return;
+
+ if (!atomic_dec_and_test(&trans->active))
+ return;
} else {
err = -EIO;
}
@@ -587,7 +591,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
if (err)
- return err;
+ goto err_trap_register;
err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
if (err)
@@ -599,6 +603,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err_emad_trap_set:
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
+err_trap_register:
destroy_workqueue(mlxsw_core->emad_wq);
return err;
}
@@ -1110,6 +1115,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
if (!reload)
devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ if (!reload)
+ devlink_free(devlink);
return;
@@ -1383,7 +1390,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
bulk_list, cb, cb_priv, tid);
if (err) {
- kfree(trans);
+ kfree_rcu(trans, rcu);
return err;
}
return 0;
@@ -1604,11 +1611,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
break;
}
}
- rcu_read_unlock();
- if (!found)
+ if (!found) {
+ rcu_read_unlock();
goto drop;
+ }
rxl->func(skb, local_port, rxl_item->priv);
+ rcu_read_unlock();
return;
drop:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index c51b2adfc1e1..2cbfa5cfefab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -316,7 +316,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (!block)
- return NULL;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&block->resource_list);
block->afa = mlxsw_afa;
@@ -344,7 +344,7 @@ err_second_set_create:
mlxsw_afa_set_destroy(block->first_set);
err_first_set_create:
kfree(block);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(mlxsw_afa_block_create);
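
mlxsw_afa_block_create() now reports allocation failure as ERR_PTR(-ENOMEM), so callers (as in the spectrum2/mr_tcam hunks below) can propagate a real errno with PTR_ERR() instead of guessing from NULL. A user-space sketch of the encoded-error-pointer idea; the kernel's linux/err.h does the same with the top 4095 address values:

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long error)      { return (void *)error; }
static inline long  ptr_err(const void *ptr) { return (long)ptr; }
static inline bool  is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *block_create(size_t size)
{
	void *block = calloc(1, size);

	return block ? block : err_ptr(-ENOMEM);
}

/* Caller: propagate the embedded errno instead of a bare NULL check. */
static int use_block(void)
{
	void *b = block_create(64);

	if (is_err(b))
		return (int)ptr_err(b);
	free(b);
	return 0;
}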
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 5df9b25cab27..1019c9efedea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3126,6 +3126,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_port_remove(mlxsw_sp, i);
kfree(mlxsw_sp->port_to_module);
kfree(mlxsw_sp->ports);
+ mlxsw_sp->ports = NULL;
}
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
@@ -3174,6 +3175,7 @@ err_port_module_info_get:
kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
kfree(mlxsw_sp->ports);
+ mlxsw_sp->ports = NULL;
return err;
}
@@ -3228,6 +3230,14 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
}
}
+static struct mlxsw_sp_port *
+mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+ if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
+ return mlxsw_sp->ports[local_port];
+ return NULL;
+}
+
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
unsigned int count,
struct netlink_ext_ack *extack)
@@ -3238,7 +3248,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
int i;
int err;
- mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
local_port);
@@ -3305,7 +3315,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
unsigned int count;
int i;
- mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
local_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
index 8ca77f3e8f27..ffd4b055fead 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -88,8 +88,8 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
* to be written using PEFA register to all indexes for all regions.
*/
afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
- if (!afa_block) {
- err = -ENOMEM;
+ if (IS_ERR(afa_block)) {
+ err = PTR_ERR(afa_block);
goto err_afa_block;
}
err = mlxsw_afa_block_continue(afa_block);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index c4f9238591e6..c99f5542da1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -442,7 +442,8 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
if (!rulei)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+
rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
if (IS_ERR(rulei->act_block)) {
err = PTR_ERR(rulei->act_block);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 336e5ecc68f8..f1874578aa14 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -524,6 +524,16 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
u16 erif_index = 0;
int err;
+ /* Add the eRIF */
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
+ erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
+ err = mr->mr_ops->route_erif_add(mlxsw_sp,
+ rve->mr_route->route_priv,
+ erif_index);
+ if (err)
+ return err;
+ }
+
/* Update the route action, as the new eVIF can be a tunnel or a pimreg
* device which will require updating the action.
*/
@@ -533,17 +543,7 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
rve->mr_route->route_priv,
route_action);
if (err)
- return err;
- }
-
- /* Add the eRIF */
- if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
- erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
- err = mr->mr_ops->route_erif_add(mlxsw_sp,
- rve->mr_route->route_priv,
- erif_index);
- if (err)
- goto err_route_erif_add;
+ goto err_route_action_update;
}
/* Update the minimum MTU */
@@ -561,14 +561,14 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
return 0;
err_route_min_mtu_update:
- if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
- mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
- erif_index);
-err_route_erif_add:
if (route_action != rve->mr_route->route_action)
mr->mr_ops->route_action_update(mlxsw_sp,
rve->mr_route->route_priv,
rve->mr_route->route_action);
+err_route_action_update:
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
+ mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
+ erif_index);
return err;
}
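
The spectrum_mr reordering above restores the rule that error unwinding undoes steps in the reverse of the order they were performed, so the goto labels line up with the setup sequence again. The generic shape of that pattern, with hypothetical step functions:

/* Hypothetical three-step setup with teardown in strict reverse order. */
static int step_a(void)   { return 0; }
static int step_b(void)   { return 0; }
static int step_c(void)   { return 0; }
static void undo_a(void)  { }
static void undo_b(void)  { }

static int setup_all(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto err_b;

	err = step_c();
	if (err)
		goto err_c;

	return 0;

err_c:
	undo_b();
err_b:
	undo_a();
	return err;
}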
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 346f4a5fe053..221aa6a474eb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -199,8 +199,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
int err;
afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
- if (!afa_block)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(afa_block))
+ return afa_block;
err = mlxsw_afa_block_append_allocated_counter(afa_block,
counter_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 76960d3adfc0..93d662de106e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -5982,7 +5982,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
}
fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
- if (WARN_ON(!fib_work))
+ if (!fib_work)
return NOTIFY_BAD;
fib_work->mlxsw_sp = router->mlxsw_sp;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 2d4f213e154d..b22c190e001d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1289,6 +1289,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
if (mlxsw_sx_port_created(mlxsw_sx, i))
mlxsw_sx_port_remove(mlxsw_sx, i);
kfree(mlxsw_sx->ports);
+ mlxsw_sx->ports = NULL;
}
static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
@@ -1323,6 +1324,7 @@ err_port_module_info_get:
if (mlxsw_sx_port_created(mlxsw_sx, i))
mlxsw_sx_port_remove(mlxsw_sx, i);
kfree(mlxsw_sx->ports);
+ mlxsw_sx->ports = NULL;
return err;
}
@@ -1406,6 +1408,12 @@ static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
u8 module, width;
int err;
+ if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n",
+ local_port);
+ return -EINVAL;
+ }
+
if (new_type == DEVLINK_PORT_TYPE_AUTO)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ebbdfb908745..623a05d78343 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -1657,8 +1657,7 @@ static inline void set_tx_len(struct ksz_desc *desc, u32 len)
#define HW_DELAY(hw, reg) \
do { \
- u16 dummy; \
- dummy = readw(hw->io + reg); \
+ readw(hw->io + reg); \
} while (0)
/**
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index f831238d9793..84b6ad76f5bc 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -1075,7 +1075,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
if (unlikely(ret)) {
netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n",
ret);
- goto out_free;
+ goto out_stop;
}
eidled = encx24j600_read_reg(priv, EIDLED);
@@ -1093,6 +1093,8 @@ static int encx24j600_spi_probe(struct spi_device *spi)
out_unregister:
unregister_netdev(priv->ndev);
+out_stop:
+ kthread_stop(priv->kworker_task);
out_free:
free_netdev(ndev);
@@ -1105,6 +1107,7 @@ static int encx24j600_spi_remove(struct spi_device *spi)
struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
unregister_netdev(priv->ndev);
+ kthread_stop(priv->kworker_task);
free_netdev(priv->ndev);
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 07c1eb63415a..190c22cdc4d2 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -659,7 +659,9 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
wol->supported = 0;
wol->wolopts = 0;
- phy_ethtool_get_wol(netdev->phydev, wol);
+
+ if (netdev->phydev)
+ phy_ethtool_get_wol(netdev->phydev, wol);
wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
@@ -688,9 +690,8 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
- phy_ethtool_set_wol(netdev->phydev, wol);
-
- return 0;
+ return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
+ : -ENETDOWN;
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 208341541087..0d681714878b 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -145,7 +145,8 @@ static void lan743x_intr_software_isr(void *context)
int_sts = lan743x_csr_read(adapter, INT_STS);
if (int_sts & INT_BIT_SW_GP_) {
- lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_);
+ /* disable the interrupt to prevent repeated re-triggering */
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
intr->software_isr_flag = 1;
}
}
@@ -155,9 +156,8 @@ static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
struct lan743x_tx *tx = context;
struct lan743x_adapter *adapter = tx->adapter;
bool enable_flag = true;
- u32 int_en = 0;
- int_en = lan743x_csr_read(adapter, INT_EN_SET);
+ lan743x_csr_read(adapter, INT_EN_SET);
if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
lan743x_csr_write(adapter, INT_EN_CLR,
INT_BIT_DMA_TX_(tx->channel_number));
@@ -672,14 +672,12 @@ clean_up:
static int lan743x_dp_write(struct lan743x_adapter *adapter,
u32 select, u32 addr, u32 length, u32 *buf)
{
- int ret = -EIO;
u32 dp_sel;
int i;
- mutex_lock(&adapter->dp_lock);
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1, 40, 100, 100))
- goto unlock;
+ return -EIO;
dp_sel = lan743x_csr_read(adapter, DP_SEL);
dp_sel &= ~DP_SEL_MASK_;
dp_sel |= select;
@@ -691,13 +689,10 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1, 40, 100, 100))
- goto unlock;
+ return -EIO;
}
- ret = 0;
-unlock:
- mutex_unlock(&adapter->dp_lock);
- return ret;
+ return 0;
}
static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
@@ -1250,13 +1245,13 @@ clean_up_data_descriptor:
goto clear_active;
if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb_any(buffer_info->skb);
goto clear_skb;
}
if (cleanup) {
lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb_any(buffer_info->skb);
} else {
ignore_sync = (buffer_info->flags &
TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
@@ -1566,7 +1561,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
if (required_number_of_descriptors >
lan743x_tx_get_avail_desc(tx)) {
if (required_number_of_descriptors > (tx->ring_size - 1)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
} else {
/* save to overflow buffer */
tx->overflow_skb = skb;
@@ -1599,7 +1594,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
start_frame_length,
do_timestamp,
skb->ip_summed == CHECKSUM_PARTIAL)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
goto unlock;
}
@@ -1619,7 +1614,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
* frame assembler clean up was performed inside
* lan743x_tx_frame_add_fragment
*/
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
goto unlock;
}
}
@@ -1639,10 +1634,9 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
bool start_transmitter = false;
unsigned long irq_flags = 0;
u32 ioc_bit = 0;
- u32 int_sts = 0;
ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
- int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
+ lan743x_csr_read(adapter, DMAC_INT_STS);
if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
spin_lock_irqsave(&tx->ring_lock, irq_flags);
@@ -2679,7 +2673,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
adapter->intr.irq = adapter->pdev->irq;
lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
- mutex_init(&adapter->dp_lock);
ret = lan743x_gpio_init(adapter);
if (ret)
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 2d6eea18973e..77273be2d1ee 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -702,9 +702,6 @@ struct lan743x_adapter {
struct lan743x_csr csr;
struct lan743x_intr intr;
- /* lock, used to prevent concurrent access to data port */
- struct mutex dp_lock;
-
struct lan743x_gpio gpio;
struct lan743x_ptp ptp;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index b34055ac476f..4db3431b79ac 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -561,7 +561,7 @@ static int moxart_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
unregister_netdev(ndev);
- free_irq(ndev->irq, ndev);
+ devm_free_irq(&pdev->dev, ndev->irq, ndev);
moxart_mac_free_memory(ndev);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index a29a6a618110..ea30da1c53f0 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1549,10 +1549,8 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
int ret = 0;
- if (!ocelot_netdevice_dev_check(dev))
- return 0;
-
if (event == NETDEV_PRECHANGEUPPER &&
+ ocelot_netdevice_dev_check(dev) &&
netif_is_lag_master(info->upper_dev)) {
struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
struct netlink_ext_ack *extack;
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 51fa82b429a3..40970352d208 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -235,11 +235,13 @@ static int jazz_sonic_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err)
- goto out1;
+ goto undo_probe1;
return 0;
-out1:
+undo_probe1:
+ dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
out:
free_netdev(dev);
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 0937fc2a928e..23c9394cd5d2 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -540,10 +540,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err)
- goto out;
+ goto undo_probe;
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(dev);
@@ -618,12 +622,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board)
err = register_netdev(ndev);
if (err)
- goto out;
+ goto undo_probe;
nubus_set_drvdata(board, ndev);
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(ndev);
return err;
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index e1b886e87a76..44171d7bb434 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -265,11 +265,14 @@ int xtsonic_probe(struct platform_device *pdev)
sonic_msg_init(dev);
if ((err = register_netdev(dev)))
- goto out1;
+ goto undo_probe1;
return 0;
-out1:
+undo_probe1:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
release_region(dev->base_addr, SONIC_MEM_SIZE);
out:
free_netdev(dev);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index d743a37a3cee..e5dda2c27f18 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -2065,7 +2065,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
(level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
if ((mask & VXGE_DEBUG_MASK) == mask) \
- printk(fmt "\n", __VA_ARGS__); \
+ printk(fmt "\n", ##__VA_ARGS__); \
} while (0)
#else
#define vxge_debug_ll(level, mask, fmt, ...)
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 59a57ff5e96a..9c86f4f9cd42 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -452,49 +452,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
#define vxge_debug_ll_config(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_ll_config(level, fmt, ...)
#endif
#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
#define vxge_debug_init(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_init(level, fmt, ...)
#endif
#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
#define vxge_debug_tx(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_tx(level, fmt, ...)
#endif
#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
#define vxge_debug_rx(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_rx(level, fmt, ...)
#endif
#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
#define vxge_debug_mem(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_mem(level, fmt, ...)
#endif
#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
#define vxge_debug_entryexit(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_entryexit(level, fmt, ...)
#endif
#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
#define vxge_debug_intr(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_intr(level, fmt, ...)
#endif
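
The s/__VA_ARGS__/##__VA_ARGS__/ changes above rely on the GNU extension that swallows the trailing comma when the variadic list is empty; without it, a call like vxge_debug_init(level, "msg") expands to printk(fmt "\n", ) and fails to compile. A minimal illustration:

#include <stdio.h>

/* Plain __VA_ARGS__: dbg_bad("hi") expands to printf("hi" "\n", ) -- a syntax
 * error when no variadic arguments are given. */
#define dbg_bad(fmt, ...)   printf(fmt "\n", __VA_ARGS__)

/* GNU ## extension: the comma is dropped when the list is empty. */
#define dbg_good(fmt, ...)  printf(fmt "\n", ##__VA_ARGS__)

int main(void)
{
	dbg_good("no arguments");        /* fine */
	dbg_good("value = %d", 42);      /* fine */
	/* dbg_bad("no arguments"); */   /* would not compile */
	return 0;
}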
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 0c3b5dea2858..ad3702d3d831 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -29,8 +29,6 @@
*/
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
- u64 val64;
-
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -83,7 +81,7 @@ enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->xgmac_vp_int_status);
- val64 = readq(&vp_reg->vpath_general_int_status);
+ readq(&vp_reg->vpath_general_int_status);
/* Mask unwanted interrupts */
@@ -156,8 +154,6 @@ exit:
enum vxge_hw_status vxge_hw_vpath_intr_disable(
struct __vxge_hw_vpath_handle *vp)
{
- u64 val64;
-
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -178,8 +174,6 @@ enum vxge_hw_status vxge_hw_vpath_intr_disable(
(u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_general_int_mask);
- val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
-
writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
@@ -486,9 +480,7 @@ void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
*/
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
- u32 val32;
-
- val32 = readl(&hldev->common_reg->titan_general_int_status);
+ readl(&hldev->common_reg->titan_general_int_status);
}
/**
@@ -1726,8 +1718,8 @@ void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
struct __vxge_hw_vpath_handle *vp,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN],
+ u8 *macaddr,
+ u8 *macaddr_mask,
enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
u32 i;
@@ -1789,8 +1781,8 @@ exit:
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
struct __vxge_hw_vpath_handle *vp,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN])
+ u8 *macaddr,
+ u8 *macaddr_mask)
{
u32 i;
u64 data1 = 0ULL;
@@ -1841,8 +1833,8 @@ exit:
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
struct __vxge_hw_vpath_handle *vp,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN])
+ u8 *macaddr,
+ u8 *macaddr_mask)
{
u32 i;
u64 data1 = 0ULL;
@@ -1894,8 +1886,8 @@ exit:
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
struct __vxge_hw_vpath_handle *vp,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN])
+ u8 *macaddr,
+ u8 *macaddr_mask)
{
u32 i;
u64 data1 = 0ULL;
@@ -2385,7 +2377,6 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
u8 t_code;
enum vxge_hw_status status = VXGE_HW_OK;
void *first_rxdh;
- u64 val64 = 0;
int new_count = 0;
ring->cmpl_cnt = 0;
@@ -2413,8 +2404,7 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
}
writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
&ring->vp_reg->prc_rxd_doorbell);
- val64 =
- readl(&ring->common_reg->titan_general_int_status);
+ readl(&ring->common_reg->titan_general_int_status);
ring->doorbell_cnt = 0;
}
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 6a79c8e4a7a4..9043d2cadd5d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -744,8 +744,8 @@ nfp_port_get_fecparam(struct net_device *netdev,
struct nfp_eth_table_port *eth_port;
struct nfp_port *port;
- param->active_fec = ETHTOOL_FEC_NONE_BIT;
- param->fec = ETHTOOL_FEC_NONE_BIT;
+ param->active_fec = ETHTOOL_FEC_NONE;
+ param->fec = ETHTOOL_FEC_NONE;
port = nfp_port_from_netdev(netdev);
eth_port = nfp_port_get_eth_port(port);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 41d30f55c946..6bd6c261f2ba 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -845,7 +845,8 @@ static int lpc_mii_init(struct netdata_local *pldat)
if (mdiobus_register(pldat->mii_bus))
goto err_out_unregister_bus;
- if (lpc_mii_probe(pldat->ndev) != 0)
+ err = lpc_mii_probe(pldat->ndev);
+ if (err)
goto err_out_unregister_bus;
return 0;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 43c0c10dfeb7..3a4225837049 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -27,7 +27,6 @@
#define DRV_VERSION "1.01"
const char pch_driver_version[] = DRV_VERSION;
-#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* Pci device ID */
#define PCH_GBE_MAR_ENTRIES 16
#define PCH_GBE_SHORT_PKT 64
#define DSC_INIT16 0xC000
@@ -37,11 +36,9 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_PCI_BAR 1
#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
-/* Macros for ML7223 */
-#define PCI_VENDOR_ID_ROHM 0x10db
-#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
+#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802
-/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
#define PCH_GBE_TX_WEIGHT 64
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 65f69e562618..e2c280913fbb 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1089,16 +1089,20 @@ static int pasemi_mac_open(struct net_device *dev)
mac->tx = pasemi_mac_setup_tx_resources(dev);
- if (!mac->tx)
+ if (!mac->tx) {
+ ret = -ENOMEM;
goto out_tx_ring;
+ }
/* We might already have allocated rings in case mtu was changed
* before interface was brought up.
*/
if (dev->mtu > 1500 && !mac->num_cs) {
pasemi_mac_setup_csrings(mac);
- if (!mac->num_cs)
+ if (!mac->num_cs) {
+ ret = -ENOMEM;
goto out_tx_ring;
+ }
}
/* Zero out rmon counters */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 59c70be22a84..42b99b182616 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -580,11 +580,6 @@ static const struct net_device_ops netxen_netdev_ops = {
.ndo_set_features = netxen_set_features,
};
-static inline bool netxen_function_zero(struct pci_dev *pdev)
-{
- return (PCI_FUNC(pdev->devfn) == 0) ? true : false;
-}
-
static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
u32 mode)
{
@@ -680,7 +675,7 @@ static int netxen_setup_intr(struct netxen_adapter *adapter)
netxen_initialize_interrupt_registers(adapter);
netxen_set_msix_bit(pdev, 0);
- if (netxen_function_zero(pdev)) {
+ if (adapter->portnum == 0) {
if (!netxen_setup_msi_interrupts(adapter, num_msix))
netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
else
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index f1977aa440e5..734462f8d881 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -397,7 +397,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
}
- iids->vf_cids += vf_cids * p_mngr->vf_count;
+ iids->vf_cids = vf_cids;
iids->tids += vf_tids * p_mngr->vf_count;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -2074,8 +2074,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
- DP_NOTICE(p_hwfn,
- "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index a6a9688db307..e50fc8f714dc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3316,26 +3316,20 @@ static void qed_chain_free_single(struct qed_dev *cdev,
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
- void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+ struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl;
u32 page_cnt = p_chain->page_cnt, i, pbl_size;
- u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
- if (!pp_virt_addr_tbl)
+ if (!pp_addr_tbl)
return;
- if (!p_pbl_virt)
- goto out;
-
for (i = 0; i < page_cnt; i++) {
- if (!pp_virt_addr_tbl[i])
+ if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map)
break;
dma_free_coherent(&cdev->pdev->dev,
QED_CHAIN_PAGE_SIZE,
- pp_virt_addr_tbl[i],
- *(dma_addr_t *)p_pbl_virt);
-
- p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+ pp_addr_tbl[i].virt_addr,
+ pp_addr_tbl[i].dma_map);
}
pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
@@ -3345,9 +3339,9 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
pbl_size,
p_chain->pbl_sp.p_virt_table,
p_chain->pbl_sp.p_phys_table);
-out:
- vfree(p_chain->pbl.pp_virt_addr_tbl);
- p_chain->pbl.pp_virt_addr_tbl = NULL;
+
+ vfree(p_chain->pbl.pp_addr_tbl);
+ p_chain->pbl.pp_addr_tbl = NULL;
}
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
@@ -3448,19 +3442,19 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
{
u32 page_cnt = p_chain->page_cnt, size, i;
dma_addr_t p_phys = 0, p_pbl_phys = 0;
- void **pp_virt_addr_tbl = NULL;
+ struct addr_tbl_entry *pp_addr_tbl;
u8 *p_pbl_virt = NULL;
void *p_virt = NULL;
- size = page_cnt * sizeof(*pp_virt_addr_tbl);
- pp_virt_addr_tbl = vzalloc(size);
- if (!pp_virt_addr_tbl)
+ size = page_cnt * sizeof(*pp_addr_tbl);
+ pp_addr_tbl = vzalloc(size);
+ if (!pp_addr_tbl)
return -ENOMEM;
/* The allocation of the PBL table is done with its full size, since it
* is expected to be successive.
* qed_chain_init_pbl_mem() is called even in a case of an allocation
- * failure, since pp_virt_addr_tbl was previously allocated, and it
+ * failure, since tbl was previously allocated, and it
* should be saved to allow its freeing during the error flow.
*/
size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
@@ -3474,8 +3468,7 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
p_chain->b_external_pbl = true;
}
- qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
- pp_virt_addr_tbl);
+ qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl);
if (!p_pbl_virt)
return -ENOMEM;
@@ -3494,7 +3487,8 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
/* Fill the PBL table with the physical address of the page */
*(dma_addr_t *)p_pbl_virt = p_phys;
/* Keep the virtual address of the page */
- p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+ p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt;
+ p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys;
p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index f9e475075d3e..61d5d7654568 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1015,7 +1015,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
index, attn_bits, attn_acks, asserted_bits,
deasserted_bits, p_sb_attn_sw->known_attn);
} else if (asserted_bits == 0x100) {
- DP_INFO(p_hwfn, "MFW indication via attention\n");
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "MFW indication via attention\n");
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"MFW indication [deassertion]\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 39787bb885c8..80afc8f36e00 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -2737,14 +2737,18 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
sizeof(*iwarp_info->partial_fpdus),
GFP_KERNEL);
- if (!iwarp_info->partial_fpdus)
+ if (!iwarp_info->partial_fpdus) {
+ rc = -ENOMEM;
goto err;
+ }
iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
- if (!iwarp_info->mpa_intermediate_buf)
+ if (!iwarp_info->mpa_intermediate_buf) {
+ rc = -ENOMEM;
goto err;
+ }
/* The mpa_bufs array serves for pending RX packets received on the
* mpa ll2 that don't have place on the tx ring and require later
@@ -2754,8 +2758,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
sizeof(*iwarp_info->mpa_bufs),
GFP_KERNEL);
- if (!iwarp_info->mpa_bufs)
+ if (!iwarp_info->mpa_bufs) {
+ rc = -ENOMEM;
goto err;
+ }
INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 71a7af134dd8..886c7aae662f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -96,6 +96,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
p_ramrod->personality = PERSONALITY_ETH;
break;
case QED_PCI_ETH_ROCE:
+ case QED_PCI_ETH_IWARP:
p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
break;
default:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 5dda547772c1..93a0fbf6a132 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}
+#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90
+#define QED_VF_CHANNEL_USLEEP_DELAY 100
+#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10
+#define QED_VF_CHANNEL_MSLEEP_DELAY 25
+
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
struct ustorm_trigger_vf_zone trigger;
struct ustorm_vf_zone *zone_data;
- int rc = 0, time = 100;
+ int iter, rc = 0;
zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
@@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
/* When PF would be done with the response, it would write back to the
- * `done' address. Poll until then.
+ * `done' address from a coherent DMA zone. Poll until then.
*/
- while ((!*done) && time) {
- msleep(25);
- time--;
+
+ iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
+ while (!*done && iter--) {
+ udelay(QED_VF_CHANNEL_USLEEP_DELAY);
+ dma_rmb();
+ }
+
+ iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
+ while (!*done && iter--) {
+ msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
+ dma_rmb();
}
if (!*done) {
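
The new loop above polls the PF's completion flag in two phases: a short busy-wait with udelay() for the common fast path, then a coarser msleep() loop, re-reading the DMA-coherent 'done' byte behind a dma_rmb() each iteration. A user-space analogue of the two-phase wait; the iteration counts and sleep granularity are illustrative only:

#include <stdbool.h>
#include <time.h>

static void sleep_us(long us)
{
	struct timespec ts = { us / 1000000, (us % 1000000) * 1000L };

	nanosleep(&ts, NULL);
}

/* Poll '*done': ~9 ms of fine-grained waiting, then ~250 ms in coarse steps. */
static bool wait_for_done(volatile const unsigned char *done)
{
	int iter;

	for (iter = 90; !*done && iter--; )
		sleep_us(100);		/* fast path: 100 us steps */

	for (iter = 10; !*done && iter--; )
		sleep_us(25 * 1000);	/* slow path: 25 ms steps */

	return *done != 0;
}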
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index dc3be8a4acf4..2bdc410d1144 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -550,12 +550,14 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_KDUMP_MIN 63
#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_KDUMP_MIN 63
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
#define QEDE_MIN_PKT_LEN 64
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index a96da16f3404..1976279800cd 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1747,6 +1747,11 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
ntohs(udp_hdr(skb)->dest) != gnv_port))
return features & ~(NETIF_F_CSUM_MASK |
NETIF_F_GSO_MASK);
+ } else if (l4_proto == IPPROTO_IPIP) {
+ /* IPIP tunnels are unknown to the device, or at least unsupported natively;
+ * offloads for them can't be done trivially, so disable them for such skbs.
+ */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 0d8e39ffbcd1..1aabb2e7a38b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
@@ -730,8 +731,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
edev->dp_module = dp_module;
edev->dp_level = dp_level;
edev->ops = qed_ops;
- edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
- edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+ if (is_kdump_kernel()) {
+ edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
+ edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
+ } else {
+ edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+ edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+ }
DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
info->num_queues, info->num_queues);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index a79d84f99102..6ed8294f7df8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3651,7 +3651,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
ahw->diag_cnt = 0;
ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
if (ret)
- goto fail_diag_irq;
+ goto fail_mbx_args;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
intrpt_id = ahw->intr_tbl[0].id;
@@ -3681,6 +3681,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
done:
qlcnic_free_mbx_args(&cmd);
+
+fail_mbx_args:
qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
fail_diag_irq:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 07f9067affc6..10286215092f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1720,7 +1720,7 @@ static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_d
ahw->reset.seq_error = 0;
ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
- if (p_dev->ahw->reset.buff == NULL)
+ if (ahw->reset.buff == NULL)
return -ENOMEM;
p_buff = p_dev->ahw->reset.buff;
@@ -2251,7 +2251,8 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
/* Boot either flash image or firmware image from host file system */
if (qlcnic_load_fw_file == 1) {
- if (qlcnic_83xx_load_fw_image_from_host(adapter))
+ err = qlcnic_83xx_load_fw_image_from_host(adapter);
+ if (err)
return err;
} else {
QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index a4cd6f2cfb86..4fb3dcb03819 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1049,7 +1049,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
if (!skb)
- break;
+ goto error;
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
adapter->ahw->diag_cnt = 0;
@@ -1073,6 +1073,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
cnt++;
}
if (cnt != i) {
+error:
dev_err(&adapter->pdev->dev,
"LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
if (mode != QLCNIC_ILB_MODE)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index dbd48012224f..ed34b7d1a9e1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2508,6 +2508,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
qlcnic_sriov_vf_register_map(ahw);
break;
default:
+ err = -EINVAL;
goto err_out_free_hw_res;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index f34ae8c75bc5..61a39d167c8b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1426,6 +1426,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
extended = !qlcnic_83xx_extend_md_capab(adapter);
@@ -1444,6 +1445,8 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
struct qlcnic_83xx_dump_template_hdr *hdr;
hdr = fw_dump->tmpl_hdr;
+ if (!hdr)
+ return;
hdr->drv_cap_mask = 0x1f;
fw_dump->cap_mask = 0x1f;
dev_info(&pdev->dev,
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 031f6e6ee9c1..351a90698010 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1449,6 +1449,7 @@ int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
{
struct emac_tpd tpd;
u32 prod_idx;
+ int len;
memset(&tpd, 0, sizeof(tpd));
@@ -1468,9 +1469,10 @@ int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
if (skb_network_offset(skb) != ETH_HLEN)
TPD_TYP_SET(&tpd, 1);
+ len = skb->len;
emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
- netdev_sent_queue(adpt->netdev, skb->len);
+ netdev_sent_queue(adpt->netdev, len);
/* Make sure the are enough free descriptors to hold one
* maximum-sized SKB. We need one desc for each fragment,
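
The emac fix above caches skb->len before emac_tx_fill_tpd(), because once the buffer has been handed to the DMA ring (and possibly completed and freed) its length can no longer be read safely. The pattern in isolation, with a hypothetical consume() standing in for the fill/transmit step:

#include <stdlib.h>

struct buf {
	size_t len;
	unsigned char *data;
};

/* Hands the buffer to its consumer; the caller must not touch it afterwards. */
static void consume(struct buf *b)
{
	free(b->data);
	free(b);
}

static size_t bytes_accounted;

static void send_buf(struct buf *b)
{
	size_t len = b->len;	/* capture before ownership is given away */

	consume(b);
	bytes_accounted += len;	/* reading b->len here would be a use-after-free */
}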
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 2a0cbc535a2e..19673ed929e6 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -493,13 +493,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev,
ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
if (ret)
- return ret;
+ goto disable_clk_axi;
ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
if (ret)
- return ret;
+ goto disable_clk_cfg_ahb;
+
+ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
+ if (ret)
+ goto disable_clk_cfg_ahb;
- return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
+ return 0;
+
+disable_clk_cfg_ahb:
+ clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]);
+disable_clk_axi:
+ clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]);
+
+ return ret;
}
/* Enable clocks; needs emac_clks_phase1_init to be called before */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 37786affa975..05c438f47ff1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -56,15 +56,23 @@ static int rmnet_unregister_real_device(struct net_device *real_dev)
return 0;
}
-static int rmnet_register_real_device(struct net_device *real_dev)
+static int rmnet_register_real_device(struct net_device *real_dev,
+ struct netlink_ext_ack *extack)
{
struct rmnet_port *port;
int rc, entry;
ASSERT_RTNL();
- if (rmnet_is_real_dev_registered(real_dev))
+ if (rmnet_is_real_dev_registered(real_dev)) {
+ port = rmnet_get_port_rtnl(real_dev);
+ if (port->rmnet_mode != RMNET_EPMODE_VND) {
+ NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
+ return -EINVAL;
+ }
+
return 0;
+ }
port = kzalloc(sizeof(*port), GFP_ATOMIC);
if (!port)
@@ -143,7 +151,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
- err = rmnet_register_real_device(real_dev);
+ err = rmnet_register_real_device(real_dev, extack);
if (err)
goto err0;
@@ -288,7 +296,6 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
{
struct rmnet_priv *priv = netdev_priv(dev);
struct net_device *real_dev;
- struct rmnet_endpoint *ep;
struct rmnet_port *port;
u16 mux_id;
@@ -303,19 +310,27 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
if (data[IFLA_RMNET_MUX_ID]) {
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
- if (rmnet_get_endpoint(port, mux_id)) {
- NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
- return -EINVAL;
- }
- ep = rmnet_get_endpoint(port, priv->mux_id);
- if (!ep)
- return -ENODEV;
- hlist_del_init_rcu(&ep->hlnode);
- hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+ if (mux_id != priv->mux_id) {
+ struct rmnet_endpoint *ep;
+
+ ep = rmnet_get_endpoint(port, priv->mux_id);
+ if (!ep)
+ return -ENODEV;
- ep->mux_id = mux_id;
- priv->mux_id = mux_id;
+ if (rmnet_get_endpoint(port, mux_id)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MUX ID already exists");
+ return -EINVAL;
+ }
+
+ hlist_del_init_rcu(&ep->hlnode);
+ hlist_add_head_rcu(&ep->hlnode,
+ &port->muxed_ep[mux_id]);
+
+ ep->mux_id = mux_id;
+ priv->mux_id = mux_id;
+ }
}
if (data[IFLA_RMNET_FLAGS]) {
@@ -418,13 +433,10 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
if (port->nr_rmnet_devs > 1)
return -EINVAL;
- if (port->rmnet_mode != RMNET_EPMODE_VND)
- return -EINVAL;
-
if (rmnet_is_real_dev_registered(slave_dev))
return -EBUSY;
- err = rmnet_register_real_device(slave_dev);
+ err = rmnet_register_real_device(slave_dev, extack);
if (err)
return -EBUSY;
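The rmnet change threads a struct netlink_ext_ack through the registration path so the function that detects the conflict can attach a readable reason via NL_SET_ERR_MSG_MOD() instead of returning a bare errno. A rough userspace sketch of the idea, with every name below (ext_ack, mode_of, register_real_device) invented for illustration:

#include <errno.h>
#include <stdio.h>

struct ext_ack { const char *msg; };

static int mode_of(int dev) { (void)dev; return 1; }	/* 1 = bridge, 0 = vnd */

static int register_real_device(int dev, struct ext_ack *ack)
{
	if (mode_of(dev) != 0) {
		ack->msg = "bridge device already exists";
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	struct ext_ack ack = { 0 };

	if (register_real_device(3, &ack))
		fprintf(stderr, "error: %s\n", ack.msg);
	return 0;
}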
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index c9d43bad1e2f..9b1804a228d9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -197,6 +197,11 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
dev = skb->dev;
port = rmnet_get_port_rcu(dev);
+ if (unlikely(!port)) {
+ atomic_long_inc(&skb->dev->rx_nohandler);
+ kfree_skb(skb);
+ goto done;
+ }
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 807ef43a3cda..0c9e6cb0e341 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -229,7 +229,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_USR, 0x0116), 0, 0, RTL_CFG_0 },
{ PCI_VENDOR_ID_LINKSYS, 0x1032,
PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
{ 0x0001, 0x8168,
@@ -4111,6 +4111,27 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
rtl_unlock_work(tp);
}
+static void rtl_init_rxcfg(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
+ RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
+ break;
+ case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
+ case RTL_GIGA_MAC_VER_38:
+ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
+ default:
+ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
+ break;
+ }
+}
+
static int rtl_set_mac_address(struct net_device *dev, void *p)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -4128,6 +4149,10 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
pm_runtime_put_noidle(d);
+ /* Reportedly at least Asus X453MA truncates packets otherwise */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_37)
+ rtl_init_rxcfg(tp);
+
return 0;
}
@@ -4212,7 +4237,9 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
return;
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_43:
@@ -4238,7 +4265,9 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_43:
@@ -4289,27 +4318,6 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
}
}
-static void rtl_init_rxcfg(struct rtl8169_private *tp)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
- case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
- RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
- break;
- case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
- break;
- default:
- RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
- break;
- }
-}
-
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
@@ -4359,7 +4367,7 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
- RTL_W8(tp, MaxTxPacketSize, 0x3f);
+ RTL_W8(tp, MaxTxPacketSize, 0x24);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
@@ -4367,7 +4375,7 @@ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
- RTL_W8(tp, MaxTxPacketSize, 0x0c);
+ RTL_W8(tp, MaxTxPacketSize, 0x3f);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -6270,7 +6278,8 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
opts[1] |= transport_offset << TCPHO_SHIFT;
} else {
if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
- return !eth_skb_pad(skb);
+ /* eth_skb_pad would free the skb on error */
+ return !__skb_put_padto(skb, ETH_ZLEN, false);
}
return true;
@@ -6626,7 +6635,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
return IRQ_NONE;
rtl_irq_disable(tp);
- napi_schedule_irqoff(&tp->napi);
+ napi_schedule(&tp->napi);
return IRQ_HANDLED;
}
@@ -6826,7 +6835,7 @@ static int rtl8169_close(struct net_device *dev)
phy_disconnect(dev->phydev);
- pci_free_irq(pdev, 0, tp);
+ free_irq(pci_irq_vector(pdev, 0), tp);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
@@ -6881,8 +6890,8 @@ static int rtl_open(struct net_device *dev)
rtl_request_firmware(tp);
- retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
- dev->name);
+ retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
+ IRQF_SHARED, dev->name, tp);
if (retval < 0)
goto err_release_fw_2;
@@ -6915,7 +6924,7 @@ out:
return retval;
err_free_irq:
- pci_free_irq(pdev, 0, tp);
+ free_irq(pci_irq_vector(pdev, 0), tp);
err_release_fw_2:
rtl_release_firmware(tp);
rtl8169_rx_clear(tp);
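One detail in the r8169 hunk worth spelling out: eth_skb_pad() frees the skb when padding fails, whereas __skb_put_padto(skb, ETH_ZLEN, false) leaves ownership with the caller, which is what the new comment relies on. A small sketch of a pad helper with that "never frees on failure" contract, using an illustrative frame struct rather than a real skb:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define ETH_MIN_LEN 60

struct frame { unsigned char data[1536]; size_t len; };

/* Returns 0 on success, -1 if padding is impossible; never releases the
 * frame, mirroring the free_on_error=false behaviour assumed above. */
static int pad_to_min(struct frame *f)
{
	if (f->len >= ETH_MIN_LEN)
		return 0;
	if (sizeof(f->data) < ETH_MIN_LEN)
		return -1;
	memset(f->data + f->len, 0, ETH_MIN_LEN - f->len);
	f->len = ETH_MIN_LEN;
	return 0;
}

int main(void)
{
	struct frame f = { .len = 42 };

	printf("pad rc=%d len=%zu\n", pad_to_min(&f), f.len);	/* rc=0 len=60 */
	return 0;
}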
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index faaf74073a12..c24b7ea37e39 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1445,6 +1445,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
struct ravb_private *priv = container_of(work, struct ravb_private,
work);
struct net_device *ndev = priv->ndev;
+ int error;
netif_tx_stop_all_queues(ndev);
@@ -1453,15 +1454,36 @@ static void ravb_tx_timeout_work(struct work_struct *work)
ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
- ravb_stop_dma(ndev);
+ if (ravb_stop_dma(ndev)) {
+ /* If ravb_stop_dma() fails, the hardware is still operating
+ * for TX and/or RX. So, this should not call the following
+ * functions because ravb_dmac_init() can fail too.
+ * Also, this should not retry ravb_stop_dma() again and again
+ * here because that could wait forever. So, this just
+ * re-enables TX and RX and skips the following
+ * re-initialization procedure.
+ */
+ ravb_rcv_snd_enable(ndev);
+ goto out;
+ }
ravb_ring_free(ndev, RAVB_BE);
ravb_ring_free(ndev, RAVB_NC);
/* Device init */
- ravb_dmac_init(ndev);
+ error = ravb_dmac_init(ndev);
+ if (error) {
+ /* If ravb_dmac_init() fails, descriptors are freed. So, this
+ * should return here to avoid re-enabling the TX and RX in
+ * ravb_emac_init().
+ */
+ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
+ __func__, error);
+ return;
+ }
ravb_emac_init(ndev);
+out:
/* Initialise PTP Clock driver */
if (priv->chip_id == RCAR_GEN2)
ravb_ptp_init(ndev, priv->pdev);
@@ -1710,12 +1732,16 @@ static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
config.flags = 0;
config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
HWTSTAMP_TX_OFF;
- if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
+ switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
+ case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
+ break;
+ case RAVB_RXTSTAMP_TYPE_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
- else
+ break;
+ default:
config.rx_filter = HWTSTAMP_FILTER_NONE;
+ }
return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
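The ravb_hwtstamp_get() change replaces independent bit tests with a switch on the masked type field, presumably because the "all packets" setting shares bits with the V2 L2 event setting and the first if-test could match for both. A tiny sketch of the masked-switch mapping; the RXTS_* constants below are assumed values for illustration only:

#include <stdio.h>

enum rx_filter { FILTER_NONE, FILTER_PTP_V2_L2_EVENT, FILTER_ALL };

/* Assumed field layout, for illustration only. */
#define RXTS_TYPE_MASK		0x6u
#define RXTS_TYPE_V2_L2_EVENT	0x2u
#define RXTS_TYPE_ALL		0x4u

static enum rx_filter rx_filter_from_ctrl(unsigned int ctrl)
{
	switch (ctrl & RXTS_TYPE_MASK) {
	case RXTS_TYPE_V2_L2_EVENT:
		return FILTER_PTP_V2_L2_EVENT;
	case RXTS_TYPE_ALL:
		return FILTER_ALL;
	default:
		return FILTER_NONE;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       rx_filter_from_ctrl(0x0), rx_filter_from_ctrl(0x2),
	       rx_filter_from_ctrl(0x7));	/* prints 0 1 0 */
	return 0;
}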
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 441643670ac0..24638cb157ca 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -610,6 +610,8 @@ static struct sh_eth_cpu_data r7s72100_data = {
EESR_TDE,
.fdr_value = 0x0000070f,
+ .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5,
+
.no_psr = 1,
.apr = 1,
.mpr = 1,
@@ -825,6 +827,8 @@ static struct sh_eth_cpu_data r7s9210_data = {
.fdr_value = 0x0000070f,
+ .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5,
+
.apr = 1,
.mpr = 1,
.tpauser = 1,
@@ -1126,6 +1130,9 @@ static struct sh_eth_cpu_data sh771x_data = {
EESIPR_CEEFIP | EESIPR_CELFIP |
EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP,
+
+ .trscer_err_mask = DESC_I_RINT8,
+
.tsu = 1,
.dual_port = 1,
};
@@ -2620,10 +2627,10 @@ static int sh_eth_close(struct net_device *ndev)
/* Free all the skbuffs in the Rx queue and the DMA buffer. */
sh_eth_ring_free(ndev);
- pm_runtime_put_sync(&mdp->pdev->dev);
-
mdp->is_opened = 0;
+ pm_runtime_put(&mdp->pdev->dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index aeafdb9ac015..b13ab4eee4c7 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -651,10 +651,10 @@ static int rocker_dma_rings_init(struct rocker *rocker)
err_dma_event_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
+ rocker_dma_cmd_ring_waits_free(rocker);
+err_dma_cmd_ring_waits_alloc:
rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
PCI_DMA_BIDIRECTIONAL);
-err_dma_cmd_ring_waits_alloc:
- rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
return err;
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index 411a2f419447..8a14f7716302 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -873,17 +873,12 @@ static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
{
struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
struct ef4_nic *efx = rx_queue->efx;
- bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+ bool __maybe_unused rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
- bool rx_ev_other_err, rx_ev_pause_frm;
- bool rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned rx_ev_pkt_type;
+ bool rx_ev_pause_frm;
- rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
- rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
- rx_ev_pkt_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event,
FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event,
@@ -896,10 +891,6 @@ static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
- /* Every error apart from tobe_disc and pause_frm */
- rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
- rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
- rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
/* Count errors that are not in MAC stats. Ignore expected
* checksum errors during self-test. */
@@ -919,6 +910,13 @@ static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
* to a FIFO overflow.
*/
#ifdef DEBUG
+ {
+ /* Every error apart from tobe_disc and pause_frm */
+
+ bool rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+ rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
+ rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
+
if (rx_ev_other_err && net_ratelimit()) {
netif_dbg(efx, rx_err, efx->net_dev,
" RX queue %d unexpected RX event "
@@ -935,6 +933,7 @@ static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
rx_ev_pause_frm ? " [PAUSE]" : "");
}
+ }
#endif
/* The frame must be discarded if any of these are true. */
@@ -1646,15 +1645,11 @@ void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
*/
void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
{
- unsigned vi_count, buftbl_min;
+ unsigned vi_count;
/* Account for the buffer table entries backing the datapath channels
* and the descriptor caches for those channels.
*/
- buftbl_min = ((efx->n_rx_channels * EF4_MAX_DMAQ_SIZE +
- efx->n_tx_channels * EF4_TXQ_TYPES * EF4_MAX_DMAQ_SIZE +
- efx->n_channels * EF4_MAX_EVQ_SIZE)
- * sizeof(ef4_qword_t) / EF4_BUF_SIZE);
vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
@@ -2535,7 +2530,6 @@ int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
enum ef4_farch_filter_table_id table_id;
struct ef4_farch_filter_table *table;
unsigned int filter_idx;
- struct ef4_farch_filter_spec *spec;
int rc;
table_id = ef4_farch_filter_id_table_id(filter_id);
@@ -2546,7 +2540,6 @@ int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
filter_idx = ef4_farch_filter_id_index(filter_id);
if (filter_idx >= table->size)
return -ENOENT;
- spec = &table->spec[filter_idx];
spin_lock_bh(&efx->filter_lock);
rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index d5bcbc40a55f..823873030a21 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -783,10 +783,9 @@ static u16 sis900_default_phy(struct net_device * net_dev)
static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *phy)
{
u16 cap;
- u16 status;
- status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
- status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ mdio_read(net_dev, phy->phy_addr, MII_STATUS);
cap = MII_NWAY_CSMA_CD |
((phy->status & MII_STAT_CAN_TX_FDX)? MII_NWAY_TX_FDX:0) |
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 8d6cff8bd162..0f870ed5a9c8 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2287,7 +2287,7 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio,
"power", 0, 0, 100);
if (ret)
- return ret;
+ goto out_free_netdev;
/*
* Optional reset GPIO configured? Minimum 100 ns reset needed
@@ -2296,7 +2296,7 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio,
"reset", 0, 0, 100);
if (ret)
- return ret;
+ goto out_free_netdev;
/*
* Need to wait for optional EEPROM to load, max 750 us according
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index ce4bfecc26c7..ae80a223975d 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2515,20 +2515,20 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
retval = smsc911x_init(dev);
if (retval < 0)
- goto out_disable_resources;
+ goto out_init_fail;
netif_carrier_off(dev);
retval = smsc911x_mii_init(pdev, dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
- goto out_disable_resources;
+ goto out_init_fail;
}
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
- goto out_disable_resources;
+ goto out_init_fail;
} else {
SMSC_TRACE(pdata, probe,
"Network interface: \"%s\"", dev->name);
@@ -2569,9 +2569,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
return 0;
-out_disable_resources:
+out_init_fail:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+out_disable_resources:
(void)smsc911x_disable_resources(pdev);
out_enable_resources_fail:
smsc911x_free_resources(pdev);
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 027367b9cc48..3693a59b6d01 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1386,14 +1386,17 @@ static int netsec_netdev_init(struct net_device *ndev)
goto err1;
/* set phy power down */
- data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
- BMCR_PDOWN;
- netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
+ data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR);
+ netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR,
+ data | BMCR_PDOWN);
ret = netsec_reset_hardware(priv, true);
if (ret)
goto err2;
+ /* Restore phy power state */
+ netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
+
return 0;
err2:
netsec_free_dring(priv, NETSEC_RING_RX);
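The netsec hunk reads the PHY control register once, sets the power-down bit only for the duration of the hardware reset, and then writes the saved value back. A compact sketch of that save/modify/restore pattern with stubbed register accessors; CTRL_PDOWN and the helpers are invented for the example:

#define CTRL_PDOWN	(1u << 11)

static unsigned int phy_ctrl_reg;			/* fake register */

static unsigned int reg_read(void)	{ return phy_ctrl_reg; }
static void reg_write(unsigned int v)	{ phy_ctrl_reg = v; }
static void hw_reset(void)		{ /* reset would happen here */ }

static void reset_with_phy_down(void)
{
	unsigned int saved = reg_read();

	reg_write(saved | CTRL_PDOWN);	/* power the PHY down for the reset */
	hw_reset();
	reg_write(saved);		/* restore the previous power state */
}

int main(void)
{
	phy_ctrl_reg = 0x1140u;		/* arbitrary starting value */
	reset_with_phy_down();
	return phy_ctrl_reg == 0x1140u ? 0 : 1;
}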
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index c309accc6797..01cde5f27ead 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1196,7 +1196,7 @@ static int ave_init(struct net_device *ndev)
ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
priv->pinmode_mask, priv->pinmode_val);
if (ret)
- return ret;
+ goto out_reset_assert;
ave_global_reset(ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 0d21082ceb93..0f56f8e33691 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -318,6 +318,19 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
/* Enable PTP clock */
regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
+ break;
+ default:
+ /* We don't get here; the switch above will have errored out */
+ unreachable();
+ }
regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
@@ -337,6 +350,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
plat_dat->has_gmac = true;
plat_dat->bsp_priv = gmac;
plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+ plat_dat->multicast_filter_bins = 0;
+ plat_dat->tx_fifo_size = 8192;
+ plat_dat->rx_fifo_size = 8192;
err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (err)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 0a17535f13ae..5020d5b28c6a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -35,7 +35,6 @@
#define PRG_ETH0_EXT_RMII_MODE 4
/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
-#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
#define PRG_ETH0_TXDLY_SHIFT 5
@@ -125,6 +124,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
{ .div = 5, .val = 5, },
{ .div = 6, .val = 6, },
{ .div = 7, .val = 7, },
+ { /* end of array */ }
};
clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
@@ -148,8 +148,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
}
clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
- clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
- clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
+ clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK);
+ clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >>
+ clk_configs->m250_mux.shift;
clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parent_names,
MUX_CLK_NUM_PARENTS, &clk_mux_ops,
&clk_configs->m250_mux.hw);
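The meson8b change drops the separate *_SHIFT define and derives both the shift and the field-relative mask from the single mask macro, so the two cannot drift apart. A userspace stand-in for the __ffs()-based derivation, where SEL_MASK mirrors GENMASK(4, 4) and first_set_bit() plays the role of __ffs():

#include <stdio.h>

#define SEL_MASK 0x10u				/* GENMASK(4, 4) */

static unsigned int first_set_bit(unsigned int m)	/* like __ffs() */
{
	unsigned int n = 0;

	while (!(m & 1u)) {
		m >>= 1;
		n++;
	}
	return n;
}

int main(void)
{
	unsigned int shift = first_set_bit(SEL_MASK);
	unsigned int field_mask = SEL_MASK >> shift;

	printf("shift=%u mask=0x%x\n", shift, field_mask);	/* shift=4 mask=0x1 */
	return 0;
}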
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 5b3b06a0a3bf..33407df6bea6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -274,16 +274,19 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
phymode == PHY_INTERFACE_MODE_MII ||
phymode == PHY_INTERFACE_MODE_GMII ||
phymode == PHY_INTERFACE_MODE_SGMII) {
- ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
&module);
module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
module);
- } else {
- ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
}
+ if (dwmac->f2h_ptp_ref_clk)
+ ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
+ else
+ ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK <<
+ (reg_shift / 2));
+
regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
/* Deassert reset for the phy configuration to be sampled by
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index ef13a462c36d..4382deaeb570 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -73,6 +73,7 @@ struct emac_variant {
* @variant: reference to the current board variant
* @regmap: regmap for using the syscon
* @internal_phy_powered: Is the internal PHY powered
+ * @use_internal_phy: Is the internal PHY selected for use
* @mux_handle: Internal pointer used by mdio-mux lib
*/
struct sunxi_priv_data {
@@ -83,6 +84,7 @@ struct sunxi_priv_data {
const struct emac_variant *variant;
struct regmap_field *regmap_field;
bool internal_phy_powered;
+ bool use_internal_phy;
void *mux_handle;
};
@@ -518,8 +520,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
.dma_interrupt = sun8i_dwmac_dma_interrupt,
};
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
+
static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
{
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct sunxi_priv_data *gmac = priv;
int ret;
@@ -533,13 +538,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
ret = clk_prepare_enable(gmac->tx_clk);
if (ret) {
- if (gmac->regulator)
- regulator_disable(gmac->regulator);
dev_err(&pdev->dev, "Could not enable AHB clock\n");
- return ret;
+ goto err_disable_regulator;
+ }
+
+ if (gmac->use_internal_phy) {
+ ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
+ if (ret)
+ goto err_disable_clk;
}
return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(gmac->tx_clk);
+err_disable_regulator:
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+
+ return ret;
}
static void sun8i_dwmac_core_init(struct mac_device_info *hw,
@@ -809,7 +826,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
u32 reg, val;
int ret = 0;
- bool need_power_ephy = false;
if (current_child ^ desired_child) {
regmap_field_read(gmac->regmap_field, &reg);
@@ -817,13 +833,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
dev_info(priv->device, "Switch mux to internal PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
-
- need_power_ephy = true;
+ gmac->use_internal_phy = true;
break;
case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
dev_info(priv->device, "Switch mux to external PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
- need_power_ephy = false;
+ gmac->use_internal_phy = false;
break;
default:
dev_err(priv->device, "Invalid child ID %x\n",
@@ -831,7 +846,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
return -EINVAL;
}
regmap_field_write(gmac->regmap_field, val);
- if (need_power_ephy) {
+ if (gmac->use_internal_phy) {
ret = sun8i_dwmac_power_internal_phy(priv);
if (ret)
return ret;
@@ -977,17 +992,12 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
struct sunxi_priv_data *gmac = priv;
if (gmac->variant->soc_has_internal_phy) {
- /* sun8i_dwmac_exit could be called with mdiomux uninit */
- if (gmac->mux_handle)
- mdio_mux_uninit(gmac->mux_handle);
if (gmac->internal_phy_powered)
sun8i_dwmac_unpower_internal_phy(gmac);
}
sun8i_dwmac_unset_syscon(gmac);
- reset_control_put(gmac->rst_ephy);
-
clk_disable_unprepare(gmac->tx_clk);
if (gmac->regulator)
@@ -1169,6 +1179,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
plat_dat->init = sun8i_dwmac_init;
plat_dat->exit = sun8i_dwmac_exit;
plat_dat->setup = sun8i_dwmac_setup;
+ plat_dat->tx_fifo_size = 4096;
+ plat_dat->rx_fifo_size = 16384;
ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
if (ret)
@@ -1200,12 +1212,32 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return ret;
dwmac_mux:
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
sun8i_dwmac_unset_syscon(gmac);
dwmac_exit:
stmmac_pltfr_remove(pdev);
return ret;
}
+static int sun8i_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+ if (gmac->variant->soc_has_internal_phy) {
+ mdio_mux_uninit(gmac->mux_handle);
+ sun8i_dwmac_unpower_internal_phy(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+ }
+
+ stmmac_pltfr_remove(pdev);
+
+ return 0;
+}
+
static const struct of_device_id sun8i_dwmac_match[] = {
{ .compatible = "allwinner,sun8i-h3-emac",
.data = &emac_variant_h3 },
@@ -1223,7 +1255,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
static struct platform_driver sun8i_dwmac_driver = {
.probe = sun8i_dwmac_probe,
- .remove = stmmac_pltfr_remove,
+ .remove = sun8i_dwmac_remove,
.driver = {
.name = "dwmac-sun8i",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index fc1fa0f9f338..f31067659e95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -39,7 +39,7 @@ struct sunxi_priv_data {
static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
- int ret;
+ int ret = 0;
if (gmac->regulator) {
ret = regulator_enable(gmac->regulator);
@@ -60,11 +60,11 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
} else {
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
ret = clk_prepare(gmac->tx_clk);
- if (ret)
- return ret;
+ if (ret && gmac->regulator)
+ regulator_disable(gmac->regulator);
}
- return 0;
+ return ret;
}
static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
@@ -155,6 +155,8 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
plat_dat->init = sun7i_gmac_init;
plat_dat->exit = sun7i_gmac_exit;
plat_dat->fix_mac_speed = sun7i_fix_speed;
+ plat_dat->tx_fifo_size = 4096;
+ plat_dat->rx_fifo_size = 16384;
ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index e4e9a7591efe..4d617ba11ecb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -176,6 +176,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
value = GMAC_FRAME_FILTER_PR;
} else if (dev->flags & IFF_ALLMULTI) {
value = GMAC_FRAME_FILTER_PM; /* pass all multi */
+ } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) {
+ /* Fall back to all multicast if we've no filter */
+ value = GMAC_FRAME_FILTER_PM;
} else if (!netdev_mc_empty(dev)) {
struct netdev_hw_addr *ha;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index edb6053bd980..232efe17ac2c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -119,6 +119,23 @@ static void dwmac4_dma_init_channel(void __iomem *ioaddr,
ioaddr + DMA_CHAN_INTR_ENA(chan));
}
+static void dwmac410_dma_init_channel(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
+{
+ u32 value;
+
+ /* common channel control register config */
+ value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+ if (dma_cfg->pblx8)
+ value = value | DMA_BUS_MODE_PBL;
+
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
+ ioaddr + DMA_CHAN_INTR_ENA(chan));
+}
+
static void dwmac4_dma_init(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, int atds)
{
@@ -197,7 +214,7 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
unsigned int rqs = fifosz / 256 - 1;
- u32 mtl_rx_op, mtl_rx_int;
+ u32 mtl_rx_op;
mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
@@ -268,11 +285,6 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
}
writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
-
- /* Enable MTL RX overflow */
- mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
- writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
- ioaddr + MTL_CHAN_INT_CTRL(channel));
}
static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
@@ -461,7 +473,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
const struct stmmac_dma_ops dwmac410_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
- .init_chan = dwmac4_dma_init_channel,
+ .init_chan = dwmac410_dma_init_channel,
.init_rx_chan = dwmac4_dma_init_rx_chan,
.init_tx_chan = dwmac4_dma_init_tx_chan,
.axi = dwmac4_dma_axi,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 49f5687879df..32461909264a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -63,10 +63,6 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
value &= ~DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
-
- value = readl(ioaddr + GMAC_CONFIG);
- value &= ~GMAC_CONFIG_TE;
- writel(value, ioaddr + GMAC_CONFIG);
}
void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 4d5fb4b51cc4..5986fe927ad0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -694,23 +694,16 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- if (!edata->eee_enabled) {
+ if (!priv->dma_cap.eee)
+ return -EOPNOTSUPP;
+
+ if (!edata->eee_enabled)
stmmac_disable_eee_mode(priv);
- } else {
- /* We are asking for enabling the EEE but it is safe
- * to verify all by invoking the eee_init function.
- * In case of failure it will return an error.
- */
- edata->eee_enabled = stmmac_eee_init(priv);
- if (!edata->eee_enabled)
- return -EOPNOTSUPP;
- }
ret = phy_ethtool_set_eee(dev->phydev, edata);
if (ret)
return ret;
- priv->eee_enabled = edata->eee_enabled;
priv->tx_lpi_timer = edata->tx_lpi_timer;
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 7423262ce590..e1fbd7c81bfa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -36,12 +36,16 @@ static void config_sub_second_increment(void __iomem *ioaddr,
unsigned long data;
u32 reg_value;
- /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
- * formula = (1/ptp_clock) * 1000000000
- * where ptp_clock is 50MHz if fine method is used to update system
+ /* For GMAC3.x, 4.x versions, in "fine adjustment mode" set sub-second
+ * increment to twice the number of nanoseconds of a clock cycle.
+ * The calculation of the default_addend value by the caller will set it
+ * to mid-range = 2^31 when the remainder of this division is zero,
+ * which will make the accumulator overflow once every 2 ptp_clock
+ * cycles, adding twice the number of nanoseconds of a clock cycle:
+ * 2000000000ULL / ptp_clock.
*/
if (value & PTP_TCR_TSCFUPDT)
- data = (1000000000ULL / 50000000);
+ data = (2000000000ULL / ptp_clock);
else
data = (1000000000ULL / ptp_clock);
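The new comment in stmmac_hwtstamp.c is easiest to check with numbers. Assuming a 50 MHz PTP reference clock: one cycle is 20 ns, and in fine-correction mode the mid-range addend of 2^31 makes the 32-bit accumulator overflow every second cycle, so the programmed sub-second increment is two cycle periods, 40 ns. A worked example of just that arithmetic:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t ptp_clock = 50000000ULL;			/* Hz, assumed */
	uint64_t coarse_inc = 1000000000ULL / ptp_clock;	/* 20 ns: one period */
	uint64_t fine_inc   = 2000000000ULL / ptp_clock;	/* 40 ns: two periods */

	printf("coarse=%" PRIu64 " ns, fine=%" PRIu64 " ns\n",
	       coarse_inc, fine_inc);
	return 0;
}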
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 9c7b1d8e8220..af59761ddfa0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -177,32 +177,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
}
}
-/**
- * stmmac_stop_all_queues - Stop all queues
- * @priv: driver private structure
- */
-static void stmmac_stop_all_queues(struct stmmac_priv *priv)
-{
- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
- u32 queue;
-
- for (queue = 0; queue < tx_queues_cnt; queue++)
- netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
-}
-
-/**
- * stmmac_start_all_queues - Start all queues
- * @priv: driver private structure
- */
-static void stmmac_start_all_queues(struct stmmac_priv *priv)
-{
- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
- u32 queue;
-
- for (queue = 0; queue < tx_queues_cnt; queue++)
- netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
-}
-
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
if (!test_bit(STMMAC_DOWN, &priv->state) &&
@@ -1455,6 +1429,19 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
}
/**
+ * stmmac_free_tx_skbufs - free TX skb buffers
+ * @priv: private structure
+ */
+static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
+{
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ dma_free_tx_skbufs(priv, queue);
+}
+
+/**
* free_dma_rx_desc_resources - free RX dma desc resources
* @priv: private structure
*/
@@ -2678,7 +2665,7 @@ static int stmmac_open(struct net_device *dev)
}
stmmac_enable_all_queues(priv);
- stmmac_start_all_queues(priv);
+ netif_tx_start_all_queues(priv->dev);
return 0;
@@ -2715,17 +2702,12 @@ static int stmmac_release(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
- if (priv->eee_enabled)
- del_timer_sync(&priv->eee_ctrl_timer);
-
/* Stop and disconnect the PHY */
if (dev->phydev) {
phy_stop(dev->phydev);
phy_disconnect(dev->phydev);
}
- stmmac_stop_all_queues(priv);
-
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
@@ -2738,6 +2720,11 @@ static int stmmac_release(struct net_device *dev)
if (priv->lpi_irq > 0)
free_irq(priv->lpi_irq, dev);
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
/* Stop TX/RX DMA and clear the descriptors */
stmmac_stop_all_dma(priv);
@@ -3609,6 +3596,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
int txfifosz = priv->plat->tx_fifo_size;
+ const int mtu = new_mtu;
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
@@ -3626,7 +3614,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
return -EINVAL;
- dev->mtu = new_mtu;
+ dev->mtu = mtu;
netdev_update_features(dev);
@@ -3684,7 +3672,7 @@ static int stmmac_set_features(struct net_device *netdev,
/**
* stmmac_interrupt - main ISR
* @irq: interrupt number.
- * @dev_id: to pass the net device pointer.
+ * @dev_id: to pass the net device pointer (must be valid).
* Description: this is the main driver interrupt service routine.
* It can call:
* o DMA service routine (to manage incoming frame reception and transmission
@@ -3708,11 +3696,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (priv->irq_wake)
pm_wakeup_event(priv->device, 0);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -3723,7 +3706,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
/* To handle GMAC own interrupts */
if ((priv->plat->has_gmac) || xmac) {
int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
- int mtl_status;
if (unlikely(status)) {
/* For LPI we need to save the tx status */
@@ -3734,17 +3716,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
}
for (queue = 0; queue < queues_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
- mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
- queue);
- if (mtl_status != -EINVAL)
- status |= mtl_status;
-
- if (status & CORE_IRQ_MTL_RX_OVERFLOW)
- stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
- rx_q->rx_tail_addr,
- queue);
+ status = stmmac_host_mtl_irq_status(priv, priv->hw,
+ queue);
}
/* PCS link status */
@@ -4524,13 +4497,17 @@ int stmmac_suspend(struct device *dev)
mutex_lock(&priv->lock);
netif_device_detach(ndev);
- stmmac_stop_all_queues(priv);
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
del_timer_sync(&priv->tx_queue[chan].txtimer);
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
/* Stop TX/RX DMA */
stmmac_stop_all_dma(priv);
@@ -4579,6 +4556,8 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv)
tx_q->cur_tx = 0;
tx_q->dirty_tx = 0;
tx_q->mss = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}
}
@@ -4625,6 +4604,7 @@ int stmmac_resume(struct device *dev)
stmmac_reset_queues_param(priv);
+ stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv);
stmmac_hw_setup(ndev, false);
@@ -4633,8 +4613,6 @@ int stmmac_resume(struct device *dev)
stmmac_enable_all_queues(priv);
- stmmac_start_all_queues(priv);
-
mutex_unlock(&priv->lock);
if (ndev->phydev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 37c0bc699cd9..cc1895a32b9d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -314,7 +314,12 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
} else if (!qopt->enable) {
- return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
+ ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
+ MTL_QUEUE_DCB);
+ if (ret)
+ return ret;
+
+ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
}
/* Port Transmit Rate and Speed Divider */
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 7ec4eb74fe21..d323dd9daccb 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4971,7 +4971,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
cas_cacheline_size)) {
dev_err(&pdev->dev, "Could not set PCI cache "
"line size\n");
- goto err_write_cacheline;
+ goto err_out_free_res;
}
}
#endif
@@ -5144,7 +5144,6 @@ err_out_iounmap:
err_out_free_res:
pci_release_regions(pdev);
-err_write_cacheline:
/* Try to restore it in case the error occurred after we
* set it.
*/
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index d84501441edd..5894edf79d65 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3933,8 +3933,6 @@ static void niu_xmac_interrupt(struct niu *np)
mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
- if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
- mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
@@ -8147,10 +8145,10 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
"VPD_SCAN: Reading in property [%s] len[%d]\n",
namebuf, prop_len);
for (i = 0; i < prop_len; i++) {
- err = niu_pci_eeprom_read(np, off + i);
- if (err >= 0)
- *prop_buf = err;
- ++prop_buf;
+ err = niu_pci_eeprom_read(np, off + i);
+ if (err < 0)
+ return err;
+ *prop_buf++ = err;
}
}
@@ -8161,14 +8159,14 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
}
/* ESPC_PIO_EN_ENABLE must be set */
-static void niu_pci_vpd_fetch(struct niu *np, u32 start)
+static int niu_pci_vpd_fetch(struct niu *np, u32 start)
{
u32 offset;
int err;
err = niu_pci_eeprom_read16_swp(np, start + 1);
if (err < 0)
- return;
+ return err;
offset = err + 3;
@@ -8177,12 +8175,14 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
u32 end;
err = niu_pci_eeprom_read(np, here);
+ if (err < 0)
+ return err;
if (err != 0x90)
- return;
+ return -EINVAL;
err = niu_pci_eeprom_read16_swp(np, here + 1);
if (err < 0)
- return;
+ return err;
here = start + offset + 3;
end = start + offset + err;
@@ -8190,9 +8190,12 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
offset += err;
err = niu_pci_vpd_scan_props(np, here, end);
- if (err < 0 || err == 1)
- return;
+ if (err < 0)
+ return err;
+ if (err == 1)
+ return -EINVAL;
}
+ return 0;
}
/* ESPC_PIO_EN_ENABLE must be set */
@@ -9283,8 +9286,11 @@ static int niu_get_invariants(struct niu *np)
offset = niu_pci_vpd_offset(np);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"%s() VPD offset [%08x]\n", __func__, offset);
- if (offset)
- niu_pci_vpd_fetch(np, offset);
+ if (offset) {
+ err = niu_pci_vpd_fetch(np, offset);
+ if (err < 0)
+ return err;
+ }
nw64(ESPC_PIO_EN, 0);
if (np->flags & NIU_FLAGS_VPD_VALID) {
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index baa3088b475c..59fc14b58c83 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1353,27 +1353,12 @@ sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
if (vio_version_after_eq(&port->vio, 1, 3))
localmtu -= VLAN_HLEN;
- if (skb->protocol == htons(ETH_P_IP)) {
- struct flowi4 fl4;
- struct rtable *rt = NULL;
-
- memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = dev->ifindex;
- fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
- fl4.daddr = ip_hdr(skb)->daddr;
- fl4.saddr = ip_hdr(skb)->saddr;
-
- rt = ip_route_output_key(dev_net(dev), &fl4);
- if (!IS_ERR(rt)) {
- skb_dst_set(skb, &rt->dst);
- icmp_send(skb, ICMP_DEST_UNREACH,
- ICMP_FRAG_NEEDED,
- htonl(localmtu));
- }
- }
+ if (skb->protocol == htons(ETH_P_IP))
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(localmtu));
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6))
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
+ icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
goto out_dropped;
}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index eb1c6b03c329..df26cea45904 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -513,7 +513,7 @@ void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata)
void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata)
{
- char *str = NULL;
+ char __maybe_unused *str = NULL;
XLGMAC_PR("\n");
XLGMAC_PR("=====================================================\n");
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index dc966ddb6d81..358f911fcd9d 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2056,6 +2056,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/*bdx_hw_reset(priv); */
if (bdx_read_mac(priv)) {
pr_err("load MAC address failed\n");
+ err = -EFAULT;
goto err_out_iomap;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index d7543811dfae..10b301e79086 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -476,6 +476,7 @@ void cpts_unregister(struct cpts *cpts)
ptp_clock_unregister(cpts->clock);
cpts->clock = NULL;
+ cpts->phc_index = -1;
cpts_write32(cpts, 0, int_enable);
cpts_write32(cpts, 0, control);
@@ -577,6 +578,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->cc.read = cpts_systim_read;
cpts->cc.mask = CLOCKSOURCE_MASK(32);
cpts->info = cpts_info;
+ cpts->phc_index = -1;
cpts_calc_mult_shift(cpts);
/* save cc.mult original value as it can be modified
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index f270beebb428..56130cf293f3 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -183,11 +183,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
/* EMAC mac_status register */
#define EMAC_MACSTATUS_TXERRCODE_MASK (0xF00000)
#define EMAC_MACSTATUS_TXERRCODE_SHIFT (20)
-#define EMAC_MACSTATUS_TXERRCH_MASK (0x7)
+#define EMAC_MACSTATUS_TXERRCH_MASK (0x70000)
#define EMAC_MACSTATUS_TXERRCH_SHIFT (16)
#define EMAC_MACSTATUS_RXERRCODE_MASK (0xF000)
#define EMAC_MACSTATUS_RXERRCODE_SHIFT (12)
-#define EMAC_MACSTATUS_RXERRCH_MASK (0x7)
+#define EMAC_MACSTATUS_RXERRCH_MASK (0x700)
#define EMAC_MACSTATUS_RXERRCH_SHIFT (8)
/* EMAC RX register masks */
@@ -1240,7 +1240,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = priv->ndev;
struct device *emac_dev = &ndev->dev;
u32 status = 0;
- u32 num_tx_pkts = 0, num_rx_pkts = 0;
+ u32 num_rx_pkts = 0;
/* Check interrupt vectors and call packet processing */
status = emac_read(EMAC_MACINVECTOR);
@@ -1251,8 +1251,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
if (status & mask) {
- num_tx_pkts = cpdma_chan_process(priv->txchan,
- EMAC_DEF_TX_MAX_SERVICE);
+ cpdma_chan_process(priv->txchan, EMAC_DEF_TX_MAX_SERVICE);
} /* TX processing */
mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
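The davinci_emac fix moves the TXERRCH/RXERRCH masks up to the bit positions of the fields they describe, so that the usual (status & MASK) >> SHIFT extraction works; a bare 0x7 mask at bit 0 would almost always have yielded zero. A worked example with an illustrative status value:

#include <stdio.h>

#define TXERRCH_MASK	0x70000u
#define TXERRCH_SHIFT	16

int main(void)
{
	unsigned int status = 0x00350000u;			/* assumed sample */
	unsigned int chan = (status & TXERRCH_MASK) >> TXERRCH_SHIFT;

	printf("tx error channel = %u\n", chan);		/* prints 5 */
	return 0;
}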
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index a1d335a3c5e4..6099865217f2 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1364,9 +1364,9 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
KNAV_QUEUE_SHARED);
if (IS_ERR(tx_pipe->dma_queue)) {
+ ret = PTR_ERR(tx_pipe->dma_queue);
dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
name, ret);
- ret = PTR_ERR(tx_pipe->dma_queue);
goto err;
}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 93d142867c2a..01f99e5df145 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -671,7 +671,6 @@ module_exit(tlan_exit);
static void __init tlan_eisa_probe(void)
{
long ioaddr;
- int rc = -ENODEV;
int irq;
u16 device_id;
@@ -736,8 +735,7 @@ static void __init tlan_eisa_probe(void)
/* Setup the newly found eisa adapter */
- rc = tlan_probe1(NULL, ioaddr, irq,
- 12, NULL);
+ tlan_probe1(NULL, ioaddr, irq, 12, NULL);
continue;
out:
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 23417266b7ec..e66014e0427f 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -296,8 +296,8 @@ spider_net_free_chain(struct spider_net_card *card,
descr = descr->next;
} while (descr != chain->ring);
- dma_free_coherent(&card->pdev->dev, chain->num_desc,
- chain->hwring, chain->dma_addr);
+ dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
+ chain->hwring, chain->dma_addr);
}
/**
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ef9538ee53d0..f0c33d00dcb4 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -875,26 +875,13 @@ static u32 check_connection_type(struct mac_regs __iomem *regs)
*/
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
- u32 curr_status;
struct mac_regs __iomem *regs = vptr->mac_regs;
vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
- curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
/* Set mii link status */
set_mii_flow_control(vptr);
- /*
- Check if new status is consistent with current status
- if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
- (mii_status==curr_status)) {
- vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
- vptr->mii_status=check_connection_type(vptr->mac_regs);
- VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
- return 0;
- }
- */
-
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
index 3a424c864f4d..ecebeeb9b2a0 100644
--- a/drivers/net/fddi/Kconfig
+++ b/drivers/net/fddi/Kconfig
@@ -28,17 +28,20 @@ config DEFXX
config DEFXX_MMIO
bool
- prompt "Use MMIO instead of PIO" if PCI || EISA
+ prompt "Use MMIO instead of IOP" if PCI || EISA
depends on DEFXX
- default n if PCI || EISA
+ default n if EISA
default y
---help---
This instructs the driver to use EISA or PCI memory-mapped I/O
- (MMIO) as appropriate instead of programmed I/O ports (PIO).
+ (MMIO) as appropriate instead of programmed I/O ports (IOP).
Enabling this gives an improvement in processing time in parts
- of the driver, but it may cause problems with EISA (DEFEA)
- adapters. TURBOchannel does not have the concept of I/O ports,
- so MMIO is always used for these (DEFTA) adapters.
+ of the driver, but it requires a memory window to be configured
+ for EISA (DEFEA) adapters that may not always be available.
+ Conversely some PCIe host bridges do not support IOP, so MMIO
+ may be required to access PCI (DEFPA) adapters on downstream PCI
+ buses with some systems. TURBOchannel does not have the concept
+ of I/O ports, so MMIO is always used for these (DEFTA) adapters.
If unsure, say N.
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 61fceee73c1b..3b48c890540a 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -495,6 +495,25 @@ static const struct net_device_ops dfx_netdev_ops = {
.ndo_set_mac_address = dfx_ctl_set_mac_address,
};
+static void dfx_register_res_alloc_err(const char *print_name, bool mmio,
+ bool eisa)
+{
+ pr_err("%s: Cannot use %s, no address set, aborting\n",
+ print_name, mmio ? "MMIO" : "I/O");
+ pr_err("%s: Recompile driver with \"CONFIG_DEFXX_MMIO=%c\"\n",
+ print_name, mmio ? 'n' : 'y');
+ if (eisa && mmio)
+ pr_err("%s: Or run ECU and set adapter's MMIO location\n",
+ print_name);
+}
+
+static void dfx_register_res_err(const char *print_name, bool mmio,
+ unsigned long start, unsigned long len)
+{
+ pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
+ print_name, mmio ? "MMIO" : "I/O", len, start);
+}
+
/*
* ================
* = dfx_register =
@@ -568,15 +587,12 @@ static int dfx_register(struct device *bdev)
dev_set_drvdata(bdev, dev);
dfx_get_bars(bdev, bar_start, bar_len);
- if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) {
- pr_err("%s: Cannot use MMIO, no address set, aborting\n",
- print_name);
- pr_err("%s: Run ECU and set adapter's MMIO location\n",
- print_name);
- pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\""
- "\n", print_name);
+ if (bar_len[0] == 0 ||
+ (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) {
+ dfx_register_res_alloc_err(print_name, dfx_use_mmio,
+ dfx_bus_eisa);
err = -ENXIO;
- goto err_out;
+ goto err_out_disable;
}
if (dfx_use_mmio)
@@ -585,18 +601,16 @@ static int dfx_register(struct device *bdev)
else
region = request_region(bar_start[0], bar_len[0], print_name);
if (!region) {
- pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, "
- "aborting\n", dfx_use_mmio ? "MMIO" : "I/O", print_name,
- (long)bar_len[0], (long)bar_start[0]);
+ dfx_register_res_err(print_name, dfx_use_mmio,
+ bar_start[0], bar_len[0]);
err = -EBUSY;
goto err_out_disable;
}
if (bar_start[1] != 0) {
region = request_region(bar_start[1], bar_len[1], print_name);
if (!region) {
- pr_err("%s: Cannot reserve I/O resource "
- "0x%lx @ 0x%lx, aborting\n", print_name,
- (long)bar_len[1], (long)bar_start[1]);
+ dfx_register_res_err(print_name, 0,
+ bar_start[1], bar_len[1]);
err = -EBUSY;
goto err_out_csr_region;
}
@@ -604,9 +618,8 @@ static int dfx_register(struct device *bdev)
if (bar_start[2] != 0) {
region = request_region(bar_start[2], bar_len[2], print_name);
if (!region) {
- pr_err("%s: Cannot reserve I/O resource "
- "0x%lx @ 0x%lx, aborting\n", print_name,
- (long)bar_len[2], (long)bar_start[2]);
+ dfx_register_res_err(print_name, 0,
+ bar_start[2], bar_len[2]);
err = -EBUSY;
goto err_out_bh_region;
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index ff83408733d4..8c458c8f57a3 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -223,8 +223,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
if (ip_tunnel_collect_metadata() || gs->collect_md) {
__be16 flags;
- flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
- (gnvh->oam ? TUNNEL_OAM : 0) |
+ flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) |
(gnvh->critical ? TUNNEL_CRIT_OPT : 0);
tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
@@ -721,7 +720,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct net_device *dev,
struct geneve_sock *gs4,
struct flowi4 *fl4,
- const struct ip_tunnel_info *info)
+ const struct ip_tunnel_info *info,
+ __be16 dport, __be16 sport)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
@@ -737,6 +737,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
fl4->flowi4_proto = IPPROTO_UDP;
fl4->daddr = info->key.u.ipv4.dst;
fl4->saddr = info->key.u.ipv4.src;
+ fl4->fl4_dport = dport;
+ fl4->fl4_sport = sport;
tos = info->key.tos;
if ((tos == 1) && !geneve->collect_md) {
@@ -771,7 +773,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
struct net_device *dev,
struct geneve_sock *gs6,
struct flowi6 *fl6,
- const struct ip_tunnel_info *info)
+ const struct ip_tunnel_info *info,
+ __be16 dport, __be16 sport)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
@@ -787,6 +790,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
fl6->flowi6_proto = IPPROTO_UDP;
fl6->daddr = info->key.u.ipv6.dst;
fl6->saddr = info->key.u.ipv6.src;
+ fl6->fl6_dport = dport;
+ fl6->fl6_sport = sport;
+
prio = info->key.tos;
if ((prio == 1) && !geneve->collect_md) {
prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
@@ -801,7 +807,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
if (dst)
return dst;
}
- if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
+ dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6,
+ NULL);
+ if (IS_ERR(dst)) {
netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
return ERR_PTR(-ENETUNREACH);
}
@@ -831,14 +839,18 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 df;
int err;
- rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
+ if (!pskb_inet_may_pull(skb))
+ return -EINVAL;
+
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
+ geneve->info.key.tp_dst, sport);
if (IS_ERR(rt))
return PTR_ERR(rt);
skb_tunnel_check_pmtu(skb, &rt->dst,
GENEVE_IPV4_HLEN + info->options_len);
- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
@@ -873,13 +885,17 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
+ if (!pskb_inet_may_pull(skb))
+ return -EINVAL;
+
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
+ geneve->info.key.tp_dst, sport);
if (IS_ERR(dst))
return PTR_ERR(dst);
skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);
- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
@@ -909,9 +925,10 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
if (geneve->collect_md) {
info = skb_tunnel_info(skb);
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
- err = -EINVAL;
netdev_dbg(dev, "no tunnel metadata\n");
- goto tx_error;
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
} else {
info = &geneve->info;
@@ -928,7 +945,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!err))
return NETDEV_TX_OK;
-tx_error:
+
dev_kfree_skb(skb);
if (err == -ELOOP)
@@ -955,13 +972,18 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
struct ip_tunnel_info *info = skb_tunnel_info(skb);
struct geneve_dev *geneve = netdev_priv(dev);
+ __be16 sport;
if (ip_tunnel_info_af(info) == AF_INET) {
struct rtable *rt;
struct flowi4 fl4;
+
struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
+ sport = udp_flow_src_port(geneve->net, skb,
+ 1, USHRT_MAX, true);
- rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
+ rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
+ geneve->info.key.tp_dst, sport);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -971,9 +993,13 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
} else if (ip_tunnel_info_af(info) == AF_INET6) {
struct dst_entry *dst;
struct flowi6 fl6;
+
struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
+ sport = udp_flow_src_port(geneve->net, skb,
+ 1, USHRT_MAX, true);
- dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
+ dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
+ geneve->info.key.tp_dst, sport);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -984,8 +1010,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
return -EINVAL;
}
- info->key.tp_src = udp_flow_src_port(geneve->net, skb,
- 1, USHRT_MAX, true);
+ info->key.tp_src = sport;
info->key.tp_dst = geneve->info.key.tp_dst;
return 0;
}
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index eab9984f73a8..e18d06cb2173 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -549,9 +549,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
- htonl(mtu));
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
goto err_rt;
}
@@ -667,10 +666,6 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
gtp = netdev_priv(dev);
- err = gtp_encap_enable(gtp, data);
- if (err < 0)
- return err;
-
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
hashsize = 1024;
} else {
@@ -681,12 +676,16 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
- goto out_encap;
+ return err;
+
+ err = gtp_encap_enable(gtp, data);
+ if (err < 0)
+ goto out_hashtable;
err = register_netdevice(dev);
if (err < 0) {
netdev_dbg(dev, "failed to register new netdev %d\n", err);
- goto out_hashtable;
+ goto out_encap;
}
gn = net_generic(dev_net(dev), gtp_net_id);
@@ -697,11 +696,11 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
return 0;
+out_encap:
+ gtp_encap_disable(gtp);
out_hashtable:
kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
-out_encap:
- gtp_encap_disable(gtp);
return err;
}
@@ -1177,16 +1176,17 @@ out_unlock:
static struct genl_family gtp_genl_family;
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
- u32 type, struct pdp_ctx *pctx)
+ int flags, u32 type, struct pdp_ctx *pctx)
{
void *genlh;
- genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
+ genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
type);
if (genlh == NULL)
goto nlmsg_failure;
if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
+ nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
goto nla_put_failure;
@@ -1235,8 +1235,8 @@ static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
goto err_unlock;
}
- err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
- info->snd_seq, info->nlhdr->nlmsg_type, pctx);
+ err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
+ 0, info->nlhdr->nlmsg_type, pctx);
if (err < 0)
goto err_unlock_free;
@@ -1279,6 +1279,7 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
gtp_genl_fill_info(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
+ NLM_F_MULTI,
cb->nlh->nlmsg_type, pctx)) {
cb->args[0] = i;
cb->args[1] = j;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index ba9df430fca6..fdab49872587 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1148,6 +1148,7 @@ static int __init yam_init_driver(void)
err = register_netdev(dev);
if (err) {
printk(KERN_WARNING "yam: cannot register net device %s\n", dev->name);
+ free_netdev(dev);
goto error;
}
yam_devs[i] = dev;
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 029206e4da3b..2a8c33abb363 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1248,7 +1248,7 @@ static int rr_open(struct net_device *dev)
rrpriv->info = NULL;
}
if (rrpriv->rx_ctrl) {
- pci_free_consistent(pdev, sizeof(struct ring_ctrl),
+ pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
rrpriv->rx_ctrl = NULL;
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index bdb55db4523b..2dff0e110c6f 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -378,7 +378,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
}
rcu_read_unlock();
- while (unlikely(txq >= ndev->real_num_tx_queues))
+ while (txq >= ndev->real_num_tx_queues)
txq -= ndev->real_num_tx_queues;
return txq;
@@ -513,7 +513,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
int rc;
skb->dev = vf_netdev;
- skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
+ skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
rc = dev_queue_xmit(skb);
if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
@@ -543,12 +543,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
u32 hash;
struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
- /* if VF is present and up then redirect packets
- * already called with rcu_read_lock_bh
+ /* If VF is present and up then redirect packets to it.
+ * Skip the VF if it is marked down or has no carrier.
+	 * If netpoll is in use, then the VF cannot be used either.
*/
vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
if (vf_netdev && netif_running(vf_netdev) &&
- !netpoll_tx_running(net))
+ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net))
return netvsc_vf_xmit(net, vf_netdev, skb);
/* We will atmost need two pages to describe the rndis
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index cd6b95e673a5..a686926bba71 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -883,7 +883,9 @@ static int adf7242_rx(struct adf7242_local *lp)
int ret;
u8 lqi, len_u8, *data;
- adf7242_read_reg(lp, 0, &len_u8);
+ ret = adf7242_read_reg(lp, 0, &len_u8);
+ if (ret)
+ return ret;
len = len_u8;
@@ -1270,7 +1272,7 @@ static int adf7242_probe(struct spi_device *spi)
WQ_MEM_RECLAIM);
if (unlikely(!lp->wqueue)) {
ret = -ENOMEM;
- goto err_hw_init;
+ goto err_alloc_wq;
}
ret = adf7242_hw_init(lp);
@@ -1302,6 +1304,8 @@ static int adf7242_probe(struct spi_device *spi)
return ret;
err_hw_init:
+ destroy_workqueue(lp->wqueue);
+err_alloc_wq:
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 078027bbe002..a2b876abb03b 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -368,6 +368,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n)
return -ENOMEM;
}
usb_anchor_urb(urb, &atusb->idle_urbs);
+ usb_free_urb(urb);
n--;
}
return 0;
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 38a41651e451..deace0aadad2 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2923,6 +2923,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv)
);
if (!priv->irq_workqueue) {
dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n");
+ destroy_workqueue(priv->mlme_workqueue);
return -ENOMEM;
}
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 87f605a33c37..9fa3c0bd6ec7 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -177,12 +177,21 @@ static void ipvlan_port_destroy(struct net_device *dev)
kfree(port);
}
+#define IPVLAN_ALWAYS_ON_OFLOADS \
+ (NETIF_F_SG | NETIF_F_HW_CSUM | \
+ NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
+
+#define IPVLAN_ALWAYS_ON \
+ (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
+
#define IPVLAN_FEATURES \
- (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
+ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
+ /* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */
+
#define IPVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
@@ -196,7 +205,9 @@ static int ipvlan_init(struct net_device *dev)
dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
(phy_dev->state & IPVLAN_STATE_MASK);
dev->features = phy_dev->features & IPVLAN_FEATURES;
- dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
+ dev->features |= IPVLAN_ALWAYS_ON;
+ dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
+ dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
dev->gso_max_size = phy_dev->gso_max_size;
dev->gso_max_segs = phy_dev->gso_max_segs;
dev->hard_header_len = phy_dev->hard_header_len;
@@ -297,7 +308,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev,
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+ features |= NETIF_F_ALL_FOR_ALL;
+ features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+ features = netdev_increment_features(ipvlan->phy_dev->features,
+ features, features);
+ features |= IPVLAN_ALWAYS_ON;
+ features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);
+
+ return features;
}
static void ipvlan_change_rx_flags(struct net_device *dev, int change)
@@ -802,10 +820,9 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_FEAT_CHANGE:
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
ipvlan->dev->gso_max_size = dev->gso_max_size;
ipvlan->dev->gso_max_segs = dev->gso_max_segs;
- netdev_features_change(ipvlan->dev);
+ netdev_update_features(ipvlan->dev);
}
break;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index df7d6de7c59c..4c5b67a2d63a 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1085,6 +1085,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
struct macsec_rx_sa *rx_sa;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
+ unsigned int len;
sci_t sci;
u32 pn;
bool cbit;
@@ -1240,9 +1241,10 @@ deliver:
macsec_rxsc_put(rx_sc);
skb_orphan(skb);
+ len = skb->len;
ret = gro_cells_receive(&macsec->gro_cells, skb);
if (ret == NET_RX_SUCCESS)
- count_rx(dev, skb->len);
+ count_rx(dev, len);
else
macsec->secy.netdev->stats.rx_dropped++;
@@ -1313,7 +1315,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
struct crypto_aead *tfm;
int ret;
- tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+ /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
+ tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
return tfm;
@@ -3238,11 +3241,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct macsec_dev *macsec = macsec_priv(dev);
+ rx_handler_func_t *rx_handler;
+ u8 icv_len = DEFAULT_ICV_LEN;
struct net_device *real_dev;
- int err;
+ int err, mtu;
sci_t sci;
- u8 icv_len = DEFAULT_ICV_LEN;
- rx_handler_func_t *rx_handler;
if (!tb[IFLA_LINK])
return -EINVAL;
@@ -3258,7 +3261,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (data && data[IFLA_MACSEC_ICV_LEN])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
- dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
+ mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
+ if (mtu < 0)
+ dev->mtu = 0;
+ else
+ dev->mtu = mtu;
rx_handler = rtnl_dereference(real_dev->rx_handler);
if (rx_handler && rx_handler != macsec_handle_frame)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a8f338dc0dfa..e226a96da3a3 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -451,6 +451,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
int ret;
rx_handler_result_t handle_res;
+ /* Packets from dev_loopback_xmit() do not have L2 header, bail out */
+ if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+ return RX_HANDLER_PASS;
+
port = macvlan_port_get_rcu(skb->dev);
if (is_multicast_ether_addr(eth->h_dest)) {
unsigned int hash;
@@ -1226,6 +1230,9 @@ static void macvlan_port_destroy(struct net_device *dev)
static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ struct nlattr *nla, *head;
+ int rem, len;
+
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
return -EINVAL;
@@ -1273,6 +1280,20 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
return -EADDRNOTAVAIL;
}
+ if (data[IFLA_MACVLAN_MACADDR_DATA]) {
+ head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
+ len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
+
+ nla_for_each_attr(nla, head, len, rem) {
+ if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
+ nla_len(nla) != ETH_ALEN)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(nla_data(nla)))
+ return -EADDRNOTAVAIL;
+ }
+ }
+
if (data[IFLA_MACVLAN_MACADDR_COUNT])
return -EINVAL;
@@ -1329,10 +1350,6 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
nla_for_each_attr(nla, head, len, rem) {
- if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
- nla_len(nla) != ETH_ALEN)
- continue;
-
addr = nla_data(nla);
ret = macvlan_hash_add_source(vlan, addr);
if (ret)
@@ -1676,7 +1693,7 @@ static int macvlan_device_event(struct notifier_block *unused,
struct macvlan_dev,
list);
- if (macvlan_sync_address(vlan->dev, dev->dev_addr))
+ if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr))
return NOTIFY_BAD;
break;
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index beeb7eb76ca3..57273188b71e 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -62,7 +62,8 @@ static int net_failover_open(struct net_device *dev)
return 0;
err_standby_open:
- dev_close(primary_dev);
+ if (primary_dev)
+ dev_close(primary_dev);
err_primary_open:
netif_tx_disable(dev);
return err;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 1f5fd24cd749..2386871e1294 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -154,6 +154,7 @@ config MDIO_THUNDER
depends on 64BIT
depends on PCI
select MDIO_CAVIUM
+ select MDIO_DEVRES
help
This driver supports the MDIO interfaces found on Cavium
ThunderX SoCs when the MDIO bus device appears as a PCI
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index e10e7b54ec4b..7e5892597533 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -198,7 +198,7 @@ EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
{
- int val;
+ int val, mask = 0;
/* Enable EEE at PHY level */
val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
@@ -217,10 +217,15 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
if (val < 0)
return val;
+ if (phydev->supported & SUPPORTED_1000baseT_Full)
+ mask |= MDIO_EEE_1000T;
+ if (phydev->supported & SUPPORTED_100baseT_Full)
+ mask |= MDIO_EEE_100TX;
+
if (enable)
- val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
+ val |= mask;
else
- val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
+ val &= ~mask;
phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 59b3f1fbabd4..dd0a658c843d 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1114,7 +1114,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
goto out;
}
dp83640_clock_init(clock, bus);
- list_add_tail(&phyter_clocks, &clock->list);
+ list_add_tail(&clock->list, &phyter_clocks);
out:
mutex_unlock(&phyter_clocks_lock);
@@ -1343,6 +1343,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V1;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
@@ -1350,6 +1351,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V2;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
@@ -1357,6 +1359,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
@@ -1364,6 +1367,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index bb6107f3b947..832a401c5fa5 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -2329,9 +2329,30 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
},
{
- .phy_id = MARVELL_PHY_ID_88E6390,
+ .phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
.phy_id_mask = MARVELL_PHY_ID_MASK,
- .name = "Marvell 88E6390",
+ .name = "Marvell 88E6341 Family",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .probe = m88e1510_probe,
+ .config_init = &marvell_config_init,
+ .config_aneg = &m88e6390_config_aneg,
+ .read_status = &marvell_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+ .did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
+ },
+ {
+ .phy_id = MARVELL_PHY_ID_88E6390_FAMILY,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E6390 Family",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = m88e6390_probe,
@@ -2368,7 +2389,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
- { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index ab6914f8bd50..1da104150f44 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -75,7 +75,6 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
return 0;
fail_register:
- mdiobus_free(bus->mii_bus);
smi_en.u64 = 0;
oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return err;
@@ -89,7 +88,6 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
bus = platform_get_drvdata(pdev);
mdiobus_unregister(bus->mii_bus);
- mdiobus_free(bus->mii_bus);
smi_en.u64 = 0;
oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return 0;
diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c
index 564616968cad..c0c922eff760 100644
--- a/drivers/net/phy/mdio-thunder.c
+++ b/drivers/net/phy/mdio-thunder.c
@@ -129,7 +129,6 @@ static void thunder_mdiobus_pci_remove(struct pci_dev *pdev)
continue;
mdiobus_unregister(bus->mii_bus);
- mdiobus_free(bus->mii_bus);
oct_mdio_writeq(0, bus->register_base + SMI_EN);
}
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1ee25877c4d1..dd4bf4265a5e 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -335,7 +335,10 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
phydev->autoneg = autoneg;
- phydev->speed = speed;
+ if (autoneg == AUTONEG_DISABLE) {
+ phydev->speed = speed;
+ phydev->duplex = duplex;
+ }
phydev->advertising = advertising;
@@ -344,8 +347,6 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
else
phydev->advertising &= ~ADVERTISED_Autoneg;
- phydev->duplex = duplex;
-
phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
/* Restart the PHY */
@@ -1302,9 +1303,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
/* Restart autonegotiation so the new modes get sent to the
* link partner.
*/
- ret = phy_restart_aneg(phydev);
- if (ret < 0)
- return ret;
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ ret = phy_restart_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ }
}
return 0;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 302d183beb9e..b884b681d5c5 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -606,8 +606,10 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
/* Grab the bits from PHYIR2, and put them in the lower half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
- if (phy_reg < 0)
- return -EIO;
+ if (phy_reg < 0) {
+ /* returning -ENODEV doesn't stop bus scanning */
+ return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO;
+ }
*phy_id |= (phy_reg & 0xffff);
@@ -1152,7 +1154,8 @@ void phy_detach(struct phy_device *phydev)
phy_led_triggers_unregister(phydev);
- module_put(phydev->mdio.dev.driver->owner);
+ if (phydev->mdio.dev.driver)
+ module_put(phydev->mdio.dev.driver->owner);
/* If the device had no specific driver before (i.e. - it
* was using the generic driver), we unbind the device
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index fef701bfad62..1fe7783c2871 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -8,6 +8,12 @@
#include "sfp.h"
+struct sfp_quirk {
+ const char *vendor;
+ const char *part;
+ void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes);
+};
+
/**
* struct sfp_bus - internal representation of a sfp bus
*/
@@ -20,6 +26,7 @@ struct sfp_bus {
const struct sfp_socket_ops *socket_ops;
struct device *sfp_dev;
struct sfp *sfp;
+ const struct sfp_quirk *sfp_quirk;
const struct sfp_upstream_ops *upstream_ops;
void *upstream;
@@ -30,6 +37,71 @@ struct sfp_bus {
bool started;
};
+static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
+ unsigned long *modes)
+{
+ phylink_set(modes, 2500baseX_Full);
+}
+
+static const struct sfp_quirk sfp_quirks[] = {
+ {
+ // Alcatel Lucent G-010S-P can operate at 2500base-X, but
+ // incorrectly report 2500MBd NRZ in their EEPROM
+ .vendor = "ALCATELLUCENT",
+ .part = "G010SP",
+ .modes = sfp_quirk_2500basex,
+ }, {
+ // Alcatel Lucent G-010S-A can operate at 2500base-X, but
+ // report 3.2GBd NRZ in their EEPROM
+ .vendor = "ALCATELLUCENT",
+ .part = "3FE46541AA",
+ .modes = sfp_quirk_2500basex,
+ }, {
+ // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd
+ // NRZ in their EEPROM
+ .vendor = "HUAWEI",
+ .part = "MA5671A",
+ .modes = sfp_quirk_2500basex,
+ },
+};
+
+static size_t sfp_strlen(const char *str, size_t maxlen)
+{
+ size_t size, i;
+
+ /* Trailing characters should be filled with space chars */
+ for (i = 0, size = 0; i < maxlen; i++)
+ if (str[i] != ' ')
+ size = i + 1;
+
+ return size;
+}
+
+static bool sfp_match(const char *qs, const char *str, size_t len)
+{
+ if (!qs)
+ return true;
+ if (strlen(qs) != len)
+ return false;
+ return !strncmp(qs, str, len);
+}
+
+static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
+{
+ const struct sfp_quirk *q;
+ unsigned int i;
+ size_t vs, ps;
+
+ vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name));
+ ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn));
+
+ for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++)
+ if (sfp_match(q->vendor, id->base.vendor_name, vs) &&
+ sfp_match(q->part, id->base.vendor_pn, ps))
+ return q;
+
+ return NULL;
+}
/**
* sfp_parse_port() - Parse the EEPROM base ID, setting the port type
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
@@ -233,6 +305,9 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
phylink_set(modes, 1000baseX_Full);
}
+ if (bus->sfp_quirk)
+ bus->sfp_quirk->modes(id, modes);
+
bitmap_or(support, support, modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
phylink_set(support, Autoneg);
@@ -556,6 +631,8 @@ int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
int ret = 0;
+ bus->sfp_quirk = sfp_lookup_quirk(id);
+
if (ops && ops->module_insert)
ret = ops->module_insert(bus->upstream, id);
@@ -569,6 +646,8 @@ void sfp_module_remove(struct sfp_bus *bus)
if (ops && ops->module_remove)
ops->module_remove(bus->upstream);
+
+ bus->sfp_quirk = NULL;
}
EXPORT_SYMBOL_GPL(sfp_module_remove);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 998d08ae7431..47d518e6d5d4 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1886,7 +1886,8 @@ static int sfp_probe(struct platform_device *pdev)
continue;
irq = gpiod_to_irq(sfp->gpio[i]);
- if (!irq) {
+ if (irq < 0) {
+ irq = 0;
poll = true;
continue;
}
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index c04f3dc17d76..b8342162f3a0 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -497,6 +497,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
if (!skb)
goto out;
+ if (skb->pkt_type != PACKET_HOST)
+ goto abort;
+
if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
goto abort;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 95524c06e64c..8a1e9dba1249 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -294,7 +294,7 @@ inst_rollback:
for (i--; i >= 0; i--)
__team_option_inst_del_option(team, dst_opts[i]);
- i = option_count - 1;
+ i = option_count;
alloc_rollback:
for (i--; i >= 0; i--)
kfree(dst_opts[i]);
@@ -475,6 +475,9 @@ static const struct team_mode *team_mode_get(const char *kind)
struct team_mode_item *mitem;
const struct team_mode *mode = NULL;
+ if (!try_module_get(THIS_MODULE))
+ return NULL;
+
spin_lock(&mode_list_lock);
mitem = __find_mode(kind);
if (!mitem) {
@@ -490,6 +493,7 @@ static const struct team_mode *team_mode_get(const char *kind)
}
spin_unlock(&mode_list_lock);
+ module_put(THIS_MODULE);
return mode;
}
@@ -994,7 +998,8 @@ static void __team_compute_features(struct team *team)
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
- list_for_each_entry(port, &team->port_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features,
port->dev->vlan_features,
TEAM_VLAN_FEATURES);
@@ -1008,6 +1013,7 @@ static void __team_compute_features(struct team *team)
if (port->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = port->dev->hard_header_len;
}
+ rcu_read_unlock();
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
@@ -1023,9 +1029,7 @@ static void __team_compute_features(struct team *team)
static void team_compute_features(struct team *team)
{
- mutex_lock(&team->lock);
__team_compute_features(team);
- mutex_unlock(&team->lock);
netdev_change_features(team->dev);
}
@@ -2082,6 +2086,7 @@ static void team_setup_by_port(struct net_device *dev,
dev->header_ops = port_dev->header_ops;
dev->type = port_dev->type;
dev->hard_header_len = port_dev->hard_header_len;
+ dev->needed_headroom = port_dev->needed_headroom;
dev->addr_len = port_dev->addr_len;
dev->mtu = port_dev->mtu;
memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 09c444d3b496..8ee2c519c9bf 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -77,6 +77,14 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>
+#include <linux/ieee802154.h>
+#include <linux/if_ltalk.h>
+#include <uapi/linux/if_fddi.h>
+#include <uapi/linux/if_hippi.h>
+#include <uapi/linux/if_fc.h>
+#include <net/ax25.h>
+#include <net/rose.h>
+#include <net/6lowpan.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
@@ -1450,7 +1458,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
int i;
if (it->nr_segs > MAX_SKB_FRAGS + 1)
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-EMSGSIZE);
local_bh_disable();
skb = napi_get_frags(&tfile->napi);
@@ -1870,8 +1878,11 @@ drop:
skb->dev = tun->dev;
break;
case IFF_TAP:
- if (!frags)
- skb->protocol = eth_type_trans(skb, tun->dev);
+ if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
+ err = -ENOMEM;
+ goto drop;
+ }
+ skb->protocol = eth_type_trans(skb, tun->dev);
break;
}
@@ -1927,8 +1938,11 @@ drop:
}
if (frags) {
+ u32 headlen;
+
/* Exercise flow dissector code path. */
- u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
+ skb_push(skb, ETH_HLEN);
+ headlen = eth_get_headlen(skb->data, skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
this_cpu_inc(tun->pcpu_stats->rx_dropped);
@@ -1982,12 +1996,15 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = tun_get(tfile);
ssize_t result;
+ int noblock = 0;
if (!tun)
return -EBADFD;
- result = tun_get_user(tun, tfile, NULL, from,
- file->f_flags & O_NONBLOCK, false);
+ if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
+ noblock = 1;
+
+ result = tun_get_user(tun, tfile, NULL, from, noblock, false);
tun_put(tun);
return result;
@@ -2208,10 +2225,15 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = tun_get(tfile);
ssize_t len = iov_iter_count(to), ret;
+ int noblock = 0;
if (!tun)
return -EBADFD;
- ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
+
+ if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
+ noblock = 1;
+
+ ret = tun_do_read(tun, tfile, to, noblock, NULL);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
iocb->ki_pos = ret;
@@ -2850,6 +2872,45 @@ static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
return __tun_set_ebpf(tun, prog_p, prog);
}
+/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
+static unsigned char tun_get_addr_len(unsigned short type)
+{
+ switch (type) {
+ case ARPHRD_IP6GRE:
+ case ARPHRD_TUNNEL6:
+ return sizeof(struct in6_addr);
+ case ARPHRD_IPGRE:
+ case ARPHRD_TUNNEL:
+ case ARPHRD_SIT:
+ return 4;
+ case ARPHRD_ETHER:
+ return ETH_ALEN;
+ case ARPHRD_IEEE802154:
+ case ARPHRD_IEEE802154_MONITOR:
+ return IEEE802154_EXTENDED_ADDR_LEN;
+ case ARPHRD_PHONET_PIPE:
+ case ARPHRD_PPP:
+ case ARPHRD_NONE:
+ return 0;
+ case ARPHRD_6LOWPAN:
+ return EUI64_ADDR_LEN;
+ case ARPHRD_FDDI:
+ return FDDI_K_ALEN;
+ case ARPHRD_HIPPI:
+ return HIPPI_ALEN;
+ case ARPHRD_IEEE802:
+ return FC_ALEN;
+ case ARPHRD_ROSE:
+ return ROSE_ADDR_LEN;
+ case ARPHRD_NETROM:
+ return AX25_ADDR_LEN;
+ case ARPHRD_LOCALTLK:
+ return LTALK_ALEN;
+ default:
+ return 0;
+ }
+}
+
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg, int ifreq_len)
{
@@ -3004,6 +3065,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = -EBUSY;
} else {
tun->dev->type = (int) arg;
+ tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
tun_debug(KERN_INFO, tun, "linktype set to %d\n",
tun->dev->type);
ret = 0;
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 023b8d0bf175..8d27786acad9 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -309,7 +309,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal)
netdev_dbg(dev->net, "asix_get_phy_addr()\n");
- if (ret < 0) {
+ if (ret < 2) {
netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
goto out;
}
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 914cac55a7ae..909755ef71ac 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -210,6 +210,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
if (ret < ETH_ALEN) {
netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
+ ret = -EIO;
goto free;
}
memcpy(dev->net->dev_addr, buf, ETH_ALEN);
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 2207f7a7d1ff..b2434b479846 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -307,12 +307,12 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
int ret;
if (2 == size) {
- u16 buf;
+ u16 buf = 0;
ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
le16_to_cpus(&buf);
*((u16 *)data) = buf;
} else if (4 == size) {
- u32 buf;
+ u32 buf = 0;
ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
le32_to_cpus(&buf);
*((u32 *)data) = buf;
@@ -1400,10 +1400,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
if (pkt_cnt == 0) {
- /* Skip IP alignment psudo header */
- skb_pull(skb, 2);
skb->len = pkt_len;
- skb_set_tail_pointer(skb, pkt_len);
+ /* Skip IP alignment pseudo header */
+ skb_pull(skb, 2);
+ skb_set_tail_pointer(skb, skb->len);
skb->truesize = pkt_len + sizeof(struct sk_buff);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
@@ -1412,8 +1412,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
ax_skb = skb_clone(skb, GFP_ATOMIC);
if (ax_skb) {
ax_skb->len = pkt_len;
- ax_skb->data = skb->data + 2;
- skb_set_tail_pointer(ax_skb, pkt_len);
+ /* Skip IP alignment pseudo header */
+ skb_pull(ax_skb, 2);
+ skb_set_tail_pointer(ax_skb, ax_skb->len);
ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
ax88179_rx_checksum(ax_skb, pkt_hdr);
usbnet_skb_return(dev, ax_skb);
@@ -1734,6 +1735,7 @@ static const struct driver_info belkin_info = {
.status = ax88179_status,
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
+ .stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 78b16eb9e58c..f448e484a341 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -400,6 +400,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i
err = register_netdev(dev);
if (err) {
+ /* Set disconnected flag so that disconnect() returns early. */
+ pnd->disconnected = 1;
usb_driver_release_interface(&usbpn_driver, data_intf);
goto out;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c3cf9ae6d1df..529c8fac1531 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -800,6 +800,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
@@ -821,14 +828,21 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
-/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
+/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
- /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 1f57a6a2b8a2..faca70c3647d 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1127,7 +1127,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* accordingly. Otherwise, we should check here.
*/
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
- delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+ delayed_ndp_size = ctx->max_ndp_size +
+ max_t(u32,
+ ctx->tx_ndp_modulus,
+ ctx->tx_modulus + ctx->tx_remainder) - 1;
else
delayed_ndp_size = 0;
@@ -1308,7 +1311,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
skb_out->len > ctx->min_tx_pkt) {
padding_count = ctx->tx_curr_size - skb_out->len;
- skb_put_zero(skb_out, padding_count);
+ if (!WARN_ON(padding_count > ctx->tx_curr_size))
+ skb_put_zero(skb_out, padding_count);
} else if (skb_out->len < ctx->tx_curr_size &&
(skb_out->len % dev->maxpacket) == 0) {
skb_put_u8(skb_out, 0); /* force short packet */
@@ -1629,9 +1633,6 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
- netif_info(dev, link, dev->net,
- "network connection: %sconnected\n",
- !!event->wValue ? "" : "dis");
usbnet_link_change(dev, !!event->wValue, 0);
break;
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index b91f92e4e5f2..915ac75b55fc 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -625,6 +625,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
+ {
+ USB_DEVICE(0x0586, 0x3427), /* ZyXEL Keenetic Plus DSL xDSL modem */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
{}, // END
};
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 5251c5f6f96e..a66077f51457 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -625,7 +625,7 @@ static struct hso_serial *get_serial_by_index(unsigned index)
return serial;
}
-static int get_free_serial_index(void)
+static int obtain_minor(struct hso_serial *serial)
{
int index;
unsigned long flags;
@@ -633,8 +633,10 @@ static int get_free_serial_index(void)
spin_lock_irqsave(&serial_table_lock, flags);
for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) {
if (serial_table[index] == NULL) {
+ serial_table[index] = serial->parent;
+ serial->minor = index;
spin_unlock_irqrestore(&serial_table_lock, flags);
- return index;
+ return 0;
}
}
spin_unlock_irqrestore(&serial_table_lock, flags);
@@ -643,15 +645,12 @@ static int get_free_serial_index(void)
return -1;
}
-static void set_serial_by_index(unsigned index, struct hso_serial *serial)
+static void release_minor(struct hso_serial *serial)
{
unsigned long flags;
spin_lock_irqsave(&serial_table_lock, flags);
- if (serial)
- serial_table[index] = serial->parent;
- else
- serial_table[index] = NULL;
+ serial_table[serial->minor] = NULL;
spin_unlock_irqrestore(&serial_table_lock, flags);
}
@@ -1403,8 +1402,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
unsigned long flags;
if (old)
- hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n",
- tty->termios.c_cflag, old->c_cflag);
+ hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n",
+ (unsigned int)tty->termios.c_cflag,
+ (unsigned int)old->c_cflag);
/* the actual setup */
spin_lock_irqsave(&serial->serial_lock, flags);
@@ -1703,7 +1703,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty,
spin_unlock_irqrestore(&serial->serial_lock, flags);
return usb_control_msg(serial->parent->usb,
- usb_rcvctrlpipe(serial->parent->usb, 0), 0x22,
+ usb_sndctrlpipe(serial->parent->usb, 0), 0x22,
0x21, val, if_num, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
@@ -2243,6 +2243,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
static void hso_serial_tty_unregister(struct hso_serial *serial)
{
tty_unregister_device(tty_drv, serial->minor);
+ release_minor(serial);
}
static void hso_serial_common_free(struct hso_serial *serial)
@@ -2266,22 +2267,22 @@ static void hso_serial_common_free(struct hso_serial *serial)
static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
int rx_size, int tx_size)
{
- int minor;
int i;
tty_port_init(&serial->port);
- minor = get_free_serial_index();
- if (minor < 0)
- goto exit;
+ if (obtain_minor(serial))
+ goto exit2;
/* register our minor number */
serial->parent->dev = tty_port_register_device_attr(&serial->port,
- tty_drv, minor, &serial->parent->interface->dev,
+ tty_drv, serial->minor, &serial->parent->interface->dev,
serial->parent, hso_serial_dev_groups);
+ if (IS_ERR(serial->parent->dev)) {
+ release_minor(serial);
+ goto exit2;
+ }
- /* fill in specific data for later use */
- serial->minor = minor;
serial->magic = HSO_SERIAL_MAGIC;
spin_lock_init(&serial->serial_lock);
serial->num_rx_urbs = num_urbs;
@@ -2323,6 +2324,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
return 0;
exit:
hso_serial_tty_unregister(serial);
+exit2:
hso_serial_common_free(serial);
return -1;
}
@@ -2448,7 +2450,7 @@ static int hso_rfkill_set_block(void *data, bool blocked)
if (hso_dev->usb_gone)
rv = 0;
else
- rv = usb_control_msg(hso_dev->usb, usb_rcvctrlpipe(hso_dev->usb, 0),
+ rv = usb_control_msg(hso_dev->usb, usb_sndctrlpipe(hso_dev->usb, 0),
enabled ? 0x82 : 0x81, 0x40, 0, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
mutex_unlock(&hso_dev->mutex);
@@ -2673,9 +2675,6 @@ static struct hso_device *hso_create_bulk_serial_device(
serial->write_data = hso_std_serial_write_data;
- /* and record this serial */
- set_serial_by_index(serial->minor, serial);
-
/* setup the proc dirs and files if needed */
hso_log_port(hso_dev);
@@ -2732,9 +2731,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
serial->shared_int->ref_count++;
mutex_unlock(&serial->shared_int->shared_int_lock);
- /* and record this serial */
- set_serial_by_index(serial->minor, serial);
-
/* setup the proc dirs and files if needed */
hso_log_port(hso_dev);
@@ -3118,8 +3114,7 @@ static void hso_free_interface(struct usb_interface *interface)
cancel_work_sync(&serial_table[i]->async_put_intf);
cancel_work_sync(&serial_table[i]->async_get_intf);
hso_serial_tty_unregister(serial);
- kref_put(&serial_table[i]->ref, hso_serial_ref_free);
- set_serial_by_index(i, NULL);
+ kref_put(&serial->parent->ref, hso_serial_ref_free);
}
}
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 3d71f1716390..8e2eb2061354 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -70,7 +70,7 @@
#define IPHETH_USBINTF_SUBCLASS 253
#define IPHETH_USBINTF_PROTO 1
-#define IPHETH_BUF_SIZE 1516
+#define IPHETH_BUF_SIZE 1514
#define IPHETH_IP_ALIGN 2 /* padding at front of URB */
#define IPHETH_TX_TIMEOUT (5 * HZ)
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 92548887df2f..5bd07cdb3e6e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -388,10 +388,6 @@ struct lan78xx_net {
struct tasklet_struct bh;
struct delayed_work wq;
- struct usb_host_endpoint *ep_blkin;
- struct usb_host_endpoint *ep_blkout;
- struct usb_host_endpoint *ep_intr;
-
int msg_enable;
struct urb *urb_intr;
@@ -2883,78 +2879,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
return NETDEV_TX_OK;
}
-static int
-lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
-{
- int tmp;
- struct usb_host_interface *alt = NULL;
- struct usb_host_endpoint *in = NULL, *out = NULL;
- struct usb_host_endpoint *status = NULL;
-
- for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
- unsigned ep;
-
- in = NULL;
- out = NULL;
- status = NULL;
- alt = intf->altsetting + tmp;
-
- for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
- struct usb_host_endpoint *e;
- int intr = 0;
-
- e = alt->endpoint + ep;
- switch (e->desc.bmAttributes) {
- case USB_ENDPOINT_XFER_INT:
- if (!usb_endpoint_dir_in(&e->desc))
- continue;
- intr = 1;
- /* FALLTHROUGH */
- case USB_ENDPOINT_XFER_BULK:
- break;
- default:
- continue;
- }
- if (usb_endpoint_dir_in(&e->desc)) {
- if (!intr && !in)
- in = e;
- else if (intr && !status)
- status = e;
- } else {
- if (!out)
- out = e;
- }
- }
- if (in && out)
- break;
- }
- if (!alt || !in || !out)
- return -EINVAL;
-
- dev->pipe_in = usb_rcvbulkpipe(dev->udev,
- in->desc.bEndpointAddress &
- USB_ENDPOINT_NUMBER_MASK);
- dev->pipe_out = usb_sndbulkpipe(dev->udev,
- out->desc.bEndpointAddress &
- USB_ENDPOINT_NUMBER_MASK);
- dev->ep_intr = status;
-
- return 0;
-}
-
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
struct lan78xx_priv *pdata = NULL;
int ret;
int i;
- ret = lan78xx_get_endpoints(dev, intf);
- if (ret) {
- netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
- ret);
- return ret;
- }
-
dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
pdata = (struct lan78xx_priv *)(dev->data[0]);
@@ -3726,6 +3656,7 @@ static void lan78xx_stat_monitor(struct timer_list *t)
static int lan78xx_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
struct lan78xx_net *dev;
struct net_device *netdev;
struct usb_device *udev;
@@ -3774,6 +3705,34 @@ static int lan78xx_probe(struct usb_interface *intf,
mutex_init(&dev->stats.access_lock);
+ if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
+ ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
+ if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
+ ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
+ if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ ep_intr = &intf->cur_altsetting->endpoint[2];
+ if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ dev->pipe_intr = usb_rcvintpipe(dev->udev,
+ usb_endpoint_num(&ep_intr->desc));
+
ret = lan78xx_bind(dev, intf);
if (ret < 0)
goto out2;
@@ -3786,18 +3745,7 @@ static int lan78xx_probe(struct usb_interface *intf,
netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
- dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
- dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
- dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
-
- dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
- dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
-
- dev->pipe_intr = usb_rcvintpipe(dev->udev,
- dev->ep_intr->desc.bEndpointAddress &
- USB_ENDPOINT_NUMBER_MASK);
- period = dev->ep_intr->desc.bInterval;
-
+ period = ep_intr->desc.bInterval;
maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
buf = kmalloc(maxp, GFP_KERNEL);
if (buf) {
@@ -3810,6 +3758,7 @@ static int lan78xx_probe(struct usb_interface *intf,
usb_fill_int_urb(dev->urb_intr, dev->udev,
dev->pipe_intr, buf, maxp,
intr_complete, dev, period);
+ dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
}
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index a7804def1120..d08e1de26030 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -378,13 +378,6 @@ static ssize_t add_mux_store(struct device *d, struct device_attribute *attr, c
goto err;
}
- /* we don't want to modify a running netdev */
- if (netif_running(dev->net)) {
- netdev_err(dev->net, "Cannot change a running device\n");
- ret = -EBUSY;
- goto err;
- }
-
ret = qmimux_register_device(dev->net, mux_id);
if (!ret) {
info->flags |= QMI_WWAN_FLAG_MUX;
@@ -414,13 +407,6 @@ static ssize_t del_mux_store(struct device *d, struct device_attribute *attr, c
if (!rtnl_trylock())
return restart_syscall();
- /* we don't want to modify a running netdev */
- if (netif_running(dev->net)) {
- netdev_err(dev->net, "Cannot change a running device\n");
- ret = -EBUSY;
- goto err;
- }
-
del_dev = qmimux_find_dev(dev, mux_id);
if (!del_dev) {
netdev_err(dev->net, "mux_id not present\n");
@@ -995,6 +981,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
@@ -1029,7 +1016,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9011, 4)},
{QMI_FIXED_INTF(0x05c6, 0x9021, 1)},
{QMI_FIXED_INTF(0x05c6, 0x9022, 2)},
- {QMI_FIXED_INTF(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */
+ {QMI_QUIRK_SET_DTR(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */
{QMI_FIXED_INTF(0x05c6, 0x9026, 3)},
{QMI_FIXED_INTF(0x05c6, 0x902e, 5)},
{QMI_FIXED_INTF(0x05c6, 0x9031, 5)},
@@ -1216,6 +1203,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
+ {QMI_FIXED_INTF(0x19d2, 0x1275, 3)}, /* ZTE P685M */
{QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
@@ -1227,6 +1215,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */
{QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
{QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
@@ -1260,11 +1249,14 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
@@ -1279,12 +1271,14 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
+ {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
{QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
+ {QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -1294,6 +1288,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
{QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
@@ -1303,10 +1298,12 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
+ {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0639178cb009..a27ea04cfa6c 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2593,29 +2593,6 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
device_set_wakeup_enable(&tp->udev->dev, false);
}
-static void r8153_mac_clk_spd(struct r8152 *tp, bool enable)
-{
- /* MAC clock speed down */
- if (enable) {
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL,
- ALDPS_SPDWN_RATIO);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2,
- EEE_SPDWN_RATIO);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
- PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
- U1U2_SPDWN_EN | L1_SPDWN_EN);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
- PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
- TP100_SPDWN_EN | TP500_SPDWN_EN | EEE_SPDWN_EN |
- TP1000_SPDWN_EN);
- } else {
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
- }
-}
-
static void r8153_u1u2en(struct r8152 *tp, bool enable)
{
u8 u1u2[8];
@@ -2847,11 +2824,9 @@ static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
if (enable) {
r8153_u1u2en(tp, false);
r8153_u2p3en(tp, false);
- r8153_mac_clk_spd(tp, true);
rtl_runtime_suspend_enable(tp, true);
} else {
rtl_runtime_suspend_enable(tp, false);
- r8153_mac_clk_spd(tp, false);
switch (tp->version) {
case RTL_VER_03:
@@ -3413,7 +3388,6 @@ static void r8153_first_init(struct r8152 *tp)
u32 ocp_data;
int i;
- r8153_mac_clk_spd(tp, false);
rxdy_gated_en(tp, true);
r8153_teredo_off(tp);
@@ -3475,8 +3449,6 @@ static void r8153_enter_oob(struct r8152 *tp)
u32 ocp_data;
int i;
- r8153_mac_clk_spd(tp, true);
-
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
@@ -4141,9 +4113,14 @@ static void r8153_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);
+ /* MAC clock speed down */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
+
r8153_power_cut_en(tp, false);
r8153_u1u2en(tp, true);
- r8153_mac_clk_spd(tp, false);
usb_enable_lpm(tp->udev);
/* rx aggregation */
@@ -5344,6 +5321,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)},
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
@@ -5351,6 +5329,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)},
{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index b807c91abe1d..d3f79a4067e2 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -213,7 +213,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
dev_dbg(&info->control->dev,
"rndis response error, code %d\n", retval);
}
- msleep(20);
+ msleep(40);
}
dev_dbg(&info->control->dev, "rndis response timeout\n");
return -ETIMEDOUT;
@@ -399,7 +399,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
reply_len = sizeof *phym;
retval = rndis_query(dev, intf, u.buf,
RNDIS_OID_GEN_PHYSICAL_MEDIUM,
- 0, (void **) &phym, &reply_len);
+ reply_len, (void **)&phym, &reply_len);
if (retval != 0 || !phym) {
/* OID is optional so don't fail here. */
phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 80373a9171dd..933d1a74bcdb 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -277,12 +277,20 @@ static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg)
return 1;
}
-static inline void set_ethernet_addr(rtl8150_t * dev)
+static void set_ethernet_addr(rtl8150_t *dev)
{
- u8 node_id[6];
+ u8 node_id[ETH_ALEN];
+ int ret;
+
+ ret = get_registers(dev, IDR, sizeof(node_id), node_id);
- get_registers(dev, IDR, sizeof(node_id), node_id);
- memcpy(dev->netdev->dev_addr, node_id, sizeof(node_id));
+ if (ret == sizeof(node_id)) {
+ ether_addr_copy(dev->netdev->dev_addr, node_id);
+ } else {
+ eth_hw_addr_random(dev->netdev);
+ netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n",
+ dev->netdev->dev_addr);
+ }
}
static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
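
The set_ethernet_addr() rework above follows a common driver pattern: use the adapter-provided MAC address only when the register read returns the full six bytes, and otherwise fall back to a random locally administered address so the interface still comes up. A rough, generic sketch of the same idea (read_hw_mac() is a hypothetical stand-in for a device-specific read such as get_registers(..., IDR, ...); the extra is_valid_ether_addr() check is an additional belt-and-braces step, not something this patch adds):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_set_mac(struct net_device *netdev,
			    int (*read_hw_mac)(u8 *buf, int len))
{
	u8 addr[ETH_ALEN];

	if (read_hw_mac(addr, ETH_ALEN) == ETH_ALEN &&
	    is_valid_ether_addr(addr)) {
		ether_addr_copy(netdev->dev_addr, addr);	/* use the HW address */
	} else {
		eth_hw_addr_random(netdev);			/* also sets addr_assign_type */
		netdev_notice(netdev, "using random MAC %pM\n",
			      netdev->dev_addr);
	}
}
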
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index ec287c9741e8..62f2862c9775 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1495,7 +1495,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = smsc75xx_wait_ready(dev, 0);
if (ret < 0) {
netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
- return ret;
+ goto err;
}
smsc75xx_init_mac_address(dev);
@@ -1504,7 +1504,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = smsc75xx_reset(dev);
if (ret < 0) {
netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
- return ret;
+ goto err;
}
dev->net->netdev_ops = &smsc75xx_netdev_ops;
@@ -1514,6 +1514,10 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
return 0;
+
+err:
+ kfree(pdata);
+ return ret;
}
static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 6e971628bb50..4f29010e1aef 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1301,11 +1301,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
/* Init all registers */
ret = smsc95xx_reset(dev);
+ if (ret)
+ goto free_pdata;
/* detect device revision as different features may be available */
ret = smsc95xx_read_reg(dev, ID_REV, &val);
if (ret < 0)
- return ret;
+ goto free_pdata;
+
val >>= 16;
pdata->chip_id = val;
pdata->mdix_ctrl = get_mdix_status(dev->net);
@@ -1331,6 +1334,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
return 0;
+
+free_pdata:
+ kfree(pdata);
+ return ret;
}
static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -1338,7 +1345,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
if (pdata) {
- cancel_delayed_work(&pdata->carrier_check);
+ cancel_delayed_work_sync(&pdata->carrier_check);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
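
Two generic points in the smsc75xx/smsc95xx hunks above: the bind() error paths now unwind through a single label that frees pdata instead of returning early and leaking it, and unbind() switches to cancel_delayed_work_sync(), which waits for an already-running carrier_check instance to finish before pdata is freed; plain cancel_delayed_work() only removes a pending item and would race with the kfree(). A minimal sketch of both points under assumed names (struct my_priv and example_reset() are placeholders, not driver symbols):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_priv {
	struct delayed_work carrier_check;
};

int example_reset(void);	/* hypothetical hardware init, <0 on error */

static int example_bind(struct my_priv **out)
{
	struct my_priv *pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	int ret;

	if (!pdata)
		return -ENOMEM;

	ret = example_reset();
	if (ret < 0)
		goto free_pdata;	/* single unwind point, no leak */

	*out = pdata;
	return 0;

free_pdata:
	kfree(pdata);
	return ret;
}

static void example_unbind(struct my_priv *pdata)
{
	cancel_delayed_work_sync(&pdata->carrier_check);	/* waits for a running instance */
	kfree(pdata);
}
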
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 41a00cd76955..fd1843fd256b 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -197,8 +197,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
if (rxq < rcv->real_num_rx_queues) {
rq = &rcv_priv->rq[rxq];
rcv_xdp = rcu_access_pointer(rq->xdp_prog);
- if (rcv_xdp)
- skb_record_rx_queue(skb, rxq);
+ skb_record_rx_queue(skb, rxq);
}
if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
@@ -377,13 +376,15 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
unsigned int *xdp_xmit)
{
void *hard_start = frame->data - frame->headroom;
- void *head = hard_start - sizeof(struct xdp_frame);
int len = frame->len, delta = 0;
struct xdp_frame orig_frame;
struct bpf_prog *xdp_prog;
unsigned int headroom;
struct sk_buff *skb;
+ /* bpf_xdp_adjust_head() assures BPF cannot access xdp_frame area */
+ hard_start -= sizeof(struct xdp_frame);
+
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (likely(xdp_prog)) {
@@ -405,7 +406,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
break;
case XDP_TX:
orig_frame = *frame;
- xdp.data_hard_start = head;
xdp.rxq->mem = frame->mem;
if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
@@ -417,7 +417,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
goto xdp_xmit;
case XDP_REDIRECT:
orig_frame = *frame;
- xdp.data_hard_start = head;
xdp.rxq->mem = frame->mem;
if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
frame = &orig_frame;
@@ -437,7 +436,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
rcu_read_unlock();
headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
- skb = veth_build_skb(head, headroom, len, 0);
+ skb = veth_build_skb(hard_start, headroom, len, 0);
if (!skb) {
xdp_return_frame(frame);
goto err;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c88ee376a2eb..0b1c6a8906b9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -383,7 +383,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct receive_queue *rq,
struct page *page, unsigned int offset,
unsigned int len, unsigned int truesize,
- bool hdr_valid)
+ bool hdr_valid, unsigned int metasize)
{
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -405,6 +405,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
+ /* hdr_valid means no XDP, so we can copy the vnet header */
if (hdr_valid)
memcpy(hdr, p, hdr_len);
@@ -417,6 +418,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
copy = skb_tailroom(skb);
skb_put_data(skb, p, copy);
+ if (metasize) {
+ __skb_pull(skb, metasize);
+ skb_metadata_set(skb, metasize);
+ }
+
len -= copy;
offset += copy;
@@ -462,10 +468,6 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
struct virtio_net_hdr_mrg_rxbuf *hdr;
int err;
- /* virtqueue want to use data area in-front of packet */
- if (unlikely(xdpf->metasize > 0))
- return -EOPNOTSUPP;
-
if (unlikely(xdpf->headroom < vi->hdr_len))
return -EOVERFLOW;
@@ -656,6 +658,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
unsigned int delta = 0;
struct page *xdp_page;
int err;
+ unsigned int metasize = 0;
len -= vi->hdr_len;
stats->bytes += len;
@@ -695,8 +698,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
xdp.data = xdp.data_hard_start + xdp_headroom;
- xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -707,6 +710,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
/* Recalculate length in case bpf program changed it */
delta = orig_data - xdp.data;
len = xdp.data_end - xdp.data;
+ metasize = xdp.data - xdp.data_meta;
break;
case XDP_TX:
stats->xdp_tx++;
@@ -752,6 +756,9 @@ static struct sk_buff *receive_small(struct net_device *dev,
memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
} /* keep zeroed vnet hdr since packet was changed by bpf */
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
err:
return skb;
@@ -772,8 +779,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct page *page = buf;
- struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
- PAGE_SIZE, true);
+ struct sk_buff *skb =
+ page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0);
stats->bytes += len - vi->hdr_len;
if (unlikely(!skb))
@@ -805,6 +812,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
unsigned int truesize;
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
int err;
+ unsigned int metasize = 0;
head_skb = NULL;
stats->bytes += len - vi->hdr_len;
@@ -851,8 +859,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
data = page_address(xdp_page) + offset;
xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
xdp.data = data + vi->hdr_len;
- xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + (len - vi->hdr_len);
+ xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -860,24 +868,27 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
switch (act) {
case XDP_PASS:
+ metasize = xdp.data - xdp.data_meta;
+
/* recalculate offset to account for any header
- * adjustments. Note other cases do not build an
- * skb and avoid using offset
+ * adjustments and minus the metasize to copy the
+ * metadata in page_to_skb(). Note other cases do not
+ * build an skb and avoid using offset
*/
- offset = xdp.data -
- page_address(xdp_page) - vi->hdr_len;
+ offset = xdp.data - page_address(xdp_page) -
+ vi->hdr_len - metasize;
- /* recalculate len if xdp.data or xdp.data_end were
- * adjusted
+ /* recalculate len if xdp.data, xdp.data_end or
+ * xdp.data_meta were adjusted
*/
- len = xdp.data_end - xdp.data + vi->hdr_len;
+ len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
/* We can only create skb based on xdp_page. */
if (unlikely(xdp_page != page)) {
rcu_read_unlock();
put_page(page);
- head_skb = page_to_skb(vi, rq, xdp_page,
- offset, len,
- PAGE_SIZE, false);
+ head_skb = page_to_skb(vi, rq, xdp_page, offset,
+ len, PAGE_SIZE, false,
+ metasize);
return head_skb;
}
break;
@@ -933,7 +944,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto err_skb;
}
- head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
+ head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
+ metasize);
curr_skb = head_skb;
if (unlikely(!curr_skb))
@@ -1242,9 +1254,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
break;
} while (rq->vq->num_free);
if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
- u64_stats_update_begin(&rq->stats.syncp);
+ unsigned long flags;
+
+ flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
rq->stats.kicks++;
- u64_stats_update_end(&rq->stats.syncp);
+ u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
}
return !oom;
@@ -2075,14 +2089,16 @@ static int virtnet_set_channels(struct net_device *dev,
get_online_cpus();
err = _virtnet_set_queues(vi, queue_pairs);
- if (!err) {
- netif_set_real_num_tx_queues(dev, queue_pairs);
- netif_set_real_num_rx_queues(dev, queue_pairs);
-
- virtnet_set_affinity(vi);
+ if (err) {
+ put_online_cpus();
+ goto err;
}
+ virtnet_set_affinity(vi);
put_online_cpus();
+ netif_set_real_num_tx_queues(dev, queue_pairs);
+ netif_set_real_num_rx_queues(dev, queue_pairs);
+ err:
return err;
}
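
The virtio_net hunks above stop declaring XDP metadata invalid: the driver now points xdp.data_meta at xdp.data before running the program, computes metasize = xdp.data - xdp.data_meta afterwards, and publishes it on the resulting skb with skb_metadata_set(). The counterpart on the BPF side reserves that space with bpf_xdp_adjust_meta(); a minimal program might look like the sketch below (the struct meta layout and the mark value are illustrative only, not part of this patch):

/* BPF-C side, compiled with clang -target bpf */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct meta {
	__u32 mark;		/* example hint later readable via skb metadata */
};

SEC("xdp")
int xdp_set_meta(struct xdp_md *ctx)
{
	void *data;
	struct meta *m;

	/* grow the metadata area immediately in front of the packet */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*m)))
		return XDP_PASS;

	data = (void *)(long)ctx->data;
	m = (void *)(long)ctx->data_meta;
	if ((void *)(m + 1) > data)	/* bounds check required by the verifier */
		return XDP_PASS;

	m->mark = 42;
	return XDP_PASS;	/* on XDP_PASS the driver carries metasize bytes into the skb */
}

char _license[] SEC("license") = "GPL";
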
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 559db051a500..88d18ab83e54 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -692,6 +692,8 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!p)
return 0;
+ if (n > UPT1_RSS_MAX_IND_TABLE_SIZE)
+ return 0;
while (n--)
p[n] = rssConf->indTable[n];
return 0;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9f895083bc0a..93899a7be9c5 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -192,8 +192,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
fl6.flowi6_proto = iph->nexthdr;
fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
- dst = ip6_route_output(net, NULL, &fl6);
- if (dst == dst_null)
+ dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
+ if (IS_ERR(dst) || dst == dst_null)
goto err;
skb_dst_drop(skb);
@@ -336,8 +336,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
-static int vrf_finish_direct(struct net *net, struct sock *sk,
- struct sk_buff *skb)
+static void vrf_finish_direct(struct sk_buff *skb)
{
struct net_device *vrf_dev = skb->dev;
@@ -356,7 +355,8 @@ static int vrf_finish_direct(struct net *net, struct sock *sk,
skb_pull(skb, ETH_HLEN);
}
- return 1;
+ /* reset skb device */
+ nf_reset(skb);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -435,15 +435,41 @@ static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
return skb;
}
+static int vrf_output6_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ vrf_finish_direct(skb);
+
+ return vrf_ip6_local_out(net, sk, skb);
+}
+
static int vrf_output6_direct(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
+ int err = 1;
+
skb->protocol = htons(ETH_P_IPV6);
- return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, skb->dev,
- vrf_finish_direct,
- !(IPCB(skb)->flags & IPSKB_REROUTED));
+ if (!(IPCB(skb)->flags & IPSKB_REROUTED))
+ err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
+ NULL, skb->dev, vrf_output6_direct_finish);
+
+ if (likely(err == 1))
+ vrf_finish_direct(skb);
+
+ return err;
+}
+
+static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int err;
+
+ err = vrf_output6_direct(net, sk, skb);
+ if (likely(err == 1))
+ err = vrf_ip6_local_out(net, sk, skb);
+
+ return err;
}
static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
@@ -456,18 +482,15 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
- skb, NULL, vrf_dev, vrf_output6_direct);
+ skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
if (likely(err == 1))
err = vrf_output6_direct(net, sk, skb);
- /* reset skb device */
if (likely(err == 1))
- nf_reset(skb);
- else
- skb = NULL;
+ return skb;
- return skb;
+ return NULL;
}
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
@@ -478,7 +501,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
return skb;
- if (qdisc_tx_is_default(vrf_dev))
+ if (qdisc_tx_is_default(vrf_dev) ||
+ IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
return vrf_ip6_out_direct(vrf_dev, sk, skb);
return vrf_ip6_out_redirect(vrf_dev, skb);
@@ -648,15 +672,41 @@ static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
return skb;
}
+static int vrf_output_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ vrf_finish_direct(skb);
+
+ return vrf_ip_local_out(net, sk, skb);
+}
+
static int vrf_output_direct(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
+ int err = 1;
+
skb->protocol = htons(ETH_P_IP);
- return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, skb->dev,
- vrf_finish_direct,
- !(IPCB(skb)->flags & IPSKB_REROUTED));
+ if (!(IPCB(skb)->flags & IPSKB_REROUTED))
+ err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
+ NULL, skb->dev, vrf_output_direct_finish);
+
+ if (likely(err == 1))
+ vrf_finish_direct(skb);
+
+ return err;
+}
+
+static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int err;
+
+ err = vrf_output_direct(net, sk, skb);
+ if (likely(err == 1))
+ err = vrf_ip_local_out(net, sk, skb);
+
+ return err;
}
static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
@@ -669,18 +719,15 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
- skb, NULL, vrf_dev, vrf_output_direct);
+ skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
if (likely(err == 1))
err = vrf_output_direct(net, sk, skb);
- /* reset skb device */
if (likely(err == 1))
- nf_reset(skb);
- else
- skb = NULL;
+ return skb;
- return skb;
+ return NULL;
}
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
@@ -692,7 +739,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
ipv4_is_lbcast(ip_hdr(skb)->daddr))
return skb;
- if (qdisc_tx_is_default(vrf_dev))
+ if (qdisc_tx_is_default(vrf_dev) ||
+ IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
return vrf_ip_out_direct(vrf_dev, sk, skb);
return vrf_ip_out_redirect(vrf_dev, skb);
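
The vrf rework above replaces NF_HOOK_COND() with an open-coded nf_hook() call plus explicit continuation functions. The convention being relied on: nf_hook() returns 1 when every registered hook accepted the packet, in which case the caller must run the continuation itself; 0 means a hook stole or queued the skb (the okfn passed to nf_hook() is used on re-injection); a negative value means the packet was dropped and already freed. Condensed into a standalone sketch (example_finish() is a placeholder for the vrf_*_direct_finish helpers above):

static int example_finish(struct net *net, struct sock *sk, struct sk_buff *skb);

static int example_post_routing(struct net *net, struct sock *sk,
				struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
		      NULL, skb->dev, example_finish);
	if (likely(err == 1))
		err = example_finish(net, sk, skb);	/* accepted: continue in-line */

	return err;
}
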
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 64751b089482..49e8c6d42cda 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -975,6 +975,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct vxlan_fdb *f;
+ rcu_read_lock();
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
struct vxlan_rdst *rd;
@@ -987,12 +988,15 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI, rd);
- if (err < 0)
+ if (err < 0) {
+ rcu_read_unlock();
goto out;
+ }
skip:
*idx += 1;
}
}
+ rcu_read_unlock();
}
out:
return err;
@@ -1611,6 +1615,10 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
ns_olen = request->len - skb_network_offset(request) -
sizeof(struct ipv6hdr) - sizeof(*ns);
for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
+ if (!ns->opt[i + 1]) {
+ kfree_skb(reply);
+ return NULL;
+ }
if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
break;
@@ -1963,7 +1971,6 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct dst_entry *ndst;
struct flowi6 fl6;
- int err;
if (!sock6)
return ERR_PTR(-EIO);
@@ -1986,10 +1993,9 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.fl6_dport = dport;
fl6.fl6_sport = sport;
- err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
- sock6->sock->sk,
- &ndst, &fl6);
- if (unlikely(err < 0)) {
+ ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk,
+ &fl6, NULL);
+ if (unlikely(IS_ERR(ndst))) {
netdev_dbg(dev, "no route to %pI6\n", daddr);
return ERR_PTR(-ENETUNREACH);
}
@@ -2217,7 +2223,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
ndst = &rt->dst;
skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
- tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
+ tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
vni, md, flags, udp_sum);
@@ -2254,7 +2260,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
- tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
+ tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
@@ -3174,6 +3180,9 @@ static void vxlan_config_apply(struct net_device *dev,
dev->gso_max_segs = lowerdev->gso_max_segs;
needed_headroom = lowerdev->hard_header_len;
+ needed_headroom += lowerdev->needed_headroom;
+
+ dev->needed_tailroom = lowerdev->needed_tailroom;
max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
VXLAN_HEADROOM);
@@ -3803,7 +3812,6 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan, *next;
struct net_device *dev, *aux;
- unsigned int h;
for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == &vxlan_link_ops)
@@ -3817,14 +3825,13 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
unregister_netdevice_queue(vxlan->dev, head);
}
- for (h = 0; h < PORT_HASH_SIZE; ++h)
- WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
}
static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
{
struct net *net;
LIST_HEAD(list);
+ unsigned int h;
rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
@@ -3832,6 +3839,13 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
unregister_netdevice_many(&list);
rtnl_unlock();
+
+ list_for_each_entry(net, net_list, exit_list) {
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ for (h = 0; h < PORT_HASH_SIZE; ++h)
+ WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
+ }
}
static struct pernet_operations vxlan_net_ops = {
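
vxlan6_get_route() above moves from the old int-returning ipv6_dst_lookup() with an output parameter to ipv6_dst_lookup_flow(), which encodes failure in the returned pointer itself, so callers test with IS_ERR() instead of checking a separate error code. The shape of such a caller, roughly (the header providing ipv6_stub varies by kernel version; error handling trimmed):

#include <linux/err.h>
#include <net/dst.h>

static int example_lookup(struct net *net, struct sock *sk, struct flowi6 *fl6)
{
	struct dst_entry *ndst;

	ndst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, fl6, NULL);
	if (IS_ERR(ndst))
		return PTR_ERR(ndst);	/* error travels in the pointer, nothing to free */

	/* ... use ndst for transmission ... */
	dst_release(ndst);
	return 0;
}
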
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 4e9fe75d7067..17ed5107b90a 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -199,7 +199,7 @@ config WANXL_BUILD_FIRMWARE
depends on WANXL && !PREVENT_FIRMWARE_BUILD
help
Allows you to rebuild firmware run by the QUICC processor.
- It requires as68k, ld68k and hexdump programs.
+ It requires m68k toolchains and hexdump programs.
You should never need this option, say N.
@@ -295,6 +295,7 @@ config SLIC_DS26522
tristate "Slic Maxim ds26522 card support"
depends on SPI
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
+ select BITREVERSE
help
This module initializes and configures the slic maxim card
in T1 or E1 mode.
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 9532e69fda87..0500282e176e 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -41,17 +41,17 @@ $(obj)/wanxl.o: $(obj)/wanxlfw.inc
ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y)
ifeq ($(ARCH),m68k)
- AS68K = $(AS)
- LD68K = $(LD)
+ M68KCC = $(CC)
+ M68KLD = $(LD)
else
- AS68K = as68k
- LD68K = ld68k
+ M68KCC = $(CROSS_COMPILE_M68K)gcc
+ M68KLD = $(CROSS_COMPILE_M68K)ld
endif
quiet_cmd_build_wanxlfw = BLD FW $@
cmd_build_wanxlfw = \
- $(CPP) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \
- $(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \
+ $(M68KCC) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi -c -o $(obj)/wanxlfw.o $<; \
+ $(M68KLD) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \
hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \
rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index f6b000ddcd15..b7bfc0caa5dc 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -902,6 +902,7 @@ static ssize_t cosa_write(struct file *file,
chan->tx_status = 1;
spin_unlock_irqrestore(&cosa->lock, flags);
up(&chan->wsem);
+ kfree(kbuf);
return -ERESTARTSYS;
}
}
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 9ab04ef532f3..5df6e85e7ccb 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -201,14 +201,18 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
priv->rx_skbuff = kcalloc(priv->rx_ring_size,
sizeof(*priv->rx_skbuff),
GFP_KERNEL);
- if (!priv->rx_skbuff)
+ if (!priv->rx_skbuff) {
+ ret = -ENOMEM;
goto free_ucc_pram;
+ }
priv->tx_skbuff = kcalloc(priv->tx_ring_size,
sizeof(*priv->tx_skbuff),
GFP_KERNEL);
- if (!priv->tx_skbuff)
+ if (!priv->tx_skbuff) {
+ ret = -ENOMEM;
goto free_rx_skbuff;
+ }
priv->skb_curtx = 0;
priv->skb_dirtytx = 0;
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 7221a53b8b14..500463044b1a 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -49,7 +49,15 @@ static struct hdlc_proto *first_proto;
static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *p, struct net_device *orig_dev)
{
- struct hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct hdlc_device *hdlc;
+
+ /* First make sure "dev" is an HDLC device */
+ if (!(dev->priv_flags & IFF_WAN_HDLC)) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
+ hdlc = dev_to_hdlc(dev);
if (!net_eq(dev_net(dev), &init_net)) {
kfree_skb(skb);
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 320039d329c7..2c6e3fa6947a 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -121,6 +121,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
skb_put(skb, sizeof(struct cisco_packet));
skb->priority = TC_PRIO_CONTROL;
skb->dev = dev;
+ skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
dev_queue_xmit(skb);
@@ -374,6 +375,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
memcpy(&state(hdlc)->settings, &new_settings, size);
spin_lock_init(&state(hdlc)->lock);
dev->header_ops = &cisco_header_ops;
+ dev->hard_header_len = sizeof(struct hdlc_header);
dev->type = ARPHRD_CISCO;
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_on(dev);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 038236a9c60e..96b4ce13f3a5 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -276,63 +276,69 @@ static inline struct net_device **get_dev_p(struct pvc_device *pvc,
static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
{
- u16 head_len;
struct sk_buff *skb = *skb_p;
- switch (skb->protocol) {
- case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
- head_len = 4;
- skb_push(skb, head_len);
- skb->data[3] = NLPID_CCITT_ANSI_LMI;
- break;
-
- case cpu_to_be16(NLPID_CISCO_LMI):
- head_len = 4;
- skb_push(skb, head_len);
- skb->data[3] = NLPID_CISCO_LMI;
- break;
-
- case cpu_to_be16(ETH_P_IP):
- head_len = 4;
- skb_push(skb, head_len);
- skb->data[3] = NLPID_IP;
- break;
-
- case cpu_to_be16(ETH_P_IPV6):
- head_len = 4;
- skb_push(skb, head_len);
- skb->data[3] = NLPID_IPV6;
- break;
-
- case cpu_to_be16(ETH_P_802_3):
- head_len = 10;
- if (skb_headroom(skb) < head_len) {
- struct sk_buff *skb2 = skb_realloc_headroom(skb,
- head_len);
+ if (!skb->dev) { /* Control packets */
+ switch (dlci) {
+ case LMI_CCITT_ANSI_DLCI:
+ skb_push(skb, 4);
+ skb->data[3] = NLPID_CCITT_ANSI_LMI;
+ break;
+
+ case LMI_CISCO_DLCI:
+ skb_push(skb, 4);
+ skb->data[3] = NLPID_CISCO_LMI;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ } else if (skb->dev->type == ARPHRD_DLCI) {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ skb_push(skb, 4);
+ skb->data[3] = NLPID_IP;
+ break;
+
+ case htons(ETH_P_IPV6):
+ skb_push(skb, 4);
+ skb->data[3] = NLPID_IPV6;
+ break;
+
+ default:
+ skb_push(skb, 10);
+ skb->data[3] = FR_PAD;
+ skb->data[4] = NLPID_SNAP;
+ /* OUI 00-00-00 indicates an Ethertype follows */
+ skb->data[5] = 0x00;
+ skb->data[6] = 0x00;
+ skb->data[7] = 0x00;
+ /* This should be an Ethertype: */
+ *(__be16 *)(skb->data + 8) = skb->protocol;
+ }
+
+ } else if (skb->dev->type == ARPHRD_ETHER) {
+ if (skb_headroom(skb) < 10) {
+ struct sk_buff *skb2 = skb_realloc_headroom(skb, 10);
if (!skb2)
return -ENOBUFS;
dev_kfree_skb(skb);
skb = *skb_p = skb2;
}
- skb_push(skb, head_len);
+ skb_push(skb, 10);
skb->data[3] = FR_PAD;
skb->data[4] = NLPID_SNAP;
- skb->data[5] = FR_PAD;
+ /* OUI 00-80-C2 stands for the 802.1 organization */
+ skb->data[5] = 0x00;
skb->data[6] = 0x80;
skb->data[7] = 0xC2;
+ /* PID 00-07 stands for Ethernet frames without FCS */
skb->data[8] = 0x00;
- skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
- break;
+ skb->data[9] = 0x07;
- default:
- head_len = 10;
- skb_push(skb, head_len);
- skb->data[3] = FR_PAD;
- skb->data[4] = NLPID_SNAP;
- skb->data[5] = FR_PAD;
- skb->data[6] = FR_PAD;
- skb->data[7] = FR_PAD;
- *(__be16*)(skb->data + 8) = skb->protocol;
+ } else {
+ return -EINVAL;
}
dlci_to_q922(skb->data, dlci);
@@ -428,14 +434,16 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
skb_put(skb, pad);
memset(skb->data + len, 0, pad);
}
- skb->protocol = cpu_to_be16(ETH_P_802_3);
}
+ skb->dev = dev;
if (!fr_hard_header(&skb, pvc->dlci)) {
dev->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
if (pvc->state.fecn) /* TX Congestion counter */
dev->stats.tx_compressed++;
skb->dev = pvc->frad;
+ skb->protocol = htons(ETH_P_HDLC);
+ skb_reset_network_header(skb);
dev_queue_xmit(skb);
return NETDEV_TX_OK;
}
@@ -495,10 +503,8 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
memset(skb->data, 0, len);
skb_reserve(skb, 4);
if (lmi == LMI_CISCO) {
- skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
fr_hard_header(&skb, LMI_CISCO_DLCI);
} else {
- skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
}
data = skb_tail_pointer(skb);
@@ -558,6 +564,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
skb_put(skb, i);
skb->priority = TC_PRIO_CONTROL;
skb->dev = dev;
+ skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
dev_queue_xmit(skb);
@@ -1044,7 +1051,7 @@ static void pvc_setup(struct net_device *dev)
{
dev->type = ARPHRD_DLCI;
dev->flags = IFF_POINTOPOINT;
- dev->hard_header_len = 10;
+ dev->hard_header_len = 0;
dev->addr_len = 2;
netif_keep_dst(dev);
}
@@ -1096,6 +1103,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
dev->mtu = HDLC_MAX_MTU;
dev->min_mtu = 68;
dev->max_mtu = HDLC_MAX_MTU;
+ dev->needed_headroom = 10;
dev->priv_flags |= IFF_NO_QUEUE;
dev->ml_priv = pvc;
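
A small detail shared by the hdlc_cisco and hdlc_fr hunks above (and repeated in hdlc_ppp below): locally generated keepalive/LMI/control frames now get an explicit skb->protocol = htons(ETH_P_HDLC) and a network-header reset before being handed to dev_queue_xmit() on the HDLC master device, so taps and lower layers see a consistently labelled frame. In outline:

	/* after the control payload has been written into skb */
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;				/* the hdlcX master device */
	skb->protocol = htons(ETH_P_HDLC);	/* label as raw HDLC */
	skb_reset_network_header(skb);
	dev_queue_xmit(skb);
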
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index ab8b3cbbb205..d67623d61dc4 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -254,6 +254,7 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
skb->priority = TC_PRIO_CONTROL;
skb->dev = dev;
+ skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
skb_queue_tail(&tx_queue, skb);
}
@@ -386,11 +387,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
}
for (opt = data; len; len -= opt[1], opt += opt[1]) {
- if (len < 2 || len < opt[1]) {
- dev->stats.rx_errors++;
- kfree(out);
- return; /* bad packet, drop silently */
- }
+ if (len < 2 || opt[1] < 2 || len < opt[1])
+ goto err_out;
if (pid == PID_LCP)
switch (opt[0]) {
@@ -398,6 +396,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
continue; /* MRU always OK and > 1500 bytes? */
case LCP_OPTION_ACCM: /* async control character map */
+ if (opt[1] < sizeof(valid_accm))
+ goto err_out;
if (!memcmp(opt, valid_accm,
sizeof(valid_accm)))
continue;
@@ -409,6 +409,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
}
break;
case LCP_OPTION_MAGIC:
+ if (len < 6)
+ goto err_out;
if (opt[1] != 6 || (!opt[2] && !opt[3] &&
!opt[4] && !opt[5]))
break; /* reject invalid magic number */
@@ -427,6 +429,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);
kfree(out);
+ return;
+
+err_out:
+ dev->stats.rx_errors++;
+ kfree(out);
}
static int ppp_rx(struct sk_buff *skb)
@@ -565,6 +572,13 @@ static void ppp_timer(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&ppp->lock, flags);
+ /* mod_timer could be called after we entered this function but
+ * before we got the lock.
+ */
+ if (timer_pending(&proto->timer)) {
+ spin_unlock_irqrestore(&ppp->lock, flags);
+ return;
+ }
switch (proto->state) {
case STOPPING:
case REQ_SENT:
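
The ppp_cp_parse_cr() changes above harden a classic TLV walk: each option is {type, length, value...} with length covering the whole option, so the parser must reject a length below 2 or beyond the bytes that remain before touching the value (and, for fixed-size options such as ACCM and MAGIC, before assuming their full size is present). A generic bounds-checked walk, as a sketch (process_opt() is a placeholder):

static int parse_tlv_options(const u8 *opts, unsigned int len)
{
	while (len) {
		if (len < 2)
			return -EINVAL;			/* truncated type/length header */
		if (opts[1] < 2 || opts[1] > len)
			return -EINVAL;			/* bogus or overlong length */

		/* opts[0] = type, opts + 2 .. opts + opts[1] - 1 = value */
		/* process_opt(opts[0], opts + 2, opts[1] - 2); */

		len  -= opts[1];
		opts += opts[1];
	}
	return 0;
}
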
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 8bd3ed905813..676dea2918bf 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -102,6 +102,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
old_qlen = dev->tx_queue_len;
ether_setup(dev);
dev->tx_queue_len = old_qlen;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
eth_hw_addr_random(dev);
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_off(dev);
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 0e3f8ed84660..3ec922bed2d8 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -56,6 +56,8 @@ struct lapbethdev {
struct list_head node;
struct net_device *ethdev; /* link to ethernet device */
struct net_device *axdev; /* lapbeth device (lapb#) */
+ bool up;
+ spinlock_t up_lock; /* Protects "up" */
};
static LIST_HEAD(lapbeth_devices);
@@ -103,8 +105,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
rcu_read_lock();
lapbeth = lapbeth_get_x25_dev(dev);
if (!lapbeth)
- goto drop_unlock;
- if (!netif_running(lapbeth->axdev))
+ goto drop_unlock_rcu;
+ spin_lock_bh(&lapbeth->up_lock);
+ if (!lapbeth->up)
goto drop_unlock;
len = skb->data[0] + skb->data[1] * 256;
@@ -119,11 +122,14 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
goto drop_unlock;
}
out:
+ spin_unlock_bh(&lapbeth->up_lock);
rcu_read_unlock();
return 0;
drop_unlock:
kfree_skb(skb);
goto out;
+drop_unlock_rcu:
+ rcu_read_unlock();
drop:
kfree_skb(skb);
return 0;
@@ -151,13 +157,17 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ struct lapbethdev *lapbeth = netdev_priv(dev);
int err;
- /*
- * Just to be *really* sure not to send anything if the interface
- * is down, the ethernet device may have gone.
+ spin_lock_bh(&lapbeth->up_lock);
+ if (!lapbeth->up)
+ goto drop;
+
+ /* There should be a pseudo header of 1 byte added by upper layers.
+ * Check to make sure it is there before reading it.
*/
- if (!netif_running(dev))
+ if (skb->len < 1)
goto drop;
switch (skb->data[0]) {
@@ -182,6 +192,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
goto drop;
}
out:
+ spin_unlock_bh(&lapbeth->up_lock);
return NETDEV_TX_OK;
drop:
kfree_skb(skb);
@@ -195,8 +206,6 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
struct net_device *dev;
int size = skb->len;
- skb->protocol = htons(ETH_P_X25);
-
ptr = skb_push(skb, 2);
*ptr++ = size % 256;
@@ -207,6 +216,10 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
skb->dev = dev = lapbeth->ethdev;
+ skb->protocol = htons(ETH_P_DEC);
+
+ skb_reset_network_header(skb);
+
dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
dev_queue_xmit(skb);
@@ -271,6 +284,7 @@ static const struct lapb_register_struct lapbeth_callbacks = {
*/
static int lapbeth_open(struct net_device *dev)
{
+ struct lapbethdev *lapbeth = netdev_priv(dev);
int err;
if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
@@ -278,15 +292,21 @@ static int lapbeth_open(struct net_device *dev)
return -ENODEV;
}
- netif_start_queue(dev);
+ spin_lock_bh(&lapbeth->up_lock);
+ lapbeth->up = true;
+ spin_unlock_bh(&lapbeth->up_lock);
+
return 0;
}
static int lapbeth_close(struct net_device *dev)
{
+ struct lapbethdev *lapbeth = netdev_priv(dev);
int err;
- netif_stop_queue(dev);
+ spin_lock_bh(&lapbeth->up_lock);
+ lapbeth->up = false;
+ spin_unlock_bh(&lapbeth->up_lock);
if ((err = lapb_unregister(dev)) != LAPB_OK)
pr_err("lapb_unregister error: %d\n", err);
@@ -308,7 +328,7 @@ static void lapbeth_setup(struct net_device *dev)
dev->netdev_ops = &lapbeth_netdev_ops;
dev->needs_free_netdev = true;
dev->type = ARPHRD_X25;
- dev->hard_header_len = 3;
+ dev->hard_header_len = 0;
dev->mtu = 1000;
dev->addr_len = 0;
}
@@ -329,12 +349,25 @@ static int lapbeth_new_device(struct net_device *dev)
if (!ndev)
goto out;
+ /* When transmitting data:
+ * first this driver removes a pseudo header of 1 byte,
+ * then the lapb module prepends an LAPB header of at most 3 bytes,
+ * then this driver prepends a length field of 2 bytes,
+ * then the underlying Ethernet device prepends its own header.
+ */
+ ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len
+ + dev->needed_headroom;
+ ndev->needed_tailroom = dev->needed_tailroom;
+
lapbeth = netdev_priv(ndev);
lapbeth->axdev = ndev;
dev_hold(dev);
lapbeth->ethdev = dev;
+ lapbeth->up = false;
+ spin_lock_init(&lapbeth->up_lock);
+
rc = -EIO;
if (register_netdevice(ndev))
goto fail;
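
The needed_headroom comment added above translates into a concrete budget. For a plain Ethernet lower device (hard_header_len = ETH_HLEN = 14, needed_headroom = 0) it works out to:

	needed_headroom = -1	/* pseudo header byte removed here  */
			+  3	/* worst-case LAPB header           */
			+  2	/* length field prepended here      */
			+ 14	/* lower device hard_header_len     */
			+  0	/* lower device needed_headroom     */
			= 18 bytes

so skbs handed to the lapb# device can be built with enough headroom up front instead of being reallocated on the transmit path.
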
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 4907453f17f5..937f56d0a31d 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -915,6 +915,8 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
default:
printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
+ unregister_hdlc_device(dev);
+ return -EIO;
break;
}
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 4f25c2d8fff0..6fe9695a5f18 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -183,7 +183,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl)
netif_wake_queue(sl->dev);
}
-/* Send one completely decapsulated IP datagram to the IP layer. */
+/* Send an LAPB frame to the LAPB module to process. */
static void x25_asy_bump(struct x25_asy *sl)
{
@@ -195,13 +195,12 @@ static void x25_asy_bump(struct x25_asy *sl)
count = sl->rcount;
dev->stats.rx_bytes += count;
- skb = dev_alloc_skb(count+1);
+ skb = dev_alloc_skb(count);
if (skb == NULL) {
netdev_warn(sl->dev, "memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
return;
}
- skb_push(skb, 1); /* LAPB internal control */
skb_put_data(skb, sl->rbuff, count);
skb->protocol = x25_type_trans(skb, sl->dev);
err = lapb_data_received(skb->dev, skb);
@@ -209,7 +208,6 @@ static void x25_asy_bump(struct x25_asy *sl)
kfree_skb(skb);
printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
} else {
- netif_rx(skb);
dev->stats.rx_packets++;
}
}
@@ -356,12 +354,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
*/
/*
- * Called when I frame data arrives. We did the work above - throw it
- * at the net layer.
+ * Called when I frame data arrive. We add a pseudo header for upper
+ * layers and pass it to upper layers.
*/
static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
{
+ if (skb_cow(skb, 1)) {
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+ skb_push(skb, 1);
+ skb->data[0] = X25_IFACE_DATA;
+
+ skb->protocol = x25_type_trans(skb, dev);
+
return netif_rx(skb);
}
@@ -657,7 +664,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
switch (s) {
case X25_END:
if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
- sl->rcount > 2)
+ sl->rcount >= 2)
x25_asy_bump(sl);
clear_bit(SLF_ESCAPE, &sl->flags);
sl->rcount = 0;
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index dc6fe93ce71f..e8473047b2d1 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -101,7 +101,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
if (cmd == NULL)
goto error_alloc;
cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL);
- cmd->hdr.length = sizeof(cmd->sw_rf);
+ cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf));
cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION);
cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status));
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
index 529ebca1e9e1..1f7709d24f35 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -354,6 +354,7 @@ out:
usb_autopm_put_interface(i2400mu->usb_iface);
d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %ld\n",
i2400m, ack, ack_size, (long) result);
+ usb_put_urb(&notif_urb);
return result;
error_exceeded:
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index da2d179430ca..4c57e79e5779 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1770,6 +1770,8 @@ static const struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */
AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */
AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
+ AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect
+ SMCWUSBT-G2 */
AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */
AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f761d651c16e..2276d608bca3 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1453,7 +1453,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
if (ret) {
dma_free_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc_64) +
+ (nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space_unaligned,
base_addr);
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 0baaad90b8d1..4e980e78ba95 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1521,7 +1521,7 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
*len += scnprintf(buf + *len, buf_len - *len,
"No. Preamble Rate_code ");
- for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++)
+ for (i = 0; i < tpc_stats->num_tx_chain; i++)
*len += scnprintf(buf + *len, buf_len - *len,
"tpc_value%d ", i);
@@ -2365,6 +2365,7 @@ void ath10k_debug_destroy(struct ath10k *ar)
ath10k_debug_fw_stats_reset(ar);
kfree(ar->debug.tpc_stats);
+ kfree(ar->debug.tpc_stats_final);
}
int ath10k_debug_register(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 03d4cc6f35bc..0a7551dc0f94 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -153,6 +153,14 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+ if (idx < 0 || idx >= htt->rx_ring.size) {
+ ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
+ idx &= htt->rx_ring.size_mask;
+ ret = -ENOMEM;
+ goto fail;
+ }
+
while (num > 0) {
skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
if (!skb) {
@@ -759,6 +767,7 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
u8 preamble = 0;
u8 group_id;
u32 info1, info2, info3;
+ u32 stbc, nsts_su;
info1 = __le32_to_cpu(rxd->ppdu_start.info1);
info2 = __le32_to_cpu(rxd->ppdu_start.info2);
@@ -803,11 +812,16 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
*/
bw = info2 & 3;
sgi = info3 & 1;
+ stbc = (info2 >> 3) & 1;
group_id = (info2 >> 4) & 0x3F;
if (GROUP_ID_IS_SU_MIMO(group_id)) {
mcs = (info3 >> 4) & 0x0F;
- nss = ((info2 >> 10) & 0x07) + 1;
+ nsts_su = ((info2 >> 10) & 0x07);
+ if (stbc)
+ nss = (nsts_su >> 2) + 1;
+ else
+ nss = (nsts_su + 1);
} else {
/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
* so it's impossible to decode MCS. Also since
@@ -1755,14 +1769,62 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
ath10k_unchain_msdu(amsdu, unchain_cnt);
}
+static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu)
+{
+ u8 *subframe_hdr;
+ struct sk_buff *first;
+ bool is_first, is_last;
+ struct htt_rx_desc *rxd;
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len, crypto_len;
+ enum htt_rx_mpdu_encrypt_type enctype;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ first = skb_peek(amsdu);
+
+ rxd = (void *)first->data - sizeof(*rxd);
+ hdr = (void *)rxd->rx_hdr_status;
+
+ is_first = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+ /* Return in case of non-aggregated msdu */
+ if (is_first && is_last)
+ return true;
+
+ /* First msdu flag is not set for the first msdu of the list */
+ if (!is_first)
+ return false;
+
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
+ crypto_len;
+
+ /* Validate if the amsdu has a proper first subframe.
+ * There are chances a single msdu can be received as amsdu when
+ * the unauthenticated amsdu flag of a QoS header
+ * gets flipped in non-SPP AMSDU's, in such cases the first
+ * subframe has llc/snap header in place of a valid da.
+ * return false if the da matches rfc1042 pattern
+ */
+ if (ether_addr_equal(subframe_hdr, rfc1042_header))
+ return false;
+
+ return true;
+}
+
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *rx_status)
{
- /* FIXME: It might be a good idea to do some fuzzy-testing to drop
- * invalid/dangerous frames.
- */
-
if (!rx_status->freq) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
return false;
@@ -1773,6 +1835,11 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
return false;
}
+ if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
+ return false;
+ }
+
return true;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 7cff0d52338f..fd011bdabb96 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1329,7 +1329,9 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
err:
return res;
}
@@ -1536,7 +1538,9 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
err:
return res;
}
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index fac58c3c576a..3ff65a0a834a 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -753,7 +753,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define TARGET_10_4_TX_DBG_LOG_SIZE 1024
#define TARGET_10_4_NUM_WDS_ENTRIES 32
-#define TARGET_10_4_DMA_BURST_SIZE 0
+#define TARGET_10_4_DMA_BURST_SIZE 1
#define TARGET_10_4_MAC_AGGR_DELIM 0
#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
#define TARGET_10_4_VOW_CONFIG 0
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a09d7a07e90a..f32d35e03708 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3567,23 +3567,16 @@ bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
{
struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
- int ret = 0;
-
- spin_lock_bh(&ar->data_lock);
- if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
+ if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) {
ath10k_warn(ar, "wmi mgmt tx queue is full\n");
- ret = -ENOSPC;
- goto unlock;
+ return -ENOSPC;
}
- __skb_queue_tail(q, skb);
+ skb_queue_tail(q, skb);
ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
-unlock:
- spin_unlock_bh(&ar->data_lock);
-
- return ret;
+ return 0;
}
static enum ath10k_mac_tx_path
@@ -3852,6 +3845,9 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
if (ret) {
ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
ret);
+ /* remove this msdu from idr tracking */
+ ath10k_wmi_cleanup_mgmt_tx_send(ar, skb);
+
dma_unmap_single(ar->dev, paddr, skb->len,
DMA_TO_DEVICE);
ieee80211_free_txskb(ar->hw, skb);
@@ -6859,7 +6855,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
struct ieee80211_channel *channel)
{
int ret;
- enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
+ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
lockdep_assert_held(&ar->conf_mutex);
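
ath10k_mac_tx_wmi_mgmt() above drops ar->data_lock around the management-TX queue: skb_queue_tail() takes the queue's own spinlock internally, and skb_queue_len_lockless() reads the length with READ_ONCE(), so the full-queue check becomes a best-effort cap (hence >= rather than ==) instead of an exact, lock-protected limit. The pattern in isolation:

static int enqueue_capped(struct sk_buff_head *q, struct sk_buff *skb,
			  unsigned int max_pending)
{
	/* Racy but safe: concurrent writers may briefly overshoot the cap
	 * by a few entries, which is acceptable for a soft limit.
	 */
	if (skb_queue_len_lockless(q) >= max_pending)
		return -ENOSPC;

	skb_queue_tail(q, skb);		/* locks q->lock internally */
	return 0;
}
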
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 0ecaba824fb2..28d86da65c05 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -561,6 +561,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
le16_to_cpu(htc_hdr->len),
ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
ret = -ENOMEM;
+
+ queue_work(ar->workqueue, &ar->restart_work);
+ ath10k_warn(ar, "exceeds length, start recovery\n");
+
goto err;
}
@@ -1567,23 +1571,33 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
size_t buf_len)
{
int ret;
+ void *mem;
+
+ mem = kzalloc(buf_len, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
/* set window register to start read cycle */
ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
if (ret) {
ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
- return ret;
+ goto out;
}
/* read the data */
- ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
+ ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
if (ret) {
ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
ret);
- return ret;
+ goto out;
}
- return 0;
+ memcpy(buf, mem, buf_len);
+
+out:
+ kfree(mem);
+
+ return ret;
}
static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
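
ath10k_sdio_hif_diag_read() above is changed to read into a driver-allocated bounce buffer and only then copy into the caller's buffer, since the caller may pass stack or otherwise non-DMA-safe memory to the SDIO host controller. Generic shape of the pattern (do_dma_read() stands in for the actual ath10k_sdio_read() call):

#include <linux/slab.h>
#include <linux/string.h>

int do_dma_read(void *buf, size_t len);		/* hypothetical DMA-capable read */

static int diag_read(void *caller_buf, size_t len)
{
	void *bounce;
	int ret;

	bounce = kzalloc(len, GFP_KERNEL);	/* kmalloc'd memory is DMA-safe */
	if (!bounce)
		return -ENOMEM;

	ret = do_dma_read(bounce, len);
	if (!ret)
		memcpy(caller_buf, bounce, len);	/* copy out only on success */

	kfree(bounce);
	return ret;
}
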
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index e2d78f77edb7..241e6f0e1dfe 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -789,13 +789,14 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar)
ret = ath10k_snoc_init_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to initialize CE: %d\n", ret);
- goto err_wlan_enable;
+ goto err_free_rri;
}
napi_enable(&ar->napi);
return 0;
-err_wlan_enable:
+err_free_rri:
+ ath10k_ce_free_rri(ar);
ath10k_snoc_wlan_disable(ar);
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index c64a03f164c0..16d5fe6d1e2e 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -1019,6 +1019,8 @@ static int ath10k_usb_probe(struct usb_interface *interface,
ar_usb = ath10k_usb_priv(ar);
ret = ath10k_usb_create(ar, interface);
+ if (ret)
+ goto err;
ar_usb->ar = ar;
ar->dev_id = product_id;
@@ -1030,7 +1032,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_warn(ar, "failed to register driver core: %d\n", ret);
- goto err;
+ goto err_usb_destroy;
}
/* TODO: remove this once USB support is fully implemented */
@@ -1038,6 +1040,9 @@ static int ath10k_usb_probe(struct usb_interface *interface,
return 0;
+err_usb_destroy:
+ ath10k_usb_destroy(ar);
+
err:
ath10k_core_destroy(ar);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 7fd63bbf8e24..b6cd33fa79f8 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -139,6 +139,7 @@ struct wmi_ops {
struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
struct sk_buff *skb,
dma_addr_t paddr);
+ int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
u32 log_level);
struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
@@ -432,6 +433,15 @@ ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
}
static inline int
+ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
+{
+ if (!ar->wmi.ops->cleanup_mgmt_tx_send)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
+}
+
+static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
dma_addr_t paddr)
{
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 248decb494c2..243887fdb343 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -449,13 +449,13 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
case WMI_TDLS_TEARDOWN_REASON_TX:
case WMI_TDLS_TEARDOWN_REASON_RSSI:
case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
+ rcu_read_lock();
station = ieee80211_find_sta_by_ifaddr(ar->hw,
ev->peer_macaddr.addr,
NULL);
if (!station) {
ath10k_warn(ar, "did not find station from tdls peer event");
- kfree(tb);
- return;
+ goto exit;
}
arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
ieee80211_tdls_oper_request(
@@ -465,7 +465,13 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
GFP_ATOMIC
);
break;
+ default:
+ kfree(tb);
+ return;
}
+
+exit:
+ rcu_read_unlock();
kfree(tb);
}
@@ -1157,13 +1163,15 @@ static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
switch (tag) {
case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
+ arg->service_map_ext_valid = true;
arg->service_map_ext_len = *(__le32 *)ptr;
arg->service_map_ext = ptr + sizeof(__le32);
return 0;
default:
break;
}
- return -EPROTO;
+
+ return 0;
}
static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
@@ -2639,6 +2647,18 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
}
static int
+ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
+ struct sk_buff *msdu)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_wmi *wmi = &ar->wmi;
+
+ idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
+
+ return 0;
+}
+
+static int
ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
dma_addr_t paddr)
{
@@ -2710,6 +2730,8 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
if (desc_id < 0)
goto err_free_skb;
+ cb->msdu_id = desc_id;
+
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
@@ -3949,6 +3971,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
/* .gen_mgmt_tx = not implemented; HTT is used */
.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
+ .cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send,
.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
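In the wmi-tlv.c hunk above, the TDLS handler now wraps ieee80211_find_sta_by_ifaddr() in rcu_read_lock()/rcu_read_unlock(), since the returned station pointer is only valid inside the RCU read-side section. A minimal sketch of that lookup pattern (the debug print is illustrative):

static void example_handle_tdls_peer(struct ath10k *ar, const u8 *peer_addr)
{
	struct ieee80211_sta *sta;

	rcu_read_lock();
	sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL);
	if (sta)
		ath10k_dbg(ar, ATH10K_DBG_WMI, "tdls peer %pM found\n",
			   peer_addr);
	rcu_read_unlock();	/* sta must not be used past this point */
}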
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 3372dfa0decc..41eb57be9222 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -4550,16 +4550,13 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
}
pream_idx = 0;
- for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
+ for (i = 0; i < tpc_stats->rate_max; i++) {
memset(tpc_value, 0, sizeof(tpc_value));
memset(buff, 0, sizeof(buff));
if (i == pream_table[pream_idx])
pream_idx++;
- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
- if (j >= __le32_to_cpu(ev->num_tx_chain))
- break;
-
+ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
rate_code[i],
type);
@@ -4672,7 +4669,7 @@ void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
{
- u32 num_tx_chain;
+ u32 num_tx_chain, rate_max;
u8 rate_code[WMI_TPC_RATE_MAX];
u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
struct wmi_pdev_tpc_config_event *ev;
@@ -4688,6 +4685,13 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
return;
}
+ rate_max = __le32_to_cpu(ev->rate_max);
+ if (rate_max > WMI_TPC_RATE_MAX) {
+ ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n",
+ rate_max, WMI_TPC_RATE_MAX);
+ rate_max = WMI_TPC_RATE_MAX;
+ }
+
tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
if (!tpc_stats)
return;
@@ -4704,8 +4708,8 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(ev->twice_antenna_reduction);
tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
- tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
- tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
+ tpc_stats->num_tx_chain = num_tx_chain;
+ tpc_stats->rate_max = rate_max;
ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
rate_code, pream_table,
@@ -4900,16 +4904,13 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
}
pream_idx = 0;
- for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
+ for (i = 0; i < tpc_stats->rate_max; i++) {
memset(tpc_value, 0, sizeof(tpc_value));
memset(buff, 0, sizeof(buff));
if (i == pream_table[pream_idx])
pream_idx++;
- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
- if (j >= __le32_to_cpu(ev->num_tx_chain))
- break;
-
+ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
rate_code[i],
type, pream_idx);
@@ -4925,7 +4926,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
{
- u32 num_tx_chain;
+ u32 num_tx_chain, rate_max;
u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
struct wmi_pdev_tpc_final_table_event *ev;
@@ -4933,12 +4934,24 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
+ num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+ if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
+ ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n",
+ num_tx_chain, WMI_TPC_TX_N_CHAIN);
+ return;
+ }
+
+ rate_max = __le32_to_cpu(ev->rate_max);
+ if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
+ ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n",
+ rate_max, WMI_TPC_FINAL_RATE_MAX);
+ rate_max = WMI_TPC_FINAL_RATE_MAX;
+ }
+
tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
if (!tpc_stats)
return;
- num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
-
ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
num_tx_chain);
@@ -4951,8 +4964,8 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(ev->twice_antenna_reduction);
tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
- tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
- tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
+ tpc_stats->num_tx_chain = num_tx_chain;
+ tpc_stats->rate_max = rate_max;
ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
rate_code, pream_table,
@@ -5497,8 +5510,13 @@ void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
ret);
}
- ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
- __le32_to_cpu(arg.service_map_ext_len));
+ /*
+ * "arg.service_map_ext_valid" must be initialized to zero (false)
+ * for the check below to work.
+ */
+ if (arg.service_map_ext_valid)
+ ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
+ __le32_to_cpu(arg.service_map_ext_len));
}
static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
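The wmi.c changes above stop trusting firmware-reported table sizes: num_tx_chain and rate_max are checked against the fixed array bounds before being used as loop limits. A small sketch of that clamping step, with MAX_RATES standing in for WMI_TPC_RATE_MAX:

#define MAX_RATES 24	/* illustrative bound for the fixed-size tables */

static u32 sanitize_rate_max(struct ath10k *ar, __le32 reported)
{
	u32 rate_max = __le32_to_cpu(reported);

	if (rate_max > MAX_RATES) {
		ath10k_warn(ar, "firmware reported %u rates, clamping to %u\n",
			    rate_max, MAX_RATES);
		rate_max = MAX_RATES;	/* never index past the table */
	}

	return rate_max;
}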
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index e341cfb3fcc2..6bd63d1cd039 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -6710,6 +6710,7 @@ struct wmi_svc_rdy_ev_arg {
};
struct wmi_svc_avail_ev_arg {
+ bool service_map_ext_valid;
__le32 service_map_ext_len;
const __le32 *service_map_ext;
};
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 0c61dbaa62a4..702c4761006c 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -429,6 +429,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
+ if (aid < 1 || aid > AP_MAX_NUM_STA)
+ return;
+
if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
struct ieee80211_mgmt *mgmt =
(struct ieee80211_mgmt *) assoc_info;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index bc7916f2add0..987ebae8ea0e 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -2648,6 +2648,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
return -EINVAL;
}
+ if (tsid >= 16) {
+ ath6kl_err("invalid tsid: %d\n", tsid);
+ return -EINVAL;
+ }
+
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
if (!skb)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 0fca44e91a71..de3befc2edd1 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -179,7 +179,8 @@ struct ath_frame_info {
s8 txq;
u8 keyix;
u8 rtscts_rate;
- u8 retries : 7;
+ u8 retries : 6;
+ u8 dyn_smps : 1;
u8 baw_tracked : 1;
u8 tx_power;
enum ath9k_key_type keytype:2;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 0a6eb8a8c1ed..84fe68670949 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -1236,8 +1236,11 @@ static ssize_t write_file_nf_override(struct file *file,
ah->nf_override = val;
- if (ah->curchan)
+ if (ah->curchan) {
+ ath9k_ps_wakeup(sc);
ath9k_hw_loadnf(ah, ah->curchan);
+ ath9k_ps_restore(sc);
+ }
return count;
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index dd0c32379375..2ed98aaed6fb 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -449,10 +449,19 @@ static void hif_usb_stop(void *hif_handle)
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
/* The pending URBs have to be canceled. */
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_pending, list) {
+ usb_get_urb(tx_buf->urb);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
}
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
}
@@ -612,6 +621,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
hif_dev->remain_skb = nskb;
spin_unlock(&hif_dev->rx_lock);
} else {
+ if (pool_index == MAX_PKT_NUM_IN_TRANSFER) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: over RX MAX_PKT_NUM\n");
+ goto err;
+ }
nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
if (!nskb) {
dev_err(&hif_dev->udev->dev,
@@ -638,9 +652,9 @@ err:
static void ath9k_hif_usb_rx_cb(struct urb *urb)
{
- struct sk_buff *skb = (struct sk_buff *) urb->context;
- struct hif_device_usb *hif_dev =
- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
+ struct hif_device_usb *hif_dev = rx_buf->hif_dev;
+ struct sk_buff *skb = rx_buf->skb;
int ret;
if (!skb)
@@ -680,14 +694,15 @@ resubmit:
return;
free:
kfree_skb(skb);
+ kfree(rx_buf);
}
static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
{
- struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
+ struct hif_device_usb *hif_dev = rx_buf->hif_dev;
+ struct sk_buff *skb = rx_buf->skb;
struct sk_buff *nskb;
- struct hif_device_usb *hif_dev =
- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int ret;
if (!skb)
@@ -727,11 +742,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
return;
}
+ rx_buf->skb = nskb;
+
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
nskb->data, MAX_REG_IN_BUF_SIZE,
- ath9k_hif_usb_reg_in_cb, nskb, 1);
+ ath9k_hif_usb_reg_in_cb, rx_buf, 1);
}
resubmit:
@@ -745,6 +762,7 @@ resubmit:
return;
free:
kfree_skb(skb);
+ kfree(rx_buf);
urb->context = NULL;
}
@@ -753,27 +771,37 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
unsigned long flags;
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_buf, list) {
+ usb_get_urb(tx_buf->urb);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_urb(tx_buf->urb);
list_del(&tx_buf->list);
usb_free_urb(tx_buf->urb);
kfree(tx_buf->buf);
kfree(tx_buf);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
}
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_pending, list) {
+ usb_get_urb(tx_buf->urb);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_urb(tx_buf->urb);
list_del(&tx_buf->list);
usb_free_urb(tx_buf->urb);
kfree(tx_buf->buf);
kfree(tx_buf);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
}
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
}
@@ -790,7 +818,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
init_usb_anchor(&hif_dev->mgmt_submitted);
for (i = 0; i < MAX_TX_URB_NUM; i++) {
- tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
+ tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL);
if (!tx_buf)
goto err;
@@ -827,8 +855,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
{
- struct urb *urb = NULL;
+ struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
+ struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->rx_submitted);
@@ -836,6 +865,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
for (i = 0; i < MAX_RX_URB_NUM; i++) {
+ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
+ if (!rx_buf) {
+ ret = -ENOMEM;
+ goto err_rxb;
+ }
+
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@@ -850,11 +885,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
+ rx_buf->hif_dev = hif_dev;
+ rx_buf->skb = skb;
+
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_WLAN_RX_PIPE),
skb->data, MAX_RX_BUF_SIZE,
- ath9k_hif_usb_rx_cb, skb);
+ ath9k_hif_usb_rx_cb, rx_buf);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->rx_submitted);
@@ -880,6 +918,8 @@ err_submit:
err_skb:
usb_free_urb(urb);
err_urb:
+ kfree(rx_buf);
+err_rxb:
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
return ret;
}
@@ -891,14 +931,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
{
- struct urb *urb = NULL;
+ struct rx_buf *rx_buf = NULL;
struct sk_buff *skb = NULL;
+ struct urb *urb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->reg_in_submitted);
for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
+ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
+ if (!rx_buf) {
+ ret = -ENOMEM;
+ goto err_rxb;
+ }
+
/* Allocate URB */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
@@ -913,11 +960,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
+ rx_buf->hif_dev = hif_dev;
+ rx_buf->skb = skb;
+
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
skb->data, MAX_REG_IN_BUF_SIZE,
- ath9k_hif_usb_reg_in_cb, skb, 1);
+ ath9k_hif_usb_reg_in_cb, rx_buf, 1);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
@@ -943,6 +993,8 @@ err_submit:
err_skb:
usb_free_urb(urb);
err_urb:
+ kfree(rx_buf);
+err_rxb:
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
return ret;
}
@@ -973,7 +1025,7 @@ err:
return -ENOMEM;
}
-static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
+void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
{
usb_kill_anchored_urbs(&hif_dev->regout_submitted);
ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
@@ -1341,8 +1393,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
if (hif_dev->flags & HIF_USB_READY) {
ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
- ath9k_htc_hw_free(hif_dev->htc_handle);
ath9k_hif_usb_dev_deinit(hif_dev);
+ ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv);
+ ath9k_htc_hw_free(hif_dev->htc_handle);
}
usb_set_intfdata(interface, NULL);
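Several hif_usb.c hunks above use the same cancellation pattern for URBs sitting on a spinlock-protected list: pin the URB with usb_get_urb(), drop the lock so the sleeping usb_kill_urb() is legal, then retake the lock before continuing the walk. A condensed sketch of that core (it only cancels; the driver additionally unlinks and frees each entry, and relies on the TX path being quiesced while the list is walked):

static void example_kill_pending(struct hif_device_usb *hif_dev)
{
	struct tx_buf *tx_buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
	list_for_each_entry_safe(tx_buf, tmp, &hif_dev->tx.tx_pending, list) {
		struct urb *urb = usb_get_urb(tx_buf->urb);	/* pin it */

		spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
		usb_kill_urb(urb);	/* may sleep, so the lock is dropped */
		usb_free_urb(urb);	/* drop the reference taken above */
		spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
	}
	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
}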
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 7846916aa01d..5985aa15ca93 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -86,6 +86,11 @@ struct tx_buf {
struct list_head list;
};
+struct rx_buf {
+ struct sk_buff *skb;
+ struct hif_device_usb *hif_dev;
+};
+
#define HIF_USB_TX_STOP BIT(0)
#define HIF_USB_TX_FLUSH BIT(1)
@@ -133,5 +138,6 @@ struct hif_device_usb {
int ath9k_hif_usb_init(void);
void ath9k_hif_usb_exit(void);
+void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev);
#endif /* HTC_USB_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 214c68269a69..cb136d9d4621 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -246,7 +246,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
if (unlikely(r)) {
ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
reg_offset, r);
- return -EIO;
+ return -1;
}
return be32_to_cpu(val);
@@ -933,8 +933,9 @@ err_init:
int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
u16 devid, char *product, u32 drv_info)
{
- struct ieee80211_hw *hw;
+ struct hif_device_usb *hif_dev;
struct ath9k_htc_priv *priv;
+ struct ieee80211_hw *hw;
int ret;
hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops);
@@ -969,7 +970,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
return 0;
err_init:
- ath9k_deinit_wmi(priv);
+ ath9k_stop_wmi(priv);
+ hif_dev = (struct hif_device_usb *)htc_handle->hif_dev;
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+ ath9k_destoy_wmi(priv);
err_free:
ieee80211_free_hw(hw);
return ret;
@@ -984,7 +988,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED;
ath9k_deinit_device(htc_handle->drv_priv);
- ath9k_deinit_wmi(htc_handle->drv_priv);
+ ath9k_stop_wmi(htc_handle->drv_priv);
ieee80211_free_hw(htc_handle->drv_priv->hw);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index b5d7ef4da17f..d567fbe79cff 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -973,7 +973,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ath_htc_rx_status *rxstatus;
struct ath_rx_status rx_stats;
bool decrypt_error = false;
- __be16 rs_datalen;
+ u16 rs_datalen;
bool is_phyerr;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
@@ -999,9 +999,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
* which are not PHY_ERROR (short radar pulses have a length of 3)
*/
if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
- ath_warn(common,
- "Short RX data len, dropping (dlen: %d)\n",
- rs_datalen);
+ ath_dbg(common, ANY,
+ "Short RX data len, dropping (dlen: %d)\n",
+ rs_datalen);
goto rx_next;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 1bf63a4efb4c..05fca38b38ed 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -113,6 +113,9 @@ static void htc_process_conn_rsp(struct htc_target *target,
if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
epid = svc_rspmsg->endpoint_id;
+ if (epid < 0 || epid >= ENDPOINT_MAX)
+ return;
+
service_id = be16_to_cpu(svc_rspmsg->service_id);
max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len);
endpoint = &target->endpoint[epid];
@@ -170,6 +173,7 @@ static int htc_config_pipe_credits(struct htc_target *target)
time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
if (!time_left) {
dev_err(target->dev, "HTC credit config timeout\n");
+ kfree_skb(skb);
return -ETIMEDOUT;
}
@@ -205,6 +209,7 @@ static int htc_setup_complete(struct htc_target *target)
time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
if (!time_left) {
dev_err(target->dev, "HTC start timeout\n");
+ kfree_skb(skb);
return -ETIMEDOUT;
}
@@ -277,6 +282,7 @@ int htc_connect_service(struct htc_target *target,
if (!time_left) {
dev_err(target->dev, "Service connection timeout for: %d\n",
service_connreq->service_id);
+ kfree_skb(skb);
return -ETIMEDOUT;
}
@@ -336,6 +342,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
if (skb) {
htc_hdr = (struct htc_frame_hdr *) skb->data;
+ if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint))
+ goto ret;
endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
skb_pull(skb, sizeof(struct htc_frame_hdr));
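The htc_hst.c hunks above plug leaks on the timeout paths: when wait_for_completion_timeout() expires, no completion handler will ever consume the command buffer, so the caller frees it. A sketch of that shape, assuming a hypothetical send_cmd() that neither frees nor takes ownership of the skb:

static int example_send_and_wait(struct htc_target *target,
				 struct sk_buff *skb)
{
	unsigned long time_left;
	int ret;

	ret = send_cmd(target, skb);	/* hypothetical, does not free skb */
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
	if (!time_left) {
		dev_err(target->dev, "HTC command timeout\n");
		kfree_skb(skb);		/* nobody else will free it now */
		return -ETIMEDOUT;
	}

	return 0;
}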
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index b4f7ee423d40..9f438d8e59f2 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -287,7 +287,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
srev = REG_READ(ah, AR_SREV);
- if (srev == -EIO) {
+ if (srev == -1) {
ath_err(ath9k_hw_common(ah),
"Failed to read SREV register");
return false;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 74f98bbaea88..3e92b88045a5 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1457,6 +1457,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
}
+ if (changed & IEEE80211_CONF_CHANGE_POWER)
+ ath9k_set_txpower(sc, NULL);
+
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index d1f6710ca63b..066677bb83eb 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -112,14 +112,17 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
return wmi;
}
-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
+void ath9k_stop_wmi(struct ath9k_htc_priv *priv)
{
struct wmi *wmi = priv->wmi;
mutex_lock(&wmi->op_mutex);
wmi->stopped = true;
mutex_unlock(&wmi->op_mutex);
+}
+void ath9k_destoy_wmi(struct ath9k_htc_priv *priv)
+{
kfree(priv->wmi);
}
@@ -336,6 +339,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
+ kfree_skb(skb);
return -ETIMEDOUT;
}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index 380175d5ecd7..d8b912206232 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -179,7 +179,6 @@ struct wmi {
};
struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv);
int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
enum htc_endpoint_id *wmi_ctrl_epid);
int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
@@ -189,6 +188,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
void ath9k_wmi_event_tasklet(unsigned long data);
void ath9k_fatal_work(struct work_struct *work);
void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv);
+void ath9k_stop_wmi(struct ath9k_htc_priv *priv);
+void ath9k_destoy_wmi(struct ath9k_htc_priv *priv);
#define WMI_CMD(_wmi_cmd) \
do { \
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 4b7a7fc2a0fe..29f71457e26b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1324,6 +1324,11 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
is_40, is_sgi, is_sp);
if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
+ if (rix >= 8 && fi->dyn_smps) {
+ info->rates[i].RateFlags |=
+ ATH9K_RATESERIES_RTS_CTS;
+ info->flags |= ATH9K_TXDESC_CTSENA;
+ }
info->txpower[i] = ath_get_rate_txpower(sc, bf, rix,
is_40, false);
@@ -2206,6 +2211,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
fi->keyix = an->ps_key;
else
fi->keyix = ATH9K_TXKEYIX_INVALID;
+ fi->dyn_smps = sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC;
fi->keytype = keytype;
fi->framelen = framelen;
fi->tx_power = txpower;
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 88045f93a76c..62ed0977f32c 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -351,9 +351,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
if (SUPP(CARL9170FW_WLANTX_CAB)) {
- if_comb_types |=
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_GO);
+ if_comb_types |= BIT(NL80211_IFTYPE_AP);
#ifdef CONFIG_MAC80211_MESH
if_comb_types |=
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 7f1bdea742b8..7064e6fc5dbb 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -582,11 +582,10 @@ static int carl9170_init_interface(struct ar9170 *ar,
ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
(vif->type != NL80211_IFTYPE_AP));
- /* While the driver supports HW offload in a single
- * P2P client configuration, it doesn't support HW
- * offload in the favourit, concurrent P2P GO+CLIENT
- * configuration. Hence, HW offload will always be
- * disabled for P2P.
+ /* The driver used to have P2P GO+CLIENT support,
+ * but since that support was dropped and we don't know
+ * whether any gremlins are lurking in the shadows, it is
+ * best to keep HW offload disabled for P2P.
*/
ar->disable_offload |= vif->p2p;
@@ -639,18 +638,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_STATION)
break;
- /* P2P GO [master] use-case
- * Because the P2P GO station is selected dynamically
- * by all participating peers of a WIFI Direct network,
- * the driver has be able to change the main interface
- * operating mode on the fly.
- */
- if (main_vif->p2p && vif->p2p &&
- vif->type == NL80211_IFTYPE_AP) {
- old_main = main_vif;
- break;
- }
-
err = -EBUSY;
rcu_read_unlock();
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 79998a3ddb7a..46ae4ec4ad47 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -163,7 +163,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = {
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
.mcs = {
.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- .rx_highest = cpu_to_le16(72),
+ .rx_highest = cpu_to_le16(150),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
}
}
@@ -1341,7 +1341,7 @@ static int wcn36xx_probe(struct platform_device *pdev)
if (addr && ret != ETH_ALEN) {
wcn36xx_err("invalid local-mac-address\n");
ret = -EINVAL;
- goto out_wq;
+ goto out_destroy_ept;
} else if (addr) {
wcn36xx_info("mac address: %pM\n", addr);
SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
@@ -1349,7 +1349,7 @@ static int wcn36xx_probe(struct platform_device *pdev)
ret = wcn36xx_platform_get_resources(wcn, pdev);
if (ret)
- goto out_wq;
+ goto out_destroy_ept;
wcn36xx_init_ieee80211(wcn);
ret = ieee80211_register_hw(wcn->hw);
@@ -1361,6 +1361,8 @@ static int wcn36xx_probe(struct platform_device *pdev)
out_unmap:
iounmap(wcn->ccu_base);
iounmap(wcn->dxe_base);
+out_destroy_ept:
+ rpmsg_destroy_ept(wcn->smd_channel);
out_wq:
ieee80211_free_hw(hw);
out_err:
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index 3548e8d5e18e..5284af423d93 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -1,6 +1,7 @@
config WIL6210
tristate "Wilocity 60g WiFi card wil6210 support"
select WANT_DEV_COREDUMP
+ select CRC32
depends on CFG80211
depends on PCI
default n
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 44296c015925..55a809cb3105 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -730,32 +730,6 @@ struct dentry *wil_debugfs_create_ioblob(const char *name,
return debugfs_create_file(name, mode, parent, wil_blob, &fops_ioblob);
}
-/*---reset---*/
-static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
-{
- struct wil6210_priv *wil = file->private_data;
- struct net_device *ndev = wil->main_ndev;
-
- /**
- * BUG:
- * this code does NOT sync device state with the rest of system
- * use with care, debug only!!!
- */
- rtnl_lock();
- dev_close(ndev);
- ndev->flags &= ~IFF_UP;
- rtnl_unlock();
- wil_reset(wil, true);
-
- return len;
-}
-
-static const struct file_operations fops_reset = {
- .write = wil_write_file_reset,
- .open = simple_open,
-};
-
/*---write channel 1..4 to rxon for it, 0 to rxoff---*/
static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
@@ -991,6 +965,8 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
int rc;
void *frame;
+ memset(&params, 0, sizeof(params));
+
if (!len)
return -EINVAL;
@@ -2459,7 +2435,6 @@ static const struct {
{"desc", 0444, &fops_txdesc},
{"bf", 0444, &fops_bf},
{"mem_val", 0644, &fops_memread},
- {"reset", 0244, &fops_reset},
{"rxon", 0244, &fops_rxon},
{"tx_mgmt", 0244, &fops_txmgmt},
{"wmi_send", 0244, &fops_wmi},
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 0655cd884514..d161dc930313 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -590,10 +590,14 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
}
if (isr & BIT_DMA_EP_MISC_ICR_HALP) {
- wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
- wil6210_mask_halp(wil);
isr &= ~BIT_DMA_EP_MISC_ICR_HALP;
- complete(&wil->halp.comp);
+ if (wil->halp.handle_icr) {
+ /* no need to handle HALP ICRs until next vote */
+ wil->halp.handle_icr = false;
+ wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
+ wil6210_mask_halp(wil);
+ complete(&wil->halp.comp);
+ }
}
wil->isr_misc = isr;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 10673fa9388e..fe91db3478dc 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1687,7 +1687,7 @@ int __wil_up(struct wil6210_priv *wil)
return rc;
/* Rx RING. After MAC and beacon */
- rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order);
+ rc = wil->txrx_ops.rx_init(wil, rx_ring_order);
if (rc)
return rc;
@@ -1814,11 +1814,14 @@ void wil_halp_vote(struct wil6210_priv *wil)
if (++wil->halp.ref_cnt == 1) {
reinit_completion(&wil->halp.comp);
+ /* tell the IRQ context to handle the HALP ICR */
+ wil->halp.handle_icr = true;
wil6210_set_halp(wil);
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
if (!rc) {
wil_err(wil, "HALP vote timed out\n");
/* Mask HALP as done in case the interrupt is raised */
+ wil->halp.handle_icr = false;
wil6210_mask_halp(wil);
} else {
wil_dbg_irq(wil,
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 73cdf54521f9..236dcb6f5e84 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -881,7 +881,7 @@ static void wil_rx_buf_len_init(struct wil6210_priv *wil)
}
}
-static int wil_rx_init(struct wil6210_priv *wil, u16 size)
+static int wil_rx_init(struct wil6210_priv *wil, uint order)
{
struct wil_ring *vring = &wil->ring_rx;
int rc;
@@ -895,7 +895,7 @@ static int wil_rx_init(struct wil6210_priv *wil, u16 size)
wil_rx_buf_len_init(wil);
- vring->size = size;
+ vring->size = 1 << order;
vring->is_rx = true;
rc = wil_vring_alloc(wil, vring);
if (rc)
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 5fa8d6ad6648..fe666a3583c1 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -268,6 +268,9 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
struct list_head *active = &wil->rx_buff_mgmt.active;
dma_addr_t pa;
+ if (!wil->rx_buff_mgmt.buff_arr)
+ return;
+
while (!list_empty(active)) {
struct wil_rx_buff *rx_buff =
list_first_entry(active, struct wil_rx_buff, list);
@@ -590,9 +593,9 @@ static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
}
-static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
+static int wil_rx_init_edma(struct wil6210_priv *wil, uint desc_ring_order)
{
- u16 status_ring_size;
+ u16 status_ring_size, desc_ring_size = 1 << desc_ring_order;
struct wil_ring *ring = &wil->ring_rx;
int rc;
size_t elem_size = wil->use_compressed_rx_status ?
@@ -607,7 +610,12 @@ static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
"compressed RX status cannot be used with SW reorder\n");
return -EINVAL;
}
-
+ if (wil->rx_status_ring_order <= desc_ring_order)
+ /* make sure sring is larger than desc ring */
+ wil->rx_status_ring_order = desc_ring_order + 1;
+ if (wil->rx_buff_id_count <= desc_ring_size)
+ /* make sure we will not run out of buff_ids */
+ wil->rx_buff_id_count = desc_ring_size + 512;
if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 75fe1a3b7046..bc89044d0b66 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -602,7 +602,7 @@ struct wil_txrx_ops {
struct wil_ring *ring, struct sk_buff *skb);
irqreturn_t (*irq_tx)(int irq, void *cookie);
/* RX ops */
- int (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
+ int (*rx_init)(struct wil6210_priv *wil, uint ring_order);
void (*rx_fini)(struct wil6210_priv *wil);
int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid, u8 cid,
u8 tid, u8 token, u16 status, bool amsdu,
@@ -778,6 +778,7 @@ struct wil_halp {
struct mutex lock; /* protect halp ref_cnt */
unsigned int ref_cnt;
struct completion comp;
+ u8 handle_icr;
};
struct wil_blob_wrapper {
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 8a603432f531..3928b13ae026 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -2802,7 +2802,7 @@ static void wmi_event_handle(struct wil6210_priv *wil,
if (mid == MID_BROADCAST)
mid = 0;
- if (mid >= wil->max_vifs) {
+ if (mid >= ARRAY_SIZE(wil->vifs) || mid >= wil->max_vifs) {
wil_dbg_wmi(wil, "invalid mid %d, event skipped\n",
mid);
return;
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index b37e7391f55d..8a226a9d755e 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5596,7 +5596,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
/* fill hw info */
ieee80211_hw_set(hw, RX_INCLUDES_FCS);
ieee80211_hw_set(hw, SIGNAL_DBM);
-
+ ieee80211_hw_set(hw, MFP_CAPABLE);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT) |
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index 44ab080d6518..88446258e775 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -5320,7 +5320,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
for (i = 0; i < 4; i++) {
if (dev->phy.rev >= 3)
- table[i] = coef[i];
+ coef[i] = table[i];
else
coef[i] = 0;
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 770cc218ca4b..ef1f1b5d63b1 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -3835,6 +3835,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
/* fill hw info */
ieee80211_hw_set(hw, RX_INCLUDES_FCS);
ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, MFP_CAPABLE); /* Allow WPA3 in software */
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c
index 35ccf400b02c..87045e30e585 100644
--- a/drivers/net/wireless/broadcom/b43legacy/xmit.c
+++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c
@@ -571,6 +571,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
default:
b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n",
chanstat);
+ goto drop;
}
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index bbdc6000afb9..96dc9e5ab23f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5282,7 +5282,8 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif,
return false;
}
-static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
+static bool brcmf_is_linkdown(struct brcmf_cfg80211_vif *vif,
+ const struct brcmf_event_msg *e)
{
u32 event = e->event_code;
u16 flags = e->flags;
@@ -5291,6 +5292,8 @@ static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
(event == BRCMF_E_DISASSOC_IND) ||
((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) {
brcmf_dbg(CONN, "Processing link down\n");
+ clear_bit(BRCMF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state);
return true;
}
return false;
@@ -5581,7 +5584,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
} else
brcmf_bss_connect_done(cfg, ndev, e, true);
brcmf_net_setcarrier(ifp, true);
- } else if (brcmf_is_linkdown(e)) {
+ } else if (brcmf_is_linkdown(ifp->vif, e)) {
brcmf_dbg(CONN, "Linkdown\n");
if (!brcmf_is_ibssmode(ifp->vif)) {
brcmf_bss_connect_done(cfg, ndev, e, false);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 9d7b8834b854..db4c541f58ae 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -438,7 +438,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
if (ret || !(*ifp) || !(*ifp)->ndev) {
- if (ret != -ENODATA && *ifp)
+ if (ret != -ENODATA && *ifp && (*ifp)->ndev)
(*ifp)->ndev->stats.rx_errors++;
brcmu_pkt_buf_free_skb(skb);
return -ENODATA;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 4c5a3995dc35..d7f41caa0b0b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -281,13 +281,14 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
if (!err)
ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC);
+ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
+
if (drvr->settings->feature_disable) {
brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
ifp->drvr->feat_flags,
drvr->settings->feature_disable);
ifp->drvr->feat_flags &= ~drvr->settings->feature_disable;
}
- brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
brcmf_feat_firmware_overrides(drvr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index d5bb81e88762..9d2367133c7c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -30,7 +30,7 @@
#define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008
#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
-#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002
+#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0004
#define BRCMF_STA_BRCM 0x00000001 /* Running a Broadcom driver */
#define BRCMF_STA_WME 0x00000002 /* WMM association */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 1de8497d92b8..dc7c970257d2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -653,6 +653,7 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
int ifidx)
{
+ struct brcmf_fws_hanger_item *hi;
bool (*matchfn)(struct sk_buff *, void *) = NULL;
struct sk_buff *skb;
int prec;
@@ -664,6 +665,9 @@ static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
while (skb) {
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+ hi = &fws->hanger.items[hslot];
+ WARN_ON(skb != hi->pkt);
+ hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
true);
brcmu_pkt_buf_free_skb(skb);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index ee922b052561..768a99c15c08 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -1563,6 +1563,8 @@ fail:
BRCMF_TX_IOCTL_MAX_MSG_SIZE,
msgbuf->ioctbuf,
msgbuf->ioctbuf_handle);
+ if (msgbuf->txflow_wq)
+ destroy_workqueue(msgbuf->txflow_wq);
kfree(msgbuf);
}
return -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index e0211321fe9e..a5195bdb4d9b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1933,6 +1933,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
+ brcmf_sdio_rxfail(bus, true, true);
+ sdio_release_host(bus->sdiodev->func1);
brcmu_pkt_buf_free_skb(pkt);
continue;
}
@@ -3631,7 +3633,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
if (bus->idlecount > bus->idletime) {
brcmf_dbg(SDIO, "idle\n");
sdio_claim_host(bus->sdiodev->func1);
- brcmf_sdio_wd_timer(bus, false);
+#ifdef DEBUG
+ if (!BRCMF_FWCON_ON() ||
+ bus->console_interval == 0)
+#endif
+ brcmf_sdio_wd_timer(bus, false);
bus->idlecount = 0;
brcmf_sdio_bus_sleep(bus, true, false);
sdio_release_host(bus->sdiodev->func1);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index 9fb0d9fbd939..d532decc1538 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -5085,8 +5085,10 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft;
pi->pi_fptr.detach = wlc_phy_detach_lcnphy;
- if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
+ if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) {
+ kfree(pi->u.pi_lcnphy);
return false;
+ }
if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
if (pi_lcn->lcnphy_tempsense_option == 3) {
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index c3fe9bfff812..5a6ee0b014da 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -1928,6 +1928,10 @@ static netdev_tx_t mpi_start_xmit(struct sk_buff *skb,
airo_print_err(dev->name, "%s: skb == NULL!",__func__);
return NETDEV_TX_OK;
}
+ if (skb_padto(skb, ETH_ZLEN)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
npacks = skb_queue_len (&ai->txq);
if (npacks >= MAXTXQ - 1) {
@@ -2130,6 +2134,10 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
airo_print_err(dev->name, "%s: skb == NULL!", __func__);
return NETDEV_TX_OK;
}
+ if (skb_padto(skb, ETH_ZLEN)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
/* Find a vacant FID */
for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ );
@@ -2204,6 +2212,10 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb,
airo_print_err(dev->name, "%s: skb == NULL!", __func__);
return NETDEV_TX_OK;
}
+ if (skb_padto(skb, ETH_ZLEN)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
/* Find a vacant FID */
for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ );
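The airo.c hunks above pad undersized frames before they reach the hardware. skb_padto() grows the buffer to the requested minimum and, on allocation failure, frees the skb itself, so the transmit handler only bumps the drop counter and returns NETDEV_TX_OK. A minimal sketch:

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (skb_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;	/* skb was freed by skb_padto() */
		return NETDEV_TX_OK;
	}

	/* ... queue the now >= ETH_ZLEN frame to the device ... */
	return NETDEV_TX_OK;
}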
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
index d32d39fa2686..c129291d3a17 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
@@ -647,8 +647,10 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
}
if (ext->alg != IW_ENCODE_ALG_NONE) {
- memcpy(sec.keys[idx], ext->key, ext->key_len);
- sec.key_sizes[idx] = ext->key_len;
+ int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN);
+
+ memcpy(sec.keys[idx], ext->key, key_len);
+ sec.key_sizes[idx] = key_len;
sec.flags |= (1 << idx);
if (ext->alg == IW_ENCODE_ALG_WEP) {
sec.encode_alg[idx] = SEC_ALG_WEP;
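The libipw_wx.c hunk above bounds a user-supplied key length before copying it into a fixed-size slot. A tiny sketch of that clamp, with SLOT_LEN standing in for SCM_KEY_LEN:

#define SLOT_LEN 32	/* illustrative size of the destination key slot */

static void example_store_key(u8 *slot, const u8 *key, u16 reported_len)
{
	int key_len = clamp_val(reported_len, 0, SLOT_LEN);

	memcpy(slot, key, key_len);	/* can never overflow the slot */
}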
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index e16f2597c219..c1c1cf330de7 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -4302,8 +4302,8 @@ il_apm_init(struct il_priv *il)
* power savings, even without L1.
*/
if (il->cfg->set_l0s) {
- pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
- if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
+ ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
+ if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) {
/* L1-ASPM enabled; disable(!) L0S */
il_set_bit(il, CSR_GIO_REG,
CSR_GIO_REG_VAL_L0S_ENABLED);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 798605c4f122..5287f21d7ba6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -520,7 +520,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
const size_t bufsz = sizeof(buf);
int pos = 0;
+ mutex_lock(&mvm->mutex);
iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+ mutex_unlock(&mvm->mutex);
+
do_div(curr_os, NSEC_PER_USEC);
diff = curr_os - curr_gp2;
pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 58653598db14..2fad20c845b4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2880,7 +2880,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
/* this would be a mac80211 bug ... but don't crash */
if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
- return -EINVAL;
+ return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL;
/*
* If we are in a STA removal flow and in DQA mode:
@@ -3424,9 +3424,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
IWL_DEBUG_TE(mvm,
- "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
- channel->hw_value, req_dur, duration, delay,
- dtim_interval);
+ "ROC: Requesting to remain on channel %u for %ums\n",
+ channel->hw_value, req_dur);
+ IWL_DEBUG_TE(mvm,
+ "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
+ duration, delay, dtim_interval);
+
/* Set the node address */
memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 0e26619fb330..d932171617e6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1192,6 +1192,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
if (device_reprobe(reprobe->dev))
dev_err(reprobe->dev, "reprobe failed!\n");
+ put_device(reprobe->dev);
kfree(reprobe);
module_put(THIS_MODULE);
}
@@ -1242,7 +1243,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
module_put(THIS_MODULE);
return;
}
- reprobe->dev = mvm->trans->dev;
+ reprobe->dev = get_device(mvm->trans->dev);
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index e6a67bc02209..bdb87d8e9644 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -587,6 +587,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_mvm_stat_data {
struct iwl_mvm *mvm;
+ __le32 flags;
__le32 mac_id;
u8 beacon_filter_average_energy;
void *general;
@@ -630,6 +631,13 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
}
}
+ /* make sure that beacon statistics don't go backwards when TCM
+ * requests clearing the statistics
+ */
+ if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+ mvmvif->beacon_stats.accu_num_beacons +=
+ mvmvif->beacon_stats.num_beacons;
+
if (mvmvif->id != id)
return;
@@ -790,6 +798,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
flags = stats->flag;
}
+ data.flags = flags;
iwl_mvm_rx_stats_check_trigger(mvm, pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 6783b20d9681..a1cecf4a0e82 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -159,8 +159,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
/* Allocate IML */
iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
&trans_pcie->iml_dma_addr, GFP_KERNEL);
- if (!iml_img)
- return -ENOMEM;
+ if (!iml_img) {
+ ret = -ENOMEM;
+ goto err_free_ctxt_info;
+ }
memcpy(iml_img, trans->iml, trans->iml_len);
@@ -177,6 +179,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
return 0;
+err_free_ctxt_info:
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+ trans_pcie->ctxt_info_gen3,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
dma_free_coherent(trans->dev,
sizeof(*prph_info),
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 24da49615135..fcda33482887 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2121,18 +2121,38 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
void *buf, int dwords)
{
unsigned long flags;
- int offs, ret = 0;
+ int offs = 0;
u32 *vals = buf;
- if (iwl_trans_grab_nic_access(trans, &flags)) {
- iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
- for (offs = 0; offs < dwords; offs++)
- vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
- iwl_trans_release_nic_access(trans, &flags);
- } else {
- ret = -EBUSY;
+ while (offs < dwords) {
+ /* limit the time we spin here under lock to 1/2s */
+ unsigned long end = jiffies + HZ / 2;
+ bool resched = false;
+
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
+ iwl_write32(trans, HBUS_TARG_MEM_RADDR,
+ addr + 4 * offs);
+
+ while (offs < dwords) {
+ vals[offs] = iwl_read32(trans,
+ HBUS_TARG_MEM_RDAT);
+ offs++;
+
+ if (time_after(jiffies, end)) {
+ resched = true;
+ break;
+ }
+ }
+ iwl_trans_release_nic_access(trans, &flags);
+
+ if (resched)
+ cond_resched();
+ } else {
+ return -EBUSY;
+ }
}
- return ret;
+
+ return 0;
}
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
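The trans.c hunk above turns an unbounded register-read loop into time-bounded chunks: NIC access is held for at most half a second at a time, and cond_resched() gives other tasks a chance between chunks. A sketch of that loop shape, with read_word() and the access grab/release reduced to comments (both are placeholders):

static int example_copy_mem(u32 *vals, int dwords)
{
	int offs = 0;

	while (offs < dwords) {
		unsigned long end = jiffies + HZ / 2;

		/* ... grab NIC access / take the hardware lock here ... */
		while (offs < dwords) {
			vals[offs] = read_word(offs);	/* hypothetical */
			offs++;
			if (time_after(jiffies, end))
				break;	/* held the hardware long enough */
		}
		/* ... release NIC access here ... */

		cond_resched();		/* let other tasks run */
	}

	return 0;
}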
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 7b1dff92b709..dea29a69aaf6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -654,6 +654,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
struct iwl_tfh_tfd *tfd;
+ unsigned long flags2;
copy_size = sizeof(struct iwl_cmd_header_wide);
cmd_size = sizeof(struct iwl_cmd_header_wide);
@@ -722,14 +723,14 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
- spin_lock_bh(&txq->lock);
+ spin_lock_irqsave(&txq->lock, flags2);
idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_bh(&txq->lock);
+ spin_unlock_irqrestore(&txq->lock, flags2);
IWL_ERR(trans, "No space in command queue\n");
iwl_op_mode_cmd_queue_full(trans->op_mode);
@@ -870,7 +871,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
out:
- spin_unlock_bh(&txq->lock);
+ spin_unlock_irqrestore(&txq->lock, flags2);
free_dup_buf:
if (idx < 0)
kfree(dup_buf);
@@ -1231,6 +1232,9 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
iwl_pcie_gen2_txq_unmap(trans, queue);
+ iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
+ trans_pcie->txq[queue] = NULL;
+
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index b73582ec03a0..41ba0a719375 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -631,6 +631,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ if (!txq) {
+ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
+ return;
+ }
+
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
@@ -1490,6 +1495,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
u32 cmd_pos;
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+ unsigned long flags2;
if (WARN(!trans->wide_cmd_header &&
group_id > IWL_ALWAYS_LONG_GROUP,
@@ -1573,10 +1579,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
- spin_lock_bh(&txq->lock);
+ spin_lock_irqsave(&txq->lock, flags2);
if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_bh(&txq->lock);
+ spin_unlock_irqrestore(&txq->lock, flags2);
IWL_ERR(trans, "No space in command queue\n");
iwl_op_mode_cmd_queue_full(trans->op_mode);
@@ -1737,7 +1743,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
out:
- spin_unlock_bh(&txq->lock);
+ spin_unlock_irqrestore(&txq->lock, flags2);
free_dup_buf:
if (idx < 0)
kfree(dup_buf);
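The tx.c and tx-gen2.c hunks above switch the command-queue lock from the _bh variants to spin_lock_irqsave()/spin_unlock_irqrestore(); the _bh forms cannot be used when the caller may already be running with interrupts disabled, while the irqsave form is safe in any context. A minimal sketch (the queue type is illustrative):

struct example_queue {
	spinlock_t lock;
	int write_ptr;
};

static void example_enqueue(struct example_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);	/* safe whatever the context */
	q->write_ptr++;
	spin_unlock_irqrestore(&q->lock, flags);
}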
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index b704e4bce171..a04d59843022 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -1237,13 +1237,6 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->len < ETH_HLEN)
goto drop;
- ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
- if (!ctx)
- goto busy;
-
- memset(ctx->buf, 0, BULK_BUF_SIZE);
- buf = ctx->buf->data;
-
tx_control = 0;
err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
@@ -1251,6 +1244,13 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
if (err)
goto drop;
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
+ if (!ctx)
+ goto drop;
+
+ memset(ctx->buf, 0, BULK_BUF_SIZE);
+ buf = ctx->buf->data;
+
{
__le16 *tx_cntl = (__le16 *)buf;
*tx_cntl = cpu_to_le16(tx_control);
diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
index 57ad56435dda..8bc0286b4f8c 100644
--- a/drivers/net/wireless/intersil/p54/p54pci.c
+++ b/drivers/net/wireless/intersil/p54/p54pci.c
@@ -332,10 +332,12 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
struct p54p_desc *desc;
dma_addr_t mapping;
u32 idx, i;
+ __le32 device_addr;
spin_lock_irqsave(&priv->lock, flags);
idx = le32_to_cpu(ring_control->host_idx[1]);
i = idx % ARRAY_SIZE(ring_control->tx_data);
+ device_addr = ((struct p54_hdr *)skb->data)->req_id;
mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
@@ -349,7 +351,7 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
desc = &ring_control->tx_data[i];
desc->host_addr = cpu_to_le32(mapping);
- desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
+ desc->device_addr = device_addr;
desc->len = cpu_to_le16(skb->len);
desc->flags = 0;
diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c
index 15661da6eedc..39cfabf968d4 100644
--- a/drivers/net/wireless/intersil/p54/p54usb.c
+++ b/drivers/net/wireless/intersil/p54/p54usb.c
@@ -64,6 +64,7 @@ static const struct usb_device_id p54u_table[] = {
{USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */
{USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */
{USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */
+ {USB_DEVICE(0x124a, 0x4026)}, /* AirVasT USB wireless device */
{USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */
{USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */
{USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index ce2dd06af62e..3564f5869b44 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3327,9 +3327,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
param.no_vif = true;
if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
- hwname = kasprintf(GFP_KERNEL, "%.*s",
- nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
- (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ GFP_KERNEL);
if (!hwname)
return -ENOMEM;
param.hwname = hwname;
@@ -3385,9 +3385,9 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
} else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
- hwname = kasprintf(GFP_KERNEL, "%.*s",
- nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
- (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ GFP_KERNEL);
if (!hwname)
return -ENOMEM;
} else
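In the mac80211_hwsim hunks above, the radio name arrives as a length-delimited netlink attribute that is not guaranteed to be NUL-terminated, and it is now duplicated with kstrndup() so the copy gets an explicit length bound and its own terminator. A small userspace sketch of that pattern, using POSIX strndup() as a stand-in for the kernel helper:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* Length-delimited name with no terminating NUL, as in nla_data(). */
	const char raw[] = { 'h', 'w', 's', 'i', 'm', '0' };
	char *name;

	name = strndup(raw, sizeof(raw));	/* userspace kstrndup() analogue */
	if (!name)
		return 1;

	printf("radio name: %s\n", name);	/* safe: copy is NUL-terminated */
	free(name);
	return 0;
}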
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index b0cb16ef8d1d..b313c78e2154 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -793,19 +793,6 @@ static const struct attribute_group mesh_ie_group = {
.attrs = mesh_ie_attrs,
};
-static void lbs_persist_config_init(struct net_device *dev)
-{
- int ret;
- ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
- ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
-}
-
-static void lbs_persist_config_remove(struct net_device *dev)
-{
- sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group);
- sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group);
-}
-
/***************************************************************************
* Initializing and starting, stopping mesh
@@ -1005,6 +992,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+ mesh_dev->sysfs_groups[0] = &lbs_mesh_attr_group;
+ mesh_dev->sysfs_groups[1] = &boot_opts_group;
+ mesh_dev->sysfs_groups[2] = &mesh_ie_group;
+
/* Register virtual mesh interface */
ret = register_netdev(mesh_dev);
if (ret) {
@@ -1012,19 +1003,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
goto err_free_netdev;
}
- ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
- if (ret)
- goto err_unregister;
-
- lbs_persist_config_init(mesh_dev);
-
/* Everything successful */
ret = 0;
goto done;
-err_unregister:
- unregister_netdev(mesh_dev);
-
err_free_netdev:
free_netdev(mesh_dev);
@@ -1045,8 +1027,6 @@ void lbs_remove_mesh(struct lbs_private *priv)
netif_stop_queue(mesh_dev);
netif_carrier_off(mesh_dev);
- sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
- lbs_persist_config_remove(mesh_dev);
unregister_netdev(mesh_dev);
priv->mesh_dev = NULL;
kfree(mesh_dev->ieee80211_ptr);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 7b74ef71bef1..650191db25cb 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1468,7 +1468,8 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- static struct mwifiex_sta_node *node;
+ struct mwifiex_sta_node *node;
+ int i;
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
priv->media_connected && idx == 0) {
@@ -1478,13 +1479,10 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
mwifiex_send_cmd(priv, HOST_CMD_APCMD_STA_LIST,
HostCmd_ACT_GEN_GET, 0, NULL, true);
- if (node && (&node->list == &priv->sta_list)) {
- node = NULL;
- return -ENOENT;
- }
-
- node = list_prepare_entry(node, &priv->sta_list, list);
- list_for_each_entry_continue(node, &priv->sta_list, list) {
+ i = 0;
+ list_for_each_entry(node, &priv->sta_list, list) {
+ if (i++ != idx)
+ continue;
ether_addr_copy(mac, node->mac_addr);
return mwifiex_dump_station_info(priv, node, sinfo);
}
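The mwifiex_cfg80211_dump_station() change above drops the static cursor and re-walks the station list with a local counter on every call, returning the idx-th entry or -ENOENT. A self-contained sketch of that lookup over a toy singly linked list (the node type is hypothetical, standing in for struct mwifiex_sta_node):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical node, standing in for struct mwifiex_sta_node. */
struct node {
	int value;
	struct node *next;
};

/* Return the idx-th list entry, or NULL when idx is out of range. */
static struct node *nth_entry(struct node *head, int idx)
{
	struct node *n;
	int i = 0;

	for (n = head; n; n = n->next)
		if (i++ == idx)
			return n;
	return NULL;		/* caller maps this to -ENOENT */
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	int idx;

	for (idx = 0; idx < 4; idx++) {
		struct node *n = nth_entry(&a, idx);

		printf("idx %d -> %s\n", idx, n ? "found" : "not found");
	}
	return 0;
}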
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 1fb76d2f5d3f..8b9d0809daf6 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -953,7 +953,7 @@ struct mwifiex_tkip_param {
struct mwifiex_aes_param {
u8 pn[WPA_PN_SIZE];
__le16 key_len;
- u8 key[WLAN_KEY_LEN_CCMP];
+ u8 key[WLAN_KEY_LEN_CCMP_256];
} __packed;
struct mwifiex_wapi_param {
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index d87aeff70cef..c2cb1e711c06 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -877,6 +877,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN);
+ if (req_ssid->ssid_len > IEEE80211_MAX_SSID_LEN)
+ req_ssid->ssid_len = IEEE80211_MAX_SSID_LEN;
memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len);
mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index e48b47f42554..ceac611ef086 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1474,6 +1474,8 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
mwifiex_deauthenticate(priv, NULL);
+ mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
+
mwifiex_uninit_sw(adapter);
if (adapter->if_ops.down_dev)
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 991b9cc18000..5907b34037c2 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -381,6 +381,8 @@ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev)
clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+
+ card->pci_reset_ongoing = true;
}
/*
@@ -409,6 +411,8 @@ static void mwifiex_pcie_reset_done(struct pci_dev *pdev)
dev_err(&pdev->dev, "reinit failed: %d\n", ret);
else
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+
+ card->pci_reset_ongoing = false;
}
static const struct pci_error_handlers mwifiex_pcie_err_handler = {
@@ -3000,7 +3004,19 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
int ret;
u32 fw_status;
- cancel_work_sync(&card->work);
+ /* Call cancel_work_sync() only when we are not resetting the card,
+ * because that function never returns while we are in the reset
+ * path. Reaching this point during a reset means the reset itself
+ * failed (reset failure path).
+ */
+ if (!card->pci_reset_ongoing) {
+ mwifiex_dbg(adapter, MSG, "performing cancel_work_sync()...\n");
+ cancel_work_sync(&card->work);
+ mwifiex_dbg(adapter, MSG, "cancel_work_sync() done\n");
+ } else {
+ mwifiex_dbg(adapter, MSG,
+ "skipped cancel_work_sync() because we're in card reset failure path\n");
+ }
ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
if (fw_status == FIRMWARE_READY_PCIE) {
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index f7ce9b6db6b4..72d0c01ff359 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -391,6 +391,8 @@ struct pcie_service_card {
struct mwifiex_msix_context share_irq_ctx;
struct work_struct work;
unsigned long work_flags;
+
+ bool pci_reset_ongoing;
};
static inline int
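The mwifiex PCIe hunks above add a pci_reset_ongoing flag so that cleanup skips cancel_work_sync() when it is reached from the reset worker itself, where waiting for that work would never return. A rough userspace analogue built on pthreads, where teardown skips joining the worker when it runs on that very worker (the struct and helpers are invented for illustration; build with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented state, loosely mirroring struct pcie_service_card. */
static struct {
	pthread_t worker;
	bool reset_ongoing;	/* analogue of pci_reset_ongoing */
} card;

static void cleanup(void)
{
	/*
	 * Waiting for the worker from the worker's own reset path would
	 * never return, so skip the wait while the reset flag is set.
	 */
	if (!card.reset_ongoing)
		pthread_join(card.worker, NULL);
	else
		puts("skipped join: teardown runs on the reset worker");
}

static void *reset_worker(void *arg)
{
	(void)arg;
	card.reset_ongoing = true;
	cleanup();			/* reset failed; tear down from here */
	card.reset_ongoing = false;
	return NULL;
}

int main(void)
{
	pthread_create(&card.worker, NULL, reset_worker, NULL);
	pthread_join(&card.worker, NULL);
	return 0;
}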
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 85d6d5f3dce5..c9f6cd291969 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1895,7 +1895,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
chan, CFG80211_BSS_FTYPE_UNKNOWN,
bssid, timestamp,
cap_info_bitmap, beacon_period,
- ie_buf, ie_len, rssi, GFP_KERNEL);
+ ie_buf, ie_len, rssi, GFP_ATOMIC);
if (bss) {
bss_priv = (struct mwifiex_bss_priv *)bss->priv;
bss_priv->band = band;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index bfbe3aa058d9..0773d81072aa 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -1985,6 +1985,8 @@ error:
kfree(card->mpa_rx.buf);
card->mpa_tx.buf_size = 0;
card->mpa_rx.buf_size = 0;
+ card->mpa_tx.buf = NULL;
+ card->mpa_rx.buf = NULL;
}
return ret;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 69e3b624adbb..7003767eef42 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -581,6 +581,11 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
{
struct host_cmd_ds_802_11_key_material *key =
&resp->params.key_material;
+ int len;
+
+ len = le16_to_cpu(key->key_param_set.key_len);
+ if (len > sizeof(key->key_param_set.key))
+ return -EINVAL;
if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) {
if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) {
@@ -594,9 +599,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
memset(priv->aes_key.key_param_set.key, 0,
sizeof(key->key_param_set.key));
- priv->aes_key.key_param_set.key_len = key->key_param_set.key_len;
- memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key,
- le16_to_cpu(priv->aes_key.key_param_set.key_len));
+ priv->aes_key.key_param_set.key_len = cpu_to_le16(len);
+ memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, len);
return 0;
}
@@ -611,9 +615,14 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_802_11_key_material_v2 *key_v2;
- __le16 len;
+ int len;
key_v2 = &resp->params.key_material_v2;
+
+ len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len);
+ if (len > sizeof(key_v2->key_param_set.key_params.aes.key))
+ return -EINVAL;
+
if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
@@ -627,12 +636,11 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
return 0;
memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
- WLAN_KEY_LEN_CCMP);
+ sizeof(key_v2->key_param_set.key_params.aes.key));
priv->aes_key_v2.key_param_set.key_params.aes.key_len =
- key_v2->key_param_set.key_params.aes.key_len;
- len = priv->aes_key_v2.key_param_set.key_params.aes.key_len;
+ cpu_to_le16(len);
memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
- key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len));
+ key_v2->key_param_set.key_params.aes.key, len);
return 0;
}
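Both key-material handlers above now read the firmware-reported key length into a local int and reject it with -EINVAL if it exceeds the destination array, before any memcpy() takes place. A minimal userspace sketch of that bound check (KEY_BUF_LEN and struct key_store are illustrative stand-ins, not the driver's types):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEY_BUF_LEN 32			/* stand-in for WLAN_KEY_LEN_CCMP_256 */

struct key_store {
	uint16_t key_len;
	uint8_t key[KEY_BUF_LEN];
};

/* Reject device-reported lengths that do not fit the local buffer. */
static int store_key(struct key_store *dst, const uint8_t *src, size_t len)
{
	if (len > sizeof(dst->key))
		return -EINVAL;

	memset(dst->key, 0, sizeof(dst->key));
	memcpy(dst->key, src, len);
	dst->key_len = (uint16_t)len;
	return 0;
}

int main(void)
{
	struct key_store store;
	uint8_t good[16] = { 0 }, bad[64] = { 0 };

	printf("16-byte key: %d\n", store_key(&store, good, sizeof(good)));
	printf("64-byte key: %d\n", store_key(&store, bad, sizeof(bad)));
	return 0;
}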
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index d445acc4786b..2a8d40ce463d 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -1355,7 +1355,8 @@ static void mwifiex_usb_cleanup_tx_aggr(struct mwifiex_adapter *adapter)
skb_dequeue(&port->tx_aggr.aggr_list)))
mwifiex_write_data_complete(adapter, skb_tmp,
0, -1);
- del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
+ if (port->tx_aggr.timer_cnxt.hold_timer.function)
+ del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
}
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index ffc565ac2192..6769b0c5a5cd 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -1469,6 +1469,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL);
if (txq->skb == NULL) {
pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
+ txq->txd = NULL;
return -ENOMEM;
}
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 73c8b2805c97..97df6b3a472b 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -154,8 +154,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
struct ieee80211_sta *sta;
struct mt76_rx_tid *tid;
bool sn_less;
- u16 seqno, head, size;
- u8 ackp, idx;
+ u16 seqno, head, size, idx;
+ u8 ackp;
__skb_queue_tail(frames, skb);
@@ -240,7 +240,7 @@ out:
}
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
- u16 ssn, u8 size)
+ u16 ssn, u16 size)
{
struct mt76_rx_tid *tid;
@@ -264,7 +264,7 @@ EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
- u8 size = tid->size;
+ u16 size = tid->size;
int i;
cancel_delayed_work(&tid->reorder_work);
@@ -278,6 +278,7 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
if (!skb)
continue;
+ tid->reorder_buf[i] = NULL;
tid->nframes--;
dev_kfree_skb(skb);
}
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index cc6840377bc2..57866c1e9c98 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -393,22 +393,27 @@ static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
int len, bool more)
{
- struct page *page = virt_to_head_page(data);
- int offset = data - page_address(page);
struct sk_buff *skb = q->rx_head;
struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int nr_frags = shinfo->nr_frags;
- if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
- offset += q->buf_offset;
- skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
- q->buf_size);
+ if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
+ struct page *page = virt_to_head_page(data);
+ int offset = data - page_address(page) + q->buf_offset;
+
+ skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
+ } else {
+ skb_free_frag(data);
}
if (more)
return;
q->rx_head = NULL;
- dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+ if (nr_frags < ARRAY_SIZE(shinfo->frags))
+ dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+ else
+ dev_kfree_skb(skb);
}
static int
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 2eab35879163..7b1667ec619e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -193,8 +193,8 @@ struct mt76_rx_tid {
struct delayed_work reorder_work;
u16 head;
- u8 size;
- u8 nframes;
+ u16 size;
+ u16 nframes;
u8 started:1, stopped:1, timer_pending:1;
@@ -537,7 +537,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
- u16 ssn, u8 size);
+ u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
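The mt76 changes above widen the receive-reorder bookkeeping from u8 to u16 because a BlockAck window size such as 256 does not fit in eight bits and would silently truncate. A short demonstration of the truncation the wider fields avoid:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t requested = 256;		/* e.g. a 256-frame BA window */
	uint8_t narrow = (uint8_t)requested;	/* old u8 field: truncates to 0 */
	uint16_t wide = requested;		/* new u16 field: kept intact */

	printf("requested=%u narrow=%u wide=%u\n",
	       (unsigned)requested, (unsigned)narrow, (unsigned)wide);
	return 0;
}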
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 47cebb2ec05c..5aacabd32923 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -160,8 +160,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
if (new_p) {
/* we have one extra ref from the allocator */
- __free_pages(e->p, MT_RX_ORDER);
-
+ put_page(e->p);
e->p = new_p;
}
}
@@ -318,7 +317,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
}
e = &q->e[q->end];
- e->skb = skb;
usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
mt7601u_complete_tx, q);
ret = usb_submit_urb(e->urb, GFP_ATOMIC);
@@ -336,6 +334,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
q->end = (q->end + 1) % q->entries;
q->used++;
+ e->skb = skb;
if (q->used >= q->entries)
ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
index 76117b402880..6ab1035e4a12 100644
--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
@@ -107,7 +107,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
{
u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
- return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
+ return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
}
static void
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 734844b34c26..dd473b206f12 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -894,6 +894,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
default:
pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid,
vif->vifid, vif->wdev.iftype);
+ dev_kfree_skb(cmd_skb);
ret = -EINVAL;
goto out;
}
@@ -2212,6 +2213,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
break;
default:
pr_err("unsupported iftype %d\n", vif->wdev.iftype);
+ dev_kfree_skb(cmd_skb);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 070ea0f456ab..b80cff96dea1 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -5453,7 +5453,6 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
usb_unanchor_urb(urb);
- usb_free_urb(urb);
goto error;
}
@@ -5462,6 +5461,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
error:
+ usb_free_urb(urb);
return ret;
}
@@ -5787,6 +5787,7 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
struct rtl8xxxu_priv *priv = hw->priv;
struct rtl8xxxu_rx_urb *rx_urb;
struct rtl8xxxu_tx_urb *tx_urb;
+ struct sk_buff *skb;
unsigned long flags;
int ret, i;
@@ -5837,6 +5838,13 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
rx_urb->hw = hw;
ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
+ if (ret) {
+ if (ret != -ENOMEM) {
+ skb = (struct sk_buff *)rx_urb->urb.context;
+ dev_kfree_skb(skb);
+ }
+ rtl8xxxu_queue_rx_urb(priv, rx_urb);
+ }
}
exit:
/*
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index a3189294ecb8..6d1b6a4a8150 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -457,9 +457,14 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
}
}
-static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+static int _rtl_init_deferred_work(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct workqueue_struct *wq;
+
+ wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
+ if (!wq)
+ return -ENOMEM;
/* <1> timer */
timer_setup(&rtlpriv->works.watchdog_timer,
@@ -468,11 +473,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
rtl_easy_concurrent_retrytimer_callback, 0);
/* <2> work queue */
rtlpriv->works.hw = hw;
- rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
- if (unlikely(!rtlpriv->works.rtl_wq)) {
- pr_err("Failed to allocate work queue\n");
- return;
- }
+ rtlpriv->works.rtl_wq = wq;
INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
(void *)rtl_watchdog_wq_callback);
@@ -486,7 +487,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
(void *)rtl_fwevt_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq,
(void *)rtl_c2hcmd_wq_callback);
-
+ return 0;
}
void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
@@ -586,9 +587,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
rtlmac->link_state = MAC80211_NOLINK;
/* <6> init deferred work */
- _rtl_init_deferred_work(hw);
-
- return 0;
+ return _rtl_init_deferred_work(hw);
}
EXPORT_SYMBOL_GPL(rtl_init_core);
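_rtl_init_deferred_work() above is reworked so that the only step that can fail, allocating the work queue, happens first and its failure is propagated through rtl_init_core() instead of leaving timers armed with no queue behind them. A compact userspace sketch of that shape (struct works, malloc() and the field names are illustrative stand-ins):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented state, loosely mirroring the driver's deferred-work fields. */
struct works {
	void *wq;		/* stands in for the workqueue pointer */
	int timers_armed;
};

static int init_deferred_work(struct works *w)
{
	void *wq = malloc(128);	/* stands in for alloc_workqueue() */

	if (!wq)
		return -ENOMEM;	/* fail before any timer/work is set up */

	w->timers_armed = 1;	/* the remaining setup cannot fail */
	w->wq = wq;
	return 0;
}

int main(void)
{
	struct works w = { 0 };
	int ret = init_deferred_work(&w);

	printf("init_deferred_work() = %d\n", ret);
	free(w.wq);
	return 0;
}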
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
index f87f9d03b9fa..ac44fd5d0597 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
@@ -272,7 +272,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
0x824, 0x00030FE0,
0x828, 0x00000000,
0x82C, 0x002081DD,
- 0x830, 0x2AAA8E24,
+ 0x830, 0x2AAAEEC8,
0x834, 0x0037A706,
0x838, 0x06489B44,
0x83C, 0x0000095B,
@@ -347,10 +347,10 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
0x9D8, 0x00000000,
0x9DC, 0x00000000,
0x9E0, 0x00005D00,
- 0x9E4, 0x00000002,
+ 0x9E4, 0x00000003,
0x9E8, 0x00000001,
0xA00, 0x00D047C8,
- 0xA04, 0x01FF000C,
+ 0xA04, 0x01FF800C,
0xA08, 0x8C8A8300,
0xA0C, 0x2E68000F,
0xA10, 0x9500BB78,
@@ -1343,7 +1343,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x083, 0x00021800,
0x084, 0x00028000,
0x085, 0x00048000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x0009483A,
+ 0xA0000000, 0x00000000,
0x086, 0x00094838,
+ 0xB0000000, 0x00000000,
0x087, 0x00044980,
0x088, 0x00048000,
0x089, 0x0000D480,
@@ -1432,36 +1436,32 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x03C, 0x000CA000,
0x0EF, 0x00000000,
0x0EF, 0x00001100,
- 0xFF0F0104, 0xABCD,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0004ADF3,
0x034, 0x00049DF0,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0004ADF3,
0x034, 0x00049DF0,
- 0xFF0F0404, 0xCDEF,
- 0x034, 0x0004ADF3,
- 0x034, 0x00049DF0,
- 0xFF0F0200, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0004ADF5,
0x034, 0x00049DF2,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A0F3,
+ 0x034, 0x000490B1,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0004A0F3,
0x034, 0x000490B1,
- 0xCDCDCDCD, 0xCDCD,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004ADF5,
+ 0x034, 0x00049DF2,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004ADF3,
+ 0x034, 0x00049DF0,
+ 0xA0000000, 0x00000000,
0x034, 0x0004ADF7,
0x034, 0x00049DF3,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0104, 0xABCD,
- 0x034, 0x00048DED,
- 0x034, 0x00047DEA,
- 0x034, 0x00046DE7,
- 0x034, 0x00045CE9,
- 0x034, 0x00044CE6,
- 0x034, 0x000438C6,
- 0x034, 0x00042886,
- 0x034, 0x00041486,
- 0x034, 0x00040447,
- 0xFF0F0204, 0xCDEF,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00048DED,
0x034, 0x00047DEA,
0x034, 0x00046DE7,
@@ -1471,7 +1471,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00042886,
0x034, 0x00041486,
0x034, 0x00040447,
- 0xFF0F0404, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00048DED,
0x034, 0x00047DEA,
0x034, 0x00046DE7,
@@ -1481,7 +1481,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00042886,
0x034, 0x00041486,
0x034, 0x00040447,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000480AE,
+ 0x034, 0x000470AB,
+ 0x034, 0x0004608B,
+ 0x034, 0x00045069,
+ 0x034, 0x00044048,
+ 0x034, 0x00043045,
+ 0x034, 0x00042026,
+ 0x034, 0x00041023,
+ 0x034, 0x00040002,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x000480AE,
0x034, 0x000470AB,
0x034, 0x0004608B,
@@ -1491,7 +1501,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00042026,
0x034, 0x00041023,
0x034, 0x00040002,
- 0xCDCDCDCD, 0xCDCD,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x00048DED,
+ 0x034, 0x00047DEA,
+ 0x034, 0x00046DE7,
+ 0x034, 0x00045CE9,
+ 0x034, 0x00044CE6,
+ 0x034, 0x000438C6,
+ 0x034, 0x00042886,
+ 0x034, 0x00041486,
+ 0x034, 0x00040447,
+ 0xA0000000, 0x00000000,
0x034, 0x00048DEF,
0x034, 0x00047DEC,
0x034, 0x00046DE9,
@@ -1501,38 +1521,36 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x0004248A,
0x034, 0x0004108D,
0x034, 0x0004008A,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0200, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x80000210, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0002ADF4,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A0F3,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0002A0F3,
- 0xCDCDCDCD, 0xCDCD,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002ADF4,
+ 0xA0000000, 0x00000000,
0x034, 0x0002ADF7,
- 0xFF0F0200, 0xDEAD,
- 0xFF0F0104, 0xABCD,
- 0x034, 0x00029DF4,
- 0xFF0F0204, 0xCDEF,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00029DF4,
- 0xFF0F0404, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00029DF4,
- 0xFF0F0200, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00029DF1,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000290F0,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x000290F0,
- 0xCDCDCDCD, 0xCDCD,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x00029DF1,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x00029DF4,
+ 0xA0000000, 0x00000000,
0x034, 0x00029DF2,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0104, 0xABCD,
- 0x034, 0x00028DF1,
- 0x034, 0x00027DEE,
- 0x034, 0x00026DEB,
- 0x034, 0x00025CEC,
- 0x034, 0x00024CE9,
- 0x034, 0x000238CA,
- 0x034, 0x00022889,
- 0x034, 0x00021489,
- 0x034, 0x0002044A,
- 0xFF0F0204, 0xCDEF,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00028DF1,
0x034, 0x00027DEE,
0x034, 0x00026DEB,
@@ -1542,7 +1560,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00022889,
0x034, 0x00021489,
0x034, 0x0002044A,
- 0xFF0F0404, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00028DF1,
0x034, 0x00027DEE,
0x034, 0x00026DEB,
@@ -1552,7 +1570,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00022889,
0x034, 0x00021489,
0x034, 0x0002044A,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x000280AF,
0x034, 0x000270AC,
0x034, 0x0002608B,
@@ -1562,7 +1580,27 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00022026,
0x034, 0x00021023,
0x034, 0x00020002,
- 0xCDCDCDCD, 0xCDCD,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000280AF,
+ 0x034, 0x000270AC,
+ 0x034, 0x0002608B,
+ 0x034, 0x00025069,
+ 0x034, 0x00024048,
+ 0x034, 0x00023045,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020002,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x00028DF1,
+ 0x034, 0x00027DEE,
+ 0x034, 0x00026DEB,
+ 0x034, 0x00025CEC,
+ 0x034, 0x00024CE9,
+ 0x034, 0x000238CA,
+ 0x034, 0x00022889,
+ 0x034, 0x00021489,
+ 0x034, 0x0002044A,
+ 0xA0000000, 0x00000000,
0x034, 0x00028DEE,
0x034, 0x00027DEB,
0x034, 0x00026CCD,
@@ -1572,27 +1610,24 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00022849,
0x034, 0x00021449,
0x034, 0x0002004D,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F02C0, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x8000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D7,
+ 0x034, 0x000090D3,
+ 0x034, 0x000080B1,
+ 0x034, 0x000070AE,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0000A0D7,
0x034, 0x000090D3,
0x034, 0x000080B1,
0x034, 0x000070AE,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x034, 0x0000ADF7,
0x034, 0x00009DF4,
0x034, 0x00008DF1,
0x034, 0x00007DEE,
- 0xFF0F02C0, 0xDEAD,
- 0xFF0F0104, 0xABCD,
- 0x034, 0x00006DEB,
- 0x034, 0x00005CEC,
- 0x034, 0x00004CE9,
- 0x034, 0x000038CA,
- 0x034, 0x00002889,
- 0x034, 0x00001489,
- 0x034, 0x0000044A,
- 0xFF0F0204, 0xCDEF,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00006DEB,
0x034, 0x00005CEC,
0x034, 0x00004CE9,
@@ -1600,7 +1635,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00002889,
0x034, 0x00001489,
0x034, 0x0000044A,
- 0xFF0F0404, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x00006DEB,
0x034, 0x00005CEC,
0x034, 0x00004CE9,
@@ -1608,7 +1643,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00002889,
0x034, 0x00001489,
0x034, 0x0000044A,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0000608D,
0x034, 0x0000506B,
0x034, 0x0000404A,
@@ -1616,7 +1651,23 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00002044,
0x034, 0x00001025,
0x034, 0x00000004,
- 0xCDCDCDCD, 0xCDCD,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000608D,
+ 0x034, 0x0000506B,
+ 0x034, 0x0000404A,
+ 0x034, 0x00003047,
+ 0x034, 0x00002044,
+ 0x034, 0x00001025,
+ 0x034, 0x00000004,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x00006DEB,
+ 0x034, 0x00005CEC,
+ 0x034, 0x00004CE9,
+ 0x034, 0x000038CA,
+ 0x034, 0x00002889,
+ 0x034, 0x00001489,
+ 0x034, 0x0000044A,
+ 0xA0000000, 0x00000000,
0x034, 0x00006DCD,
0x034, 0x00005CCD,
0x034, 0x00004CCA,
@@ -1624,11 +1675,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x034, 0x00002888,
0x034, 0x00001488,
0x034, 0x00000486,
- 0xFF0F0104, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000040,
- 0xFF0F0104, 0xABCD,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x035, 0x00000187,
0x035, 0x00008187,
0x035, 0x00010187,
@@ -1638,7 +1689,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x035, 0x00040188,
0x035, 0x00048188,
0x035, 0x00050188,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x035, 0x00000187,
0x035, 0x00008187,
0x035, 0x00010187,
@@ -1648,7 +1699,37 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x035, 0x00040188,
0x035, 0x00048188,
0x035, 0x00050188,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x00000128,
+ 0x035, 0x00008128,
+ 0x035, 0x00010128,
+ 0x035, 0x000201C8,
+ 0x035, 0x000281C8,
+ 0x035, 0x000301C8,
+ 0x035, 0x000401C8,
+ 0x035, 0x000481C8,
+ 0x035, 0x000501C8,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x00000145,
+ 0x035, 0x00008145,
+ 0x035, 0x00010145,
+ 0x035, 0x00020196,
+ 0x035, 0x00028196,
+ 0x035, 0x00030196,
+ 0x035, 0x000401C7,
+ 0x035, 0x000481C7,
+ 0x035, 0x000501C7,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x00000128,
+ 0x035, 0x00008128,
+ 0x035, 0x00010128,
+ 0x035, 0x000201C8,
+ 0x035, 0x000281C8,
+ 0x035, 0x000301C8,
+ 0x035, 0x000401C8,
+ 0x035, 0x000481C8,
+ 0x035, 0x000501C8,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x035, 0x00000187,
0x035, 0x00008187,
0x035, 0x00010187,
@@ -1658,7 +1739,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x035, 0x00040188,
0x035, 0x00048188,
0x035, 0x00050188,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x035, 0x00000145,
0x035, 0x00008145,
0x035, 0x00010145,
@@ -1668,11 +1749,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x035, 0x000401C7,
0x035, 0x000481C7,
0x035, 0x000501C7,
- 0xFF0F0104, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000010,
- 0xFF0F0104, 0xABCD,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x036, 0x00085733,
0x036, 0x0008D733,
0x036, 0x00095733,
@@ -1685,7 +1766,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x036, 0x000CE4B4,
0x036, 0x000D64B4,
0x036, 0x000DE4B4,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x036, 0x00085733,
0x036, 0x0008D733,
0x036, 0x00095733,
@@ -1698,7 +1779,46 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x036, 0x000CE4B4,
0x036, 0x000D64B4,
0x036, 0x000DE4B4,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x000063B5,
+ 0x036, 0x0000E3B5,
+ 0x036, 0x000163B5,
+ 0x036, 0x0001E3B5,
+ 0x036, 0x000263B5,
+ 0x036, 0x0002E3B5,
+ 0x036, 0x000363B5,
+ 0x036, 0x0003E3B5,
+ 0x036, 0x000463B5,
+ 0x036, 0x0004E3B5,
+ 0x036, 0x000563B5,
+ 0x036, 0x0005E3B5,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x000056B3,
+ 0x036, 0x0000D6B3,
+ 0x036, 0x000156B3,
+ 0x036, 0x0001D6B3,
+ 0x036, 0x00026634,
+ 0x036, 0x0002E634,
+ 0x036, 0x00036634,
+ 0x036, 0x0003E634,
+ 0x036, 0x000467B4,
+ 0x036, 0x0004E7B4,
+ 0x036, 0x000567B4,
+ 0x036, 0x0005E7B4,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x000063B5,
+ 0x036, 0x0000E3B5,
+ 0x036, 0x000163B5,
+ 0x036, 0x0001E3B5,
+ 0x036, 0x000263B5,
+ 0x036, 0x0002E3B5,
+ 0x036, 0x000363B5,
+ 0x036, 0x0003E3B5,
+ 0x036, 0x000463B5,
+ 0x036, 0x0004E3B5,
+ 0x036, 0x000563B5,
+ 0x036, 0x0005E3B5,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x036, 0x00085733,
0x036, 0x0008D733,
0x036, 0x00095733,
@@ -1711,7 +1831,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x036, 0x000CE4B4,
0x036, 0x000D64B4,
0x036, 0x000DE4B4,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x036, 0x000056B3,
0x036, 0x0000D6B3,
0x036, 0x000156B3,
@@ -1724,103 +1844,162 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x036, 0x0004E7B4,
0x036, 0x000567B4,
0x036, 0x0005E7B4,
- 0xFF0F0104, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EF, 0x00000008,
- 0xFF0F0104, 0xABCD,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x000001C8,
0x03C, 0x00000492,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x000001C8,
0x03C, 0x00000492,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x000001B6,
+ 0x03C, 0x00000492,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000022A,
+ 0x03C, 0x00000594,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x000001B6,
+ 0x03C, 0x00000492,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x000001C8,
0x03C, 0x00000492,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x03C, 0x0000022A,
0x03C, 0x00000594,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0104, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x00000800,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x00000800,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x00000800,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x00000820,
- 0xCDCDCDCD, 0xCDCD,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000820,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000800,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000800,
+ 0xA0000000, 0x00000000,
0x03C, 0x00000900,
- 0xFF0F0104, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000002,
- 0xFF0F0104, 0xABCD,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x008, 0x0004E400,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x008, 0x0004E400,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
+ 0x008, 0x00002000,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x008, 0x00002000,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x008, 0x00002000,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x008, 0x00002000,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x008, 0x0004E400,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x008, 0x00002000,
- 0xFF0F0104, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0DF, 0x000000C0,
- 0x01F, 0x00040064,
- 0xFF0F0104, 0xABCD,
+ 0x01F, 0x00000064,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x058, 0x000A7284,
0x059, 0x000600EC,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x058, 0x000A7284,
0x059, 0x000600EC,
- 0xFF0F0404, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x058, 0x00081184,
+ 0x059, 0x0006016C,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x058, 0x00081184,
+ 0x059, 0x0006016C,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x058, 0x00081184,
+ 0x059, 0x0006016C,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x058, 0x000A7284,
0x059, 0x000600EC,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x058, 0x00081184,
0x059, 0x0006016C,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0104, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x061, 0x000E8D73,
0x062, 0x00093FC5,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x061, 0x000E8D73,
0x062, 0x00093FC5,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x000EFD83,
+ 0x062, 0x00093FCC,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x000EAD53,
+ 0x062, 0x00093BC4,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x000EFD83,
+ 0x062, 0x00093FCC,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x061, 0x000E8D73,
0x062, 0x00093FC5,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x061, 0x000EAD53,
0x062, 0x00093BC4,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0104, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x000110E9,
- 0xFF0F0204, 0xCDEF,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x000110E9,
- 0xFF0F0404, 0xCDEF,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000110EB,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x000110E9,
- 0xFF0F0200, 0xCDEF,
- 0x063, 0x000710E9,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x063, 0x000110E9,
- 0xCDCDCDCD, 0xCDCD,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000110EB,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000110E9,
+ 0xA0000000, 0x00000000,
0x063, 0x000714E9,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0104, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
+ 0x064, 0x0001C27C,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
+ 0x064, 0x0001C27C,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
0x064, 0x0001C27C,
- 0xFF0F0204, 0xCDEF,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x064, 0x0001C67C,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
0x064, 0x0001C27C,
- 0xFF0F0404, 0xCDEF,
+ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
0x064, 0x0001C27C,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x064, 0x0001C67C,
- 0xFF0F0104, 0xDEAD,
- 0xFF0F0200, 0xABCD,
+ 0xB0000000, 0x00000000,
+ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x00091016,
+ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x00091016,
+ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
0x065, 0x00093016,
- 0xFF0F02C0, 0xCDEF,
+ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
0x065, 0x00093015,
- 0xCDCDCDCD, 0xCDCD,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x00093015,
+ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x00093016,
+ 0xA0000000, 0x00000000,
0x065, 0x00091016,
- 0xFF0F0200, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x018, 0x00000006,
0x0EF, 0x00002000,
0x03B, 0x0003824B,
@@ -1918,9 +2097,10 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x0B4, 0x0001214C,
0x0B7, 0x0003000C,
0x01C, 0x000539D2,
+ 0x0C4, 0x000AFE00,
0x018, 0x0001F12A,
- 0x0FE, 0x00000000,
- 0x0FE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
0x018, 0x0001712A,
};
@@ -2040,6 +2220,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
u32 RTL8821AE_MAC_REG_ARRAY[] = {
+ 0x421, 0x0000000F,
0x428, 0x0000000A,
0x429, 0x00000010,
0x430, 0x00000000,
@@ -2508,7 +2689,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0x81C, 0xA6360001,
0x81C, 0xA5380001,
0x81C, 0xA43A0001,
- 0x81C, 0xA33C0001,
+ 0x81C, 0x683C0001,
0x81C, 0x673E0001,
0x81C, 0x66400001,
0x81C, 0x65420001,
@@ -2542,7 +2723,66 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0x81C, 0x017A0001,
0x81C, 0x017C0001,
0x81C, 0x017E0001,
- 0xFF0F02C0, 0xABCD,
+ 0x8000020c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000101,
+ 0x81C, 0xFA020101,
+ 0x81C, 0xF9040101,
+ 0x81C, 0xF8060101,
+ 0x81C, 0xF7080101,
+ 0x81C, 0xF60A0101,
+ 0x81C, 0xF50C0101,
+ 0x81C, 0xF40E0101,
+ 0x81C, 0xF3100101,
+ 0x81C, 0xF2120101,
+ 0x81C, 0xF1140101,
+ 0x81C, 0xF0160101,
+ 0x81C, 0xEF180101,
+ 0x81C, 0xEE1A0101,
+ 0x81C, 0xED1C0101,
+ 0x81C, 0xEC1E0101,
+ 0x81C, 0xEB200101,
+ 0x81C, 0xEA220101,
+ 0x81C, 0xE9240101,
+ 0x81C, 0xE8260101,
+ 0x81C, 0xE7280101,
+ 0x81C, 0xE62A0101,
+ 0x81C, 0xE52C0101,
+ 0x81C, 0xE42E0101,
+ 0x81C, 0xE3300101,
+ 0x81C, 0xA5320101,
+ 0x81C, 0xA4340101,
+ 0x81C, 0xA3360101,
+ 0x81C, 0x87380101,
+ 0x81C, 0x863A0101,
+ 0x81C, 0x853C0101,
+ 0x81C, 0x843E0101,
+ 0x81C, 0x69400101,
+ 0x81C, 0x68420101,
+ 0x81C, 0x67440101,
+ 0x81C, 0x66460101,
+ 0x81C, 0x49480101,
+ 0x81C, 0x484A0101,
+ 0x81C, 0x474C0101,
+ 0x81C, 0x2A4E0101,
+ 0x81C, 0x29500101,
+ 0x81C, 0x28520101,
+ 0x81C, 0x27540101,
+ 0x81C, 0x26560101,
+ 0x81C, 0x25580101,
+ 0x81C, 0x245A0101,
+ 0x81C, 0x235C0101,
+ 0x81C, 0x055E0101,
+ 0x81C, 0x04600101,
+ 0x81C, 0x03620101,
+ 0x81C, 0x02640101,
+ 0x81C, 0x01660101,
+ 0x81C, 0x01680101,
+ 0x81C, 0x016A0101,
+ 0x81C, 0x016C0101,
+ 0x81C, 0x016E0101,
+ 0x81C, 0x01700101,
+ 0x81C, 0x01720101,
+ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
0x81C, 0xFB000101,
0x81C, 0xFA020101,
0x81C, 0xF9040101,
@@ -2601,7 +2841,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0x81C, 0x016E0101,
0x81C, 0x01700101,
0x81C, 0x01720101,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x81C, 0xFF000101,
0x81C, 0xFF020101,
0x81C, 0xFE040101,
@@ -2660,7 +2900,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0x81C, 0x046E0101,
0x81C, 0x03700101,
0x81C, 0x02720101,
- 0xFF0F02C0, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x81C, 0x01740101,
0x81C, 0x01760101,
0x81C, 0x01780101,
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 1181b725f503..3d6c0d8c71d7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -739,8 +739,11 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
usb_anchor_urb(urb, &rtlusb->rx_submitted);
err = usb_submit_urb(urb, GFP_KERNEL);
- if (err)
+ if (err) {
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
goto err_out;
+ }
usb_free_urb(urb);
}
return 0;
@@ -910,10 +913,8 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
WARN_ON(NULL == skb);
_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!_urb) {
- kfree_skb(skb);
+ if (!_urb)
return NULL;
- }
_rtl_install_trx_info(rtlusb, skb, ep_num);
usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev,
ep_num), skb->data, skb->len, _rtl_tx_complete, skb);
@@ -927,7 +928,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
u32 ep_num;
struct urb *_urb = NULL;
- struct sk_buff *_skb = NULL;
WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
if (unlikely(IS_USB_STOP(rtlusb))) {
@@ -936,8 +936,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
return;
}
ep_num = rtlusb->ep_map.ep_mapping[qnum];
- _skb = skb;
- _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
+ _urb = _rtl_usb_tx_urb_setup(hw, skb, ep_num);
if (unlikely(!_urb)) {
pr_err("Can't allocate urb. Drop skb!\n");
kfree_skb(skb);
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index a7b341e95e76..0da95777f1c1 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -238,7 +238,8 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
rsi_set_len_qno(&data_desc->len_qno,
(skb->len - FRAME_DESC_SZ),
RSI_WIFI_MGMT_Q);
- if ((skb->len - header_size) == EAPOL4_PACKET_LEN) {
+ if (((skb->len - header_size) == EAPOL4_PACKET_LEN) ||
+ ((skb->len - header_size) == EAPOL4_PACKET_LEN - 2)) {
data_desc->misc_flags |=
RSI_DESC_REQUIRE_CFM_TO_HOST;
xtend_desc->confirm_frame_type = EAPOL4_CONFIRM;
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 81cc1044532d..609cd07eeafc 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -153,9 +153,7 @@ static void rsi_handle_interrupt(struct sdio_func *function)
if (adapter->priv->fsm_state == FSM_FW_NOT_LOADED)
return;
- dev->sdio_irq_task = current;
- rsi_interrupt_handler(adapter);
- dev->sdio_irq_task = NULL;
+ rsi_set_event(&dev->rx_thread.event);
}
/**
@@ -973,8 +971,6 @@ static int rsi_probe(struct sdio_func *pfunction,
rsi_dbg(ERR_ZONE, "%s: Unable to init rx thrd\n", __func__);
goto fail_kill_thread;
}
- skb_queue_head_init(&sdev->rx_q.head);
- sdev->rx_q.num_rx_pkts = 0;
sdio_claim_host(pfunction);
if (sdio_claim_irq(pfunction, rsi_handle_interrupt)) {
@@ -1404,7 +1400,7 @@ static int rsi_restore(struct device *dev)
}
static const struct dev_pm_ops rsi_pm_ops = {
.suspend = rsi_suspend,
- .resume = rsi_resume,
+ .resume_noirq = rsi_resume,
.freeze = rsi_freeze,
.thaw = rsi_thaw,
.restore = rsi_restore,
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 612c211e21a1..d66ae2f57314 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -60,39 +60,20 @@ int rsi_sdio_master_access_msword(struct rsi_hw *adapter, u16 ms_word)
return status;
}
+static void rsi_rx_handler(struct rsi_hw *adapter);
+
void rsi_sdio_rx_thread(struct rsi_common *common)
{
struct rsi_hw *adapter = common->priv;
struct rsi_91x_sdiodev *sdev = adapter->rsi_dev;
- struct sk_buff *skb;
- int status;
do {
rsi_wait_event(&sdev->rx_thread.event, EVENT_WAIT_FOREVER);
rsi_reset_event(&sdev->rx_thread.event);
+ rsi_rx_handler(adapter);
+ } while (!atomic_read(&sdev->rx_thread.thread_done));
- while (true) {
- if (atomic_read(&sdev->rx_thread.thread_done))
- goto out;
-
- skb = skb_dequeue(&sdev->rx_q.head);
- if (!skb)
- break;
- if (sdev->rx_q.num_rx_pkts > 0)
- sdev->rx_q.num_rx_pkts--;
- status = rsi_read_pkt(common, skb->data, skb->len);
- if (status) {
- rsi_dbg(ERR_ZONE, "Failed to read the packet\n");
- dev_kfree_skb(skb);
- break;
- }
- dev_kfree_skb(skb);
- }
- } while (1);
-
-out:
rsi_dbg(INFO_ZONE, "%s: Terminated SDIO RX thread\n", __func__);
- skb_queue_purge(&sdev->rx_q.head);
atomic_inc(&sdev->rx_thread.thread_done);
complete_and_exit(&sdev->rx_thread.completion, 0);
}
@@ -113,10 +94,6 @@ static int rsi_process_pkt(struct rsi_common *common)
u32 rcv_pkt_len = 0;
int status = 0;
u8 value = 0;
- struct sk_buff *skb;
-
- if (dev->rx_q.num_rx_pkts >= RSI_MAX_RX_PKTS)
- return 0;
num_blks = ((adapter->interrupt_status & 1) |
((adapter->interrupt_status >> RECV_NUM_BLOCKS) << 1));
@@ -144,22 +121,19 @@ static int rsi_process_pkt(struct rsi_common *common)
rcv_pkt_len = (num_blks * 256);
- skb = dev_alloc_skb(rcv_pkt_len);
- if (!skb)
- return -ENOMEM;
-
- status = rsi_sdio_host_intf_read_pkt(adapter, skb->data, rcv_pkt_len);
+ status = rsi_sdio_host_intf_read_pkt(adapter, dev->pktbuffer,
+ rcv_pkt_len);
if (status) {
rsi_dbg(ERR_ZONE, "%s: Failed to read packet from card\n",
__func__);
- dev_kfree_skb(skb);
return status;
}
- skb_put(skb, rcv_pkt_len);
- skb_queue_tail(&dev->rx_q.head, skb);
- dev->rx_q.num_rx_pkts++;
- rsi_set_event(&dev->rx_thread.event);
+ status = rsi_read_pkt(common, dev->pktbuffer, rcv_pkt_len);
+ if (status) {
+ rsi_dbg(ERR_ZONE, "Failed to read the packet\n");
+ return status;
+ }
return 0;
}
@@ -251,12 +225,12 @@ int rsi_init_sdio_slave_regs(struct rsi_hw *adapter)
}
/**
- * rsi_interrupt_handler() - This function read and process SDIO interrupts.
+ * rsi_rx_handler() - Read and process SDIO interrupts.
* @adapter: Pointer to the adapter structure.
*
* Return: None.
*/
-void rsi_interrupt_handler(struct rsi_hw *adapter)
+static void rsi_rx_handler(struct rsi_hw *adapter)
{
struct rsi_common *common = adapter->priv;
struct rsi_91x_sdiodev *dev =
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
index 66dcd2ec9051..fd11f16fc74f 100644
--- a/drivers/net/wireless/rsi/rsi_sdio.h
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -110,11 +110,6 @@ struct receive_info {
u32 buf_available_counter;
};
-struct rsi_sdio_rx_q {
- u8 num_rx_pkts;
- struct sk_buff_head head;
-};
-
struct rsi_91x_sdiodev {
struct sdio_func *pfunction;
struct task_struct *sdio_irq_task;
@@ -127,11 +122,10 @@ struct rsi_91x_sdiodev {
u16 tx_blk_size;
u8 write_fail;
bool buff_status_updated;
- struct rsi_sdio_rx_q rx_q;
struct rsi_thread rx_thread;
+ u8 pktbuffer[8192] __aligned(4);
};
-void rsi_interrupt_handler(struct rsi_hw *adapter);
int rsi_init_sdio_slave_regs(struct rsi_hw *adapter);
int rsi_sdio_read_register(struct rsi_hw *adapter, u32 addr, u8 *data);
int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter, u8 *pkt, u32 length);
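The rsi SDIO rework above drops the per-packet skb allocation and the intermediate rx queue: the interrupt handler only signals the rx thread, which reads each packet into the preallocated pktbuffer and processes it synchronously. A userspace sketch of that reuse-one-buffer loop, with invented read_from_card()/handle_pkt() helpers standing in for rsi_sdio_host_intf_read_pkt() and rsi_read_pkt():

#include <stdio.h>
#include <string.h>

#define PKTBUF_SIZE 8192	/* matches the pktbuffer added to rsi_91x_sdiodev */

/* Invented producer, standing in for rsi_sdio_host_intf_read_pkt(). */
static int read_from_card(unsigned char *buf, size_t len)
{
	memset(buf, 0xab, len);
	return 0;
}

/* Invented consumer, standing in for rsi_read_pkt(). */
static int handle_pkt(const unsigned char *buf, size_t len)
{
	printf("handled %zu bytes, first=0x%02x\n", len, (unsigned)buf[0]);
	return 0;
}

int main(void)
{
	/* One buffer, reused for every packet: no per-packet allocation. */
	static unsigned char pktbuffer[PKTBUF_SIZE];
	int i;

	for (i = 0; i < 3; i++) {
		if (read_from_card(pktbuffer, 256) ||
		    handle_pkt(pktbuffer, 256))
			return 1;
	}
	return 0;
}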
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index c1608f0bf6d0..0c5a15e2b8f9 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -384,6 +384,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
CW1200_LINK_ID_MAX,
cw1200_skb_dtor,
priv)) {
+ destroy_workqueue(priv->workqueue);
ieee80211_free_hw(hw);
return NULL;
}
@@ -395,6 +396,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
for (; i > 0; i--)
cw1200_queue_deinit(&priv->tx_queue[i - 1]);
cw1200_queue_stats_deinit(&priv->tx_queue_stats);
+ destroy_workqueue(priv->workqueue);
ieee80211_free_hw(hw);
return NULL;
}
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index f5acd24d0e2b..988abb49771f 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -84,7 +84,7 @@ static int wl1251_event_ps_report(struct wl1251 *wl,
break;
}
- return 0;
+ return ret;
}
static void wl1251_event_mbox_dump(struct event_mailbox *mbox)
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 4a4f797bb10f..e10fff42751e 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -649,7 +649,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
WLCORE_QUIRK_TKIP_HEADER_SPACE |
- WLCORE_QUIRK_START_STA_FAILS |
WLCORE_QUIRK_AP_ZERO_SESSION_ID;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
wl->mr_fw_name = WL127X_FW_NAME_MULTI;
@@ -673,7 +672,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
WLCORE_QUIRK_TKIP_HEADER_SPACE |
- WLCORE_QUIRK_START_STA_FAILS |
WLCORE_QUIRK_AP_ZERO_SESSION_ID;
wl->plt_fw_name = WL127X_PLT_FW_NAME;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
@@ -702,7 +700,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
WLCORE_QUIRK_TKIP_HEADER_SPACE |
- WLCORE_QUIRK_START_STA_FAILS |
WLCORE_QUIRK_AP_ZERO_SESSION_ID;
wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER,
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 2ca5658bbc2a..e24ffdff5bdc 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -2875,21 +2875,8 @@ static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
if (is_ibss)
ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
- else {
- if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
- /*
- * TODO: this is an ugly workaround for wl12xx fw
- * bug - we are not able to tx/rx after the first
- * start_sta, so make dummy start+stop calls,
- * and then call start_sta again.
- * this should be fixed in the fw.
- */
- wl12xx_cmd_role_start_sta(wl, wlvif);
- wl12xx_cmd_role_stop_sta(wl, wlvif);
- }
-
+ else
ret = wl12xx_cmd_role_start_sta(wl, wlvif);
- }
return ret;
}
@@ -3671,8 +3658,10 @@ void wlcore_regdomain_config(struct wl1271 *wl)
goto out;
ret = pm_runtime_get_sync(wl->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(wl->dev);
goto out;
+ }
ret = wlcore_cmd_regdomain_config_locked(wl);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index b6e19c2d66b0..250bcbf4ea2f 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -877,6 +877,7 @@ void wl1271_tx_work(struct work_struct *work)
ret = wlcore_tx_work_locked(wl);
if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
wl12xx_queue_recovery_work(wl);
goto out;
}
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index d4b1f66ef457..af7cf70b3832 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -559,9 +559,6 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
/* Each RX/TX transaction requires an end-of-transaction transfer */
#define WLCORE_QUIRK_END_OF_TRANSACTION BIT(0)
-/* the first start_role(sta) sometimes doesn't work on wl12xx */
-#define WLCORE_QUIRK_START_STA_FAILS BIT(1)
-
/* wl127x and SPI don't support SDIO block size alignment */
#define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN BIT(2)
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index efdce9ae36ea..a10ee5a68012 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -379,16 +379,7 @@ struct wl3501_get_confirm {
u8 mib_value[100];
};
-struct wl3501_join_req {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- struct iw_mgmt_data_rset operational_rset;
- u16 reserved2;
- u16 timeout;
- u16 probe_delay;
- u8 timestamp[8];
- u8 local_time[8];
+struct wl3501_req {
u16 beacon_period;
u16 dtim_period;
u16 cap_info;
@@ -401,6 +392,19 @@ struct wl3501_join_req {
struct iw_mgmt_data_rset bss_basic_rset;
};
+struct wl3501_join_req {
+ u16 next_blk;
+ u8 sig_id;
+ u8 reserved;
+ struct iw_mgmt_data_rset operational_rset;
+ u16 reserved2;
+ u16 timeout;
+ u16 probe_delay;
+ u8 timestamp[8];
+ u8 local_time[8];
+ struct wl3501_req req;
+};
+
struct wl3501_join_confirm {
u16 next_blk;
u8 sig_id;
@@ -443,16 +447,7 @@ struct wl3501_scan_confirm {
u16 status;
char timestamp[8];
char localtime[8];
- u16 beacon_period;
- u16 dtim_period;
- u16 cap_info;
- u8 bss_type;
- u8 bssid[ETH_ALEN];
- struct iw_mgmt_essid_pset ssid;
- struct iw_mgmt_ds_pset ds_pset;
- struct iw_mgmt_cf_pset cf_pset;
- struct iw_mgmt_ibss_pset ibss_pset;
- struct iw_mgmt_data_rset bss_basic_rset;
+ struct wl3501_req req;
u8 rssi;
};
@@ -471,8 +466,10 @@ struct wl3501_md_req {
u16 size;
u8 pri;
u8 service_class;
- u8 daddr[ETH_ALEN];
- u8 saddr[ETH_ALEN];
+ struct {
+ u8 daddr[ETH_ALEN];
+ u8 saddr[ETH_ALEN];
+ } addr;
};
struct wl3501_md_ind {
@@ -484,8 +481,10 @@ struct wl3501_md_ind {
u8 reception;
u8 pri;
u8 service_class;
- u8 daddr[ETH_ALEN];
- u8 saddr[ETH_ALEN];
+ struct {
+ u8 daddr[ETH_ALEN];
+ u8 saddr[ETH_ALEN];
+ } addr;
};
struct wl3501_md_confirm {
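The wl3501.h reshuffle above groups the destination and source MAC addresses of the MD request/indication into one nested struct, so the copies in wl3501_cs.c (shown in the next file) can be bounded by sizeof() rather than the old magic constant 12. A standalone sketch of that sizeof-bounded copy (struct md_addrs mirrors the new layout; the frame bytes are made up):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Mirrors the nested addr block added to wl3501_md_req/wl3501_md_ind. */
struct md_addrs {
	unsigned char daddr[ETH_ALEN];
	unsigned char saddr[ETH_ALEN];
};

int main(void)
{
	const unsigned char frame[64] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01,	/* destination */
		0x02, 0x00, 0x00, 0x00, 0x00, 0x02,	/* source */
	};
	struct md_addrs addr;

	/* Bounded by the struct size, not by a hand-counted constant. */
	memcpy(&addr, frame, sizeof(addr));
	printf("daddr ends in %02x, saddr ends in %02x\n",
	       (unsigned)addr.daddr[ETH_ALEN - 1],
	       (unsigned)addr.saddr[ETH_ALEN - 1]);
	return 0;
}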
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index da62220b9c01..f33ece937047 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -468,6 +468,7 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
struct wl3501_md_req sig = {
.sig_id = WL3501_SIG_MD_REQ,
};
+ size_t sig_addr_len = sizeof(sig.addr);
u8 *pdata = (char *)data;
int rc = -EIO;
@@ -483,9 +484,9 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
goto out;
}
rc = 0;
- memcpy(&sig.daddr[0], pdata, 12);
- pktlen = len - 12;
- pdata += 12;
+ memcpy(&sig.addr, pdata, sig_addr_len);
+ pktlen = len - sig_addr_len;
+ pdata += sig_addr_len;
sig.data = bf;
if (((*pdata) * 256 + (*(pdata + 1))) > 1500) {
u8 addr4[ETH_ALEN] = {
@@ -588,7 +589,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
struct wl3501_join_req sig = {
.sig_id = WL3501_SIG_JOIN_REQ,
.timeout = 10,
- .ds_pset = {
+ .req.ds_pset = {
.el = {
.id = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
.len = 1,
@@ -597,7 +598,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
},
};
- memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72);
+ memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req));
return wl3501_esbq_exec(this, &sig, sizeof(sig));
}
@@ -665,35 +666,37 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
if (sig.status == WL3501_STATUS_SUCCESS) {
pr_debug("success");
if ((this->net_type == IW_MODE_INFRA &&
- (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
+ (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
(this->net_type == IW_MODE_ADHOC &&
- (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
+ (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
this->net_type == IW_MODE_AUTO) {
if (!this->essid.el.len)
matchflag = 1;
else if (this->essid.el.len == 3 &&
!memcmp(this->essid.essid, "ANY", 3))
matchflag = 1;
- else if (this->essid.el.len != sig.ssid.el.len)
+ else if (this->essid.el.len != sig.req.ssid.el.len)
matchflag = 0;
- else if (memcmp(this->essid.essid, sig.ssid.essid,
+ else if (memcmp(this->essid.essid, sig.req.ssid.essid,
this->essid.el.len))
matchflag = 0;
else
matchflag = 1;
if (matchflag) {
for (i = 0; i < this->bss_cnt; i++) {
- if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
+ if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid,
+ sig.req.bssid)) {
matchflag = 0;
break;
}
}
}
if (matchflag && (i < 20)) {
- memcpy(&this->bss_set[i].beacon_period,
- &sig.beacon_period, 73);
+ memcpy(&this->bss_set[i].req,
+ &sig.req, sizeof(sig.req));
this->bss_cnt++;
this->rssi = sig.rssi;
+ this->bss_set[i].rssi = sig.rssi;
}
}
} else if (sig.status == WL3501_STATUS_TIMEOUT) {
@@ -885,19 +888,19 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
if (this->join_sta_bss < this->bss_cnt) {
const int i = this->join_sta_bss;
memcpy(this->bssid,
- this->bss_set[i].bssid, ETH_ALEN);
- this->chan = this->bss_set[i].ds_pset.chan;
+ this->bss_set[i].req.bssid, ETH_ALEN);
+ this->chan = this->bss_set[i].req.ds_pset.chan;
iw_copy_mgmt_info_element(&this->keep_essid.el,
- &this->bss_set[i].ssid.el);
+ &this->bss_set[i].req.ssid.el);
wl3501_mgmt_auth(this);
}
} else {
const int i = this->join_sta_bss;
- memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN);
- this->chan = this->bss_set[i].ds_pset.chan;
+ memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN);
+ this->chan = this->bss_set[i].req.ds_pset.chan;
iw_copy_mgmt_info_element(&this->keep_essid.el,
- &this->bss_set[i].ssid.el);
+ &this->bss_set[i].req.ssid.el);
wl3501_online(dev);
}
} else {
@@ -979,7 +982,8 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
} else {
skb->dev = dev;
skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
- skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
+ skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr,
+ sizeof(sig.addr));
wl3501_receive(this, skb->data, pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, dev);
@@ -1574,30 +1578,30 @@ static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info,
for (i = 0; i < this->bss_cnt; ++i) {
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN);
+ memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN);
current_ev = iwe_stream_add_event(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, IW_EV_ADDR_LEN);
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
- iwe.u.data.length = this->bss_set[i].ssid.el.len;
+ iwe.u.data.length = this->bss_set[i].req.ssid.el.len;
current_ev = iwe_stream_add_point(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe,
- this->bss_set[i].ssid.essid);
+ this->bss_set[i].req.ssid.essid);
iwe.cmd = SIOCGIWMODE;
- iwe.u.mode = this->bss_set[i].bss_type;
+ iwe.u.mode = this->bss_set[i].req.bss_type;
current_ev = iwe_stream_add_event(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, IW_EV_UINT_LEN);
iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = this->bss_set[i].ds_pset.chan;
+ iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan;
iwe.u.freq.e = 0;
current_ev = iwe_stream_add_event(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, IW_EV_FREQ_LEN);
iwe.cmd = SIOCGIWENCODE;
- if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
+ if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
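
The wl3501 rework above trades the hard-coded copy lengths (12, 72, 73) for copies bounded by a dedicated structure, so the size tracks the layout automatically. An illustrative fragment of the same idiom; the types and names here are invented, not the driver's:

#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/types.h>

struct frame_addrs {
	u8 daddr[ETH_ALEN];
	u8 saddr[ETH_ALEN];
};

struct md_request {
	struct frame_addrs addr;
	/* ... remaining signal fields ... */
};

static void fill_addrs(struct md_request *sig, const u8 *pkt)
{
	/* sized by the struct instead of a magic 12 */
	memcpy(&sig->addr, pkt, sizeof(sig->addr));
}
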
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 936c0b3e0ba2..86d23d0f563c 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -140,6 +140,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */
char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
struct xenvif *vif; /* Parent VIF */
+ /*
+ * TX/RX common EOI handling.
+ * When feature-split-event-channels = 0, interrupt handler sets
+ * NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set
+ * by the RX and TX interrupt handlers.
+ * RX and TX handler threads will issue an EOI when either
+ * NETBK_COMMON_EOI or their specific bits (NETBK_RX_EOI or
+ * NETBK_TX_EOI) are set and they will reset those bits.
+ */
+ atomic_t eoi_pending;
+#define NETBK_RX_EOI 0x01
+#define NETBK_TX_EOI 0x02
+#define NETBK_COMMON_EOI 0x04
+
/* Use NAPI for guest TX */
struct napi_struct napi;
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
@@ -357,6 +371,7 @@ int xenvif_dealloc_kthread(void *data);
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
+bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
void xenvif_rx_action(struct xenvif_queue *queue);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 4cafc31b98b7..3b5fdb24ef1b 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -77,12 +77,28 @@ int xenvif_schedulable(struct xenvif *vif)
!vif->disabled;
}
+static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
+{
+ bool rc;
+
+ rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
+ if (rc)
+ napi_schedule(&queue->napi);
+ return rc;
+}
+
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
struct xenvif_queue *queue = dev_id;
+ int old;
- if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
- napi_schedule(&queue->napi);
+ old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
+ WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");
+
+ if (!xenvif_handle_tx_interrupt(queue)) {
+ atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ }
return IRQ_HANDLED;
}
@@ -116,19 +132,48 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
+{
+ bool rc;
+
+ rc = xenvif_have_rx_work(queue, false);
+ if (rc)
+ xenvif_kick_thread(queue);
+ return rc;
+}
+
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif_queue *queue = dev_id;
+ int old;
- xenvif_kick_thread(queue);
+ old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
+ WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");
+
+ if (!xenvif_handle_rx_interrupt(queue)) {
+ atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ }
return IRQ_HANDLED;
}
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
- xenvif_tx_interrupt(irq, dev_id);
- xenvif_rx_interrupt(irq, dev_id);
+ struct xenvif_queue *queue = dev_id;
+ int old;
+ bool has_rx, has_tx;
+
+ old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
+ WARN(old, "Interrupt while EOI pending\n");
+
+ has_tx = xenvif_handle_tx_interrupt(queue);
+ has_rx = xenvif_handle_rx_interrupt(queue);
+
+ if (!has_rx && !has_tx) {
+ atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ }
return IRQ_HANDLED;
}
@@ -595,7 +640,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
shared = (struct xen_netif_ctrl_sring *)addr;
BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
- err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
+ err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn);
if (err < 0)
goto err_unmap;
@@ -653,7 +698,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
- err = bind_interdomain_evtchn_to_irqhandler(
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
queue->name, queue);
if (err < 0)
@@ -664,7 +709,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
/* feature-split-event-channels == 1 */
snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
"%s-tx", queue->name);
- err = bind_interdomain_evtchn_to_irqhandler(
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
queue->tx_irq_name, queue);
if (err < 0)
@@ -674,7 +719,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
"%s-rx", queue->name);
- err = bind_interdomain_evtchn_to_irqhandler(
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
queue->rx_irq_name, queue);
if (err < 0)
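
The interface.c changes above all implement one bookkeeping scheme: the interrupt handler records a pending-EOI bit with atomic_fetch_or(), the processing path clears it with atomic_fetch_andnot() and issues the EOI only when it really was pending, and a spurious interrupt is acknowledged immediately with XEN_EOI_FLAG_SPURIOUS. A compressed sketch, with my_has_work()/my_kick() as hypothetical stand-ins:

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <xen/events.h>

#define MY_EOI_PENDING	0x01

struct my_queue {
	atomic_t eoi_pending;
	unsigned int irq;
};

bool my_has_work(struct my_queue *q);	/* hypothetical */
void my_kick(struct my_queue *q);	/* hypothetical */

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_queue *q = data;

	atomic_fetch_or(MY_EOI_PENDING, &q->eoi_pending);

	if (my_has_work(q)) {
		my_kick(q);	/* the worker issues the EOI once it is done */
	} else {
		atomic_andnot(MY_EOI_PENDING, &q->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}
	return IRQ_HANDLED;
}

static void my_work_done(struct my_queue *q)
{
	/* EOI only if the interrupt path left the bit pending */
	if (atomic_fetch_andnot(MY_EOI_PENDING, &q->eoi_pending) & MY_EOI_PENDING)
		xen_irq_lateeoi(q->irq, 0);
}
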
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1c849106b793..41bdfb684d46 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -162,6 +162,10 @@ void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
if (more_to_do)
napi_schedule(&queue->napi);
+ else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
+ &queue->eoi_pending) &
+ (NETBK_TX_EOI | NETBK_COMMON_EOI))
+ xen_irq_lateeoi(queue->tx_irq, 0);
}
static void tx_add_credit(struct xenvif_queue *queue)
@@ -1327,7 +1331,15 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
NULL,
queue->pages_to_map,
nr_mops);
- BUG_ON(ret);
+ if (ret) {
+ unsigned int i;
+
+ netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
+ nr_mops, ret);
+ for (i = 0; i < nr_mops; ++i)
+ WARN_ON_ONCE(queue->tx_map_ops[i].status ==
+ GNTST_okay);
+ }
}
work_done = xenvif_tx_submit(queue);
@@ -1613,9 +1625,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif)
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
{
struct xenvif *vif = data;
+ unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
- while (xenvif_ctrl_work_todo(vif))
+ while (xenvif_ctrl_work_todo(vif)) {
xenvif_ctrl_action(vif);
+ eoi_flag = 0;
+ }
+
+ xen_irq_lateeoi(irq, eoi_flag);
return IRQ_HANDLED;
}
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index ef5887037b22..48e2006f96ce 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
RING_IDX prod, cons;
struct sk_buff *skb;
int needed;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->rx_queue.lock, flags);
skb = skb_peek(&queue->rx_queue);
- if (!skb)
+ if (!skb) {
+ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
return false;
+ }
needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
if (skb_is_gso(skb))
@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
if (skb->sw_hash)
needed++;
+ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+
do {
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
@@ -490,13 +497,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
return queue->stalled && prod - cons >= 1;
}
-static bool xenvif_have_rx_work(struct xenvif_queue *queue)
+bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
return xenvif_rx_ring_slots_available(queue) ||
(queue->vif->stall_timeout &&
(xenvif_rx_queue_stalled(queue) ||
xenvif_rx_queue_ready(queue))) ||
- kthread_should_stop() ||
+ (test_kthread && kthread_should_stop()) ||
queue->vif->disabled;
}
@@ -527,15 +534,20 @@ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
DEFINE_WAIT(wait);
- if (xenvif_have_rx_work(queue))
+ if (xenvif_have_rx_work(queue, true))
return;
for (;;) {
long ret;
prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
- if (xenvif_have_rx_work(queue))
+ if (xenvif_have_rx_work(queue, true))
break;
+ if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
+ &queue->eoi_pending) &
+ (NETBK_RX_EOI | NETBK_COMMON_EOI))
+ xen_irq_lateeoi(queue->rx_irq, 0);
+
ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
if (!ret)
break;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index cd51492ae6c2..78c56149559c 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -777,12 +777,14 @@ static int xen_register_credit_watch(struct xenbus_device *dev,
return -ENOMEM;
snprintf(node, maxlen, "%s/rate", dev->nodename);
vif->credit_watch.node = node;
+ vif->credit_watch.will_handle = NULL;
vif->credit_watch.callback = xen_net_rate_changed;
err = register_xenbus_watch(&vif->credit_watch);
if (err) {
pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
kfree(node);
vif->credit_watch.node = NULL;
+ vif->credit_watch.will_handle = NULL;
vif->credit_watch.callback = NULL;
}
return err;
@@ -829,6 +831,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
snprintf(node, maxlen, "%s/request-multicast-control",
dev->otherend);
vif->mcast_ctrl_watch.node = node;
+ vif->mcast_ctrl_watch.will_handle = NULL;
vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
err = register_xenbus_watch(&vif->mcast_ctrl_watch);
if (err) {
@@ -836,6 +839,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
vif->mcast_ctrl_watch.node);
kfree(node);
vif->mcast_ctrl_watch.node = NULL;
+ vif->mcast_ctrl_watch.will_handle = NULL;
vif->mcast_ctrl_watch.callback = NULL;
}
return err;
@@ -1039,11 +1043,15 @@ static void connect(struct backend_info *be)
xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
- hotplug_status_changed,
- "%s/%s", dev->nodename, "hotplug-status");
- if (!err)
+ if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
+ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
+ NULL, hotplug_status_changed,
+ "%s/%s", dev->nodename,
+ "hotplug-status");
+ if (err)
+ goto err;
be->have_hotplug_status_watch = 1;
+ }
netif_tx_wake_all_queues(be->vif->dev);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 6b4675a9494b..c8e84276e639 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
"Maximum number of queues per virtual interface");
+#define XENNET_TIMEOUT (5 * HZ)
+
static const struct ethtool_ops xennet_ethtool_ops;
struct netfront_cb {
@@ -1337,12 +1339,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
- xenbus_switch_state(dev, XenbusStateInitialising);
- wait_event(module_wq,
- xenbus_read_driver_state(dev->otherend) !=
- XenbusStateClosed &&
- xenbus_read_driver_state(dev->otherend) !=
- XenbusStateUnknown);
+ do {
+ xenbus_switch_state(dev, XenbusStateInitialising);
+ err = wait_event_timeout(module_wq,
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateClosed &&
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateUnknown, XENNET_TIMEOUT);
+ } while (!err);
+
return netdev;
exit:
@@ -2142,28 +2147,43 @@ static const struct attribute_group xennet_dev_group = {
};
#endif /* CONFIG_SYSFS */
-static int xennet_remove(struct xenbus_device *dev)
+static void xennet_bus_close(struct xenbus_device *dev)
{
- struct netfront_info *info = dev_get_drvdata(&dev->dev);
-
- dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ int ret;
- if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
+ if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
+ return;
+ do {
xenbus_switch_state(dev, XenbusStateClosing);
- wait_event(module_wq,
- xenbus_read_driver_state(dev->otherend) ==
- XenbusStateClosing ||
- xenbus_read_driver_state(dev->otherend) ==
- XenbusStateUnknown);
+ ret = wait_event_timeout(module_wq,
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateClosing ||
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateClosed ||
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateUnknown,
+ XENNET_TIMEOUT);
+ } while (!ret);
+
+ if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
+ return;
+ do {
xenbus_switch_state(dev, XenbusStateClosed);
- wait_event(module_wq,
- xenbus_read_driver_state(dev->otherend) ==
- XenbusStateClosed ||
- xenbus_read_driver_state(dev->otherend) ==
- XenbusStateUnknown);
- }
+ ret = wait_event_timeout(module_wq,
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateClosed ||
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateUnknown,
+ XENNET_TIMEOUT);
+ } while (!ret);
+}
+
+static int xennet_remove(struct xenbus_device *dev)
+{
+ struct netfront_info *info = dev_get_drvdata(&dev->dev);
+ xennet_bus_close(dev);
xennet_disconnect_backend(info);
if (info->netdev->reg_state == NETREG_REGISTERED)
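
The netfront hunks above replace open-ended wait_event() calls with bounded waits: wait_event_timeout() returns 0 only when the timeout expires with the condition still false, and a positive value once the condition holds, so a while (!ret) loop re-arms the wait (and, in xennet_bus_close(), re-issues the state switch) instead of blocking forever on a backend that never answers. A generic sketch of the return-value handling, using an invented wait queue and flag:

#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);

static void wait_for_flag(bool *flag)
{
	long ret;

	do {
		/* 0 means the 5 second timeout expired with *flag still false */
		ret = wait_event_timeout(my_wq, READ_ONCE(*flag), 5 * HZ);
	} while (!ret);
}
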
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index a0cc1cc45292..01da9331f4cb 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -692,6 +692,9 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0)
return false;
+ if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE)
+ return false;
+
return true;
}
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
index 9d9c8d57a042..64b58455e620 100644
--- a/drivers/nfc/s3fwrn5/core.c
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -209,6 +209,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
case S3FWRN5_MODE_FW:
return s3fwrn5_fw_recv_frame(ndev, skb);
default:
+ kfree_skb(skb);
return -ENODEV;
}
}
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index b7828fb252f2..b7d5b12035c1 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -304,8 +304,10 @@ static int s3fwrn5_fw_request_firmware(struct s3fwrn5_fw_info *fw_info)
if (ret < 0)
return ret;
- if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE)
+ if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE) {
+ release_firmware(fw->fw);
return -EINVAL;
+ }
memcpy(fw->date, fw->fw->data + 0x00, 12);
fw->date[12] = '\0';
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index 4da409e77a72..6c78529a8c89 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -37,8 +37,8 @@ struct s3fwrn5_i2c_phy {
struct i2c_client *i2c_dev;
struct nci_dev *ndev;
- unsigned int gpio_en;
- unsigned int gpio_fw_wake;
+ int gpio_en;
+ int gpio_fw_wake;
struct mutex mutex;
diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c
index fd08be2917e6..7399eb2c5e1d 100644
--- a/drivers/nfc/st21nfca/dep.c
+++ b/drivers/nfc/st21nfca/dep.c
@@ -184,8 +184,10 @@ static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev,
memcpy(atr_res->gbi, atr_req->gbi, gb_len);
r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi,
gb_len);
- if (r < 0)
+ if (r < 0) {
+ kfree_skb(skb);
return r;
+ }
}
info->dep_info.curr_nfc_dep_pni = 0;
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index 01acb6e53365..c4b6e29c0719 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -981,7 +981,7 @@ static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev,
rc = down_killable(&stcontext->exchange_lock);
if (rc) {
WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n");
- return rc;
+ goto free_skb_resp;
}
rc = st95hf_spi_send(&stcontext->spicontext, skb->data,
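
The NFC fixes above (pn533 length check aside, the s3fwrn5, st21nfca and st95hf hunks) all plug the same kind of leak: once a function owns an skb, a firmware blob or a response buffer, every early error return has to release it. A small sketch of the skb case, with my_deliver() and the mode values invented for illustration:

#include <linux/errno.h>
#include <linux/skbuff.h>

enum my_mode { MY_MODE_NCI, MY_MODE_OTHER };

struct my_dev {
	enum my_mode mode;
};

int my_deliver(struct my_dev *dev, struct sk_buff *skb);	/* hypothetical consumer */

static int my_recv_frame(struct my_dev *dev, struct sk_buff *skb)
{
	switch (dev->mode) {
	case MY_MODE_NCI:
		return my_deliver(dev, skb);	/* consumer now owns the skb */
	default:
		kfree_skb(skb);			/* reject path must free it */
		return -ENODEV;
	}
}
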
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index efb214fc545a..0b1fbb5dba9b 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -1036,6 +1036,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
err_dma_mask:
pci_clear_master(pdev);
+ pci_release_regions(pdev);
err_pci_regions:
pci_disable_device(pdev);
err_pci_enable:
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c
index 2581ab724c34..f8f75a504a58 100644
--- a/drivers/ntb/ntb.c
+++ b/drivers/ntb/ntb.c
@@ -214,10 +214,8 @@ int ntb_default_port_number(struct ntb_dev *ntb)
case NTB_TOPO_B2B_DSD:
return NTB_PORT_SEC_DSD;
default:
- break;
+ return 0;
}
-
- return -EINVAL;
}
EXPORT_SYMBOL(ntb_default_port_number);
@@ -240,10 +238,8 @@ int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx)
case NTB_TOPO_B2B_DSD:
return NTB_PORT_PRI_USD;
default:
- break;
+ return 0;
}
-
- return -EINVAL;
}
EXPORT_SYMBOL(ntb_default_peer_port_number);
@@ -315,4 +311,3 @@ static void __exit ntb_driver_exit(void)
bus_unregister(&ntb_bus);
}
module_exit(ntb_driver_exit);
-
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 80508da3c8b5..ad5d3919435c 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -158,6 +158,8 @@ struct perf_peer {
/* NTB connection setup service */
struct work_struct service;
unsigned long sts;
+
+ struct completion init_comp;
};
#define to_peer_service(__work) \
container_of(__work, struct perf_peer, service)
@@ -549,6 +551,7 @@ static int perf_setup_outbuf(struct perf_peer *peer)
/* Initialization is finally done */
set_bit(PERF_STS_DONE, &peer->sts);
+ complete_all(&peer->init_comp);
return 0;
}
@@ -559,7 +562,7 @@ static void perf_free_inbuf(struct perf_peer *peer)
return;
(void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
- dma_free_coherent(&peer->perf->ntb->dev, peer->inbuf_size,
+ dma_free_coherent(&peer->perf->ntb->pdev->dev, peer->inbuf_size,
peer->inbuf, peer->inbuf_xlat);
peer->inbuf = NULL;
}
@@ -588,8 +591,9 @@ static int perf_setup_inbuf(struct perf_peer *peer)
perf_free_inbuf(peer);
- peer->inbuf = dma_alloc_coherent(&perf->ntb->dev, peer->inbuf_size,
- &peer->inbuf_xlat, GFP_KERNEL);
+ peer->inbuf = dma_alloc_coherent(&perf->ntb->pdev->dev,
+ peer->inbuf_size, &peer->inbuf_xlat,
+ GFP_KERNEL);
if (!peer->inbuf) {
dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n",
&peer->inbuf_size);
@@ -639,6 +643,7 @@ static void perf_service_work(struct work_struct *work)
perf_setup_outbuf(peer);
if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) {
+ init_completion(&peer->init_comp);
clear_bit(PERF_STS_DONE, &peer->sts);
if (test_bit(0, &peer->perf->busy_flag) &&
peer == peer->perf->test_peer) {
@@ -655,7 +660,7 @@ static int perf_init_service(struct perf_ctx *perf)
{
u64 mask;
- if (ntb_peer_mw_count(perf->ntb) < perf->pcnt + 1) {
+ if (ntb_peer_mw_count(perf->ntb) < perf->pcnt) {
dev_err(&perf->ntb->dev, "Not enough memory windows\n");
return -EINVAL;
}
@@ -1046,8 +1051,9 @@ static int perf_submit_test(struct perf_peer *peer)
struct perf_thread *pthr;
int tidx, ret;
- if (!test_bit(PERF_STS_DONE, &peer->sts))
- return -ENOLINK;
+ ret = wait_for_completion_interruptible(&peer->init_comp);
+ if (ret < 0)
+ return ret;
if (test_and_set_bit_lock(0, &perf->busy_flag))
return -EBUSY;
@@ -1413,10 +1419,21 @@ static int perf_init_peers(struct perf_ctx *perf)
peer->gidx = pidx;
}
INIT_WORK(&peer->service, perf_service_work);
+ init_completion(&peer->init_comp);
}
if (perf->gidx == -1)
perf->gidx = pidx;
+ /*
+ * Hardware with only two ports may not have unique port
+ * numbers. In this case, the gidxs should all be zero.
+ */
+ if (perf->pcnt == 1 && ntb_port_number(perf->ntb) == 0 &&
+ ntb_peer_port_number(perf->ntb, 0) == 0) {
+ perf->gidx = 0;
+ perf->peers[0].gidx = 0;
+ }
+
for (pidx = 0; pidx < perf->pcnt; pidx++) {
ret = perf_setup_peer_mw(&perf->peers[pidx]);
if (ret)
@@ -1512,4 +1529,3 @@ static void __exit perf_exit(void)
destroy_workqueue(perf_wq);
}
module_exit(perf_exit);
-
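
The ntb_perf change above converts a hard -ENOLINK failure into a wait: each peer carries a struct completion that the service work completes once its memory windows are set up, re-initialising it whenever the peer is cleared, and the test entry point blocks interruptibly on it. The core of that handshake, with peer_state and the helpers below as illustrative names rather than the driver's:

#include <linux/completion.h>

struct peer_state {
	struct completion init_comp;
};

static void peer_init(struct peer_state *p)
{
	init_completion(&p->init_comp);
}

static void peer_setup_done(struct peer_state *p)
{
	complete_all(&p->init_comp);	/* release current and future waiters */
}

static int peer_run_test(struct peer_state *p)
{
	int ret;

	ret = wait_for_completion_interruptible(&p->init_comp);
	if (ret < 0)
		return ret;	/* interrupted by a signal */

	/* ... run the benchmark ... */
	return 0;
}
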
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 65865e460ab8..18d00eec7b02 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -121,15 +121,14 @@ static int pp_find_next_peer(struct pp_ctx *pp)
link = ntb_link_is_up(pp->ntb, NULL, NULL);
/* Find next available peer */
- if (link & pp->nmask) {
+ if (link & pp->nmask)
pidx = __ffs64(link & pp->nmask);
- out_db = BIT_ULL(pidx + 1);
- } else if (link & pp->pmask) {
+ else if (link & pp->pmask)
pidx = __ffs64(link & pp->pmask);
- out_db = BIT_ULL(pidx);
- } else {
+ else
return -ENODEV;
- }
+
+ out_db = BIT_ULL(ntb_peer_port_number(pp->ntb, pidx));
spin_lock(&pp->lock);
pp->out_pidx = pidx;
@@ -303,7 +302,7 @@ static void pp_init_flds(struct pp_ctx *pp)
break;
}
- pp->in_db = BIT_ULL(pidx);
+ pp->in_db = BIT_ULL(lport);
pp->pmask = GENMASK_ULL(pidx, 0) >> 1;
pp->nmask = GENMASK_ULL(pcnt - 1, pidx);
@@ -435,4 +434,3 @@ static void __exit pp_exit(void)
debugfs_remove_recursive(pp_dbgfs_topdir);
}
module_exit(pp_exit);
-
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index d592c0ffbd19..311d6ab8d016 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -504,7 +504,7 @@ static ssize_t tool_peer_link_read(struct file *filep, char __user *ubuf,
buf[1] = '\n';
buf[2] = '\0';
- return simple_read_from_buffer(ubuf, size, offp, buf, 3);
+ return simple_read_from_buffer(ubuf, size, offp, buf, 2);
}
static TOOL_FOPS_RDWR(tool_peer_link_fops,
@@ -590,7 +590,7 @@ static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx,
inmw->size = min_t(resource_size_t, req_size, size);
inmw->size = round_up(inmw->size, addr_align);
inmw->size = round_up(inmw->size, size_align);
- inmw->mm_base = dma_alloc_coherent(&tc->ntb->dev, inmw->size,
+ inmw->mm_base = dma_alloc_coherent(&tc->ntb->pdev->dev, inmw->size,
&inmw->dma_base, GFP_KERNEL);
if (!inmw->mm_base)
return -ENOMEM;
@@ -612,7 +612,7 @@ static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx,
return 0;
err_free_dma:
- dma_free_coherent(&tc->ntb->dev, inmw->size, inmw->mm_base,
+ dma_free_coherent(&tc->ntb->pdev->dev, inmw->size, inmw->mm_base,
inmw->dma_base);
inmw->mm_base = NULL;
inmw->dma_base = 0;
@@ -629,7 +629,7 @@ static void tool_free_mw(struct tool_ctx *tc, int pidx, int widx)
if (inmw->mm_base != NULL) {
ntb_mw_clear_trans(tc->ntb, pidx, widx);
- dma_free_coherent(&tc->ntb->dev, inmw->size,
+ dma_free_coherent(&tc->ntb->pdev->dev, inmw->size,
inmw->mm_base, inmw->dma_base);
}
@@ -1690,4 +1690,3 @@ static void __exit tool_exit(void)
debugfs_remove_recursive(tool_dbgfs_topdir);
}
module_exit(tool_exit);
-
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 62e9cb167aad..db45c6bbb7bb 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -290,7 +290,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
}
set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
- device_add_disk(dev, disk);
+ device_add_disk(dev, disk, NULL);
revalidate_disk(disk);
return 0;
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 75ae2c508a04..b6823e66af13 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -400,9 +400,9 @@ static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
if (++(arena->freelist[lane].seq) == 4)
arena->freelist[lane].seq = 1;
- if (ent_e_flag(ent->old_map))
+ if (ent_e_flag(le32_to_cpu(ent->old_map)))
arena->freelist[lane].has_err = 1;
- arena->freelist[lane].block = le32_to_cpu(ent_lba(ent->old_map));
+ arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));
return ret;
}
@@ -541,9 +541,9 @@ static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
static int btt_freelist_init(struct arena_info *arena)
{
- int old, new, ret;
- u32 i, map_entry;
- struct log_entry log_new, log_old;
+ int new, ret;
+ struct log_entry log_new;
+ u32 i, map_entry, log_oldmap, log_newmap;
arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
GFP_KERNEL);
@@ -551,24 +551,26 @@ static int btt_freelist_init(struct arena_info *arena)
return -ENOMEM;
for (i = 0; i < arena->nfree; i++) {
- old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
- if (old < 0)
- return old;
-
new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
if (new < 0)
return new;
+ /* old and new map entries with any flags stripped out */
+ log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
+ log_newmap = ent_lba(le32_to_cpu(log_new.new_map));
+
/* sub points to the next one to be overwritten */
arena->freelist[i].sub = 1 - new;
arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
- arena->freelist[i].block = le32_to_cpu(log_new.old_map);
+ arena->freelist[i].block = log_oldmap;
/*
* FIXME: if error clearing fails during init, we want to make
* the BTT read-only
*/
- if (ent_e_flag(log_new.old_map)) {
+ if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
+ !ent_normal(le32_to_cpu(log_new.old_map))) {
+ arena->freelist[i].has_err = 1;
ret = arena_clear_freelist_error(arena, i);
if (ret)
dev_err_ratelimited(to_dev(arena),
@@ -576,7 +578,7 @@ static int btt_freelist_init(struct arena_info *arena)
}
/* This implies a newly created or untouched flog entry */
- if (log_new.old_map == log_new.new_map)
+ if (log_oldmap == log_newmap)
continue;
/* Check if map recovery is needed */
@@ -584,8 +586,15 @@ static int btt_freelist_init(struct arena_info *arena)
NULL, NULL, 0);
if (ret)
return ret;
- if ((le32_to_cpu(log_new.new_map) != map_entry) &&
- (le32_to_cpu(log_new.old_map) == map_entry)) {
+
+ /*
+ * The map_entry from btt_read_map is stripped of any flag bits,
+ * so use the stripped out versions from the log as well for
+ * testing whether recovery is needed. For restoration, use the
+ * 'raw' version of the log entries as that captured what we
+ * were going to write originally.
+ */
+ if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
/*
* Last transaction wrote the flog, but wasn't able
* to complete the map write. So fix up the map.
@@ -1556,7 +1565,7 @@ static int btt_blk_init(struct btt *btt)
}
}
set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
- device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
+ device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
revalidate_disk(btt->btt_disk);
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index db3cb6d4d0d4..ddff49c707b0 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -44,6 +44,8 @@
#define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK))
#define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK))
#define set_e_flag(ent) (ent |= MAP_ERR_MASK)
+/* 'normal' is both e and z flags set */
+#define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent))
enum btt_init_state {
INIT_UNCHECKED = 0,
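
The btt.c/btt.h changes above fix an ordering problem: the on-media log fields are little-endian, so le32_to_cpu() has to run before the flag and LBA macros are applied; masking the raw __le32 value only happens to give the right answer on little-endian hosts. An illustrative fragment (the mask values here merely stand in for the MAP_*_MASK definitions in btt.h):

#include <asm/byteorder.h>
#include <linux/types.h>

#define MY_ERR_MASK	((u32)1 << 30)
#define MY_LBA_MASK	(~((u32)3 << 30))

static bool entry_has_error(__le32 raw)
{
	u32 val = le32_to_cpu(raw);	/* convert first ... */

	return !!(val & MY_ERR_MASK);	/* ... then test the flag bit */
}

static u32 entry_lba(__le32 raw)
{
	return le32_to_cpu(raw) & MY_LBA_MASK;
}
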
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index e341498876ca..9486acc08402 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -159,11 +159,19 @@ static ssize_t size_show(struct device *dev,
}
static DEVICE_ATTR_RO(size);
+static ssize_t log_zero_flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Y\n");
+}
+static DEVICE_ATTR_RO(log_zero_flags);
+
static struct attribute *nd_btt_attributes[] = {
&dev_attr_sector_size.attr,
&dev_attr_namespace.attr,
&dev_attr_uuid.attr,
&dev_attr_size.attr,
+ &dev_attr_log_zero_flags.attr,
NULL,
};
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 54a633e8cb5d..48a070a37ea9 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -984,8 +984,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
return -EFAULT;
}
- if (!desc || (desc->out_num + desc->in_num == 0) ||
- !test_bit(cmd, &cmd_mask))
+ if (!desc ||
+ (desc->out_num + desc->in_num == 0) ||
+ cmd > ND_CMD_CALL ||
+ !test_bit(cmd, &cmd_mask))
return -ENOTTY;
/* fail write commands (when read-only) */
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 863cabc35215..f0e0e3b42c91 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -359,16 +359,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(state);
-static ssize_t available_slots_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
- struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
+ struct device *dev;
ssize_t rc;
u32 nfree;
if (!ndd)
return -ENXIO;
+ dev = ndd->dev;
nvdimm_bus_lock(dev);
nfree = nd_label_nfree(ndd);
if (nfree - 1 > nfree) {
@@ -380,6 +380,18 @@ static ssize_t available_slots_show(struct device *dev,
nvdimm_bus_unlock(dev);
return rc;
}
+
+static ssize_t available_slots_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rc;
+
+ device_lock(dev);
+ rc = __available_slots_show(dev_get_drvdata(dev), buf);
+ device_unlock(dev);
+
+ return rc;
+}
static DEVICE_ATTR_RO(available_slots);
static struct attribute *nvdimm_attributes[] = {
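
The available_slots rework above is the usual guard for sysfs show handlers that touch driver data: take device_lock() around the dev_get_drvdata() access so the attribute cannot race with the driver unbinding and freeing that data. In outline, with a hypothetical __my_show() doing the real formatting:

#include <linux/device.h>

ssize_t __my_show(void *drvdata, char *buf);	/* hypothetical formatter */

static ssize_t my_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	ssize_t rc;

	device_lock(dev);
	rc = __my_show(dev_get_drvdata(dev), buf);
	device_unlock(dev);

	return rc;
}
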
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 9f1b7e3153f9..19e3469d5908 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -861,6 +861,15 @@ static int __blk_label_update(struct nd_region *nd_region,
}
}
+ /* release slots associated with any invalidated UUIDs */
+ mutex_lock(&nd_mapping->lock);
+ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list)
+ if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) {
+ reap_victim(nd_mapping, label_ent);
+ list_move(&label_ent->list, &list);
+ }
+ mutex_unlock(&nd_mapping->lock);
+
/*
* Find the resource associated with the first label in the set
* per the v1.2 namespace specification.
@@ -880,8 +889,10 @@ static int __blk_label_update(struct nd_region *nd_region,
if (is_old_resource(res, old_res_list, old_num_resources))
continue; /* carry-over */
slot = nd_label_alloc_slot(ndd);
- if (slot == UINT_MAX)
+ if (slot == UINT_MAX) {
+ rc = -ENXIO;
goto abort;
+ }
dev_dbg(ndd->dev, "allocated: %d\n", slot);
nd_label = to_label(ndd, slot);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 5dc3b407d7bd..63640c315d93 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1996,7 +1996,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
nd_mapping = &nd_region->mapping[i];
label_ent = list_first_entry_or_null(&nd_mapping->labels,
typeof(*label_ent), list);
- label0 = label_ent ? label_ent->label : 0;
+ label0 = label_ent ? label_ent->label : NULL;
if (!label0) {
WARN_ON(1);
@@ -2332,8 +2332,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
continue;
/* skip labels that describe extents outside of the region */
- if (nd_label->dpa < nd_mapping->start || nd_label->dpa > map_end)
- continue;
+ if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
+ __le64_to_cpu(nd_label->dpa) > map_end)
+ continue;
i = add_namespace_resource(nd_region, nd_label, devs, count);
if (i < 0)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index a7ce2f1761a2..d6f981f6ef59 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -479,7 +479,7 @@ static int pmem_attach_disk(struct device *dev,
gendev = disk_to_dev(disk);
gendev->groups = pmem_attribute_groups;
- device_add_disk(dev, disk);
+ device_add_disk(dev, disk, NULL);
if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a8132e8d72bb..e64310f2296f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -255,11 +255,8 @@ void nvme_complete_rq(struct request *req)
trace_nvme_complete_rq(req);
if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
- if ((req->cmd_flags & REQ_NVME_MPATH) &&
- blk_path_error(status)) {
- nvme_failover_req(req);
+ if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
return;
- }
if (!blk_queue_dying(req->q)) {
nvme_req(req)->retries++;
@@ -926,6 +923,19 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
+/*
+ * In NVMe 1.0 the CNS field was just a binary controller or namespace
+ * flag, thus sending any new CNS opcodes has a big chance of not working.
+ * Qemu unfortunately had that bug after reporting a 1.1 version compliance
+ * (but not for any later version).
+ */
+static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
+ return ctrl->vs < NVME_VS(1, 2, 0);
+ return ctrl->vs < NVME_VS(1, 1, 0);
+}
+
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
struct nvme_command c = { };
@@ -1589,7 +1599,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
- revalidate_disk(ns->head->disk);
+ nvme_mpath_update_disk_size(ns->head->disk);
}
#endif
}
@@ -2081,7 +2091,8 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
if (ctrl->ps_max_latency_us != latency) {
ctrl->ps_max_latency_us = latency;
- nvme_configure_apst(ctrl);
+ if (ctrl->state == NVME_CTRL_LIVE)
+ nvme_configure_apst(ctrl);
}
}
@@ -2200,6 +2211,17 @@ static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
lockdep_assert_held(&nvme_subsystems_lock);
+ /*
+ * Fail matches for discovery subsystems. This results
+ * in each discovery controller bound to a unique subsystem.
+ * This avoids issues with validating controller values
+ * that can only be true when there is a single unique subsystem.
+ * There may be multiple and completely independent entities
+ * that provide discovery controllers.
+ */
+ if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
+ return NULL;
+
list_for_each_entry(subsys, &nvme_subsystems, entry) {
if (strcmp(subsys->subnqn, subsysnqn))
continue;
@@ -2584,10 +2606,26 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
return -EWOULDBLOCK;
}
+ nvme_get_ctrl(ctrl);
+ if (!try_module_get(ctrl->ops->module)) {
+ nvme_put_ctrl(ctrl);
+ return -EINVAL;
+ }
+
file->private_data = ctrl;
return 0;
}
+static int nvme_dev_release(struct inode *inode, struct file *file)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(inode->i_cdev, struct nvme_ctrl, cdev);
+
+ module_put(ctrl->ops->module);
+ nvme_put_ctrl(ctrl);
+ return 0;
+}
+
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
struct nvme_ns *ns;
@@ -2648,6 +2686,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations nvme_dev_fops = {
.owner = THIS_MODULE,
.open = nvme_dev_open,
+ .release = nvme_dev_release,
.unlocked_ioctl = nvme_dev_ioctl,
.compat_ioctl = nvme_dev_ioctl,
};
@@ -2804,6 +2843,14 @@ const struct attribute_group nvme_ns_id_attr_group = {
.is_visible = nvme_ns_id_attrs_are_visible,
};
+const struct attribute_group *nvme_ns_id_attr_groups[] = {
+ &nvme_ns_id_attr_group,
+#ifdef CONFIG_NVM
+ &nvme_nvm_attr_group,
+#endif
+ NULL,
+};
+
#define nvme_show_str_function(field) \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -2835,6 +2882,10 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ /* Can't delete non-created controllers */
+ if (!ctrl->created)
+ return -EBUSY;
+
if (device_remove_file_self(dev, attr))
nvme_delete_ctrl_sync(ctrl);
return count;
@@ -3169,14 +3220,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
nvme_get_ctrl(ctrl);
- device_add_disk(ctrl->device, ns->disk);
- if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
- &nvme_ns_id_attr_group))
- pr_warn("%s: failed to create sysfs group for identification\n",
- ns->disk->disk_name);
- if (ns->ndev && nvme_nvm_register_sysfs(ns))
- pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
- ns->disk->disk_name);
+ device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
nvme_mpath_add_disk(ns, id);
nvme_fault_inject_init(ns);
@@ -3210,10 +3254,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
- sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
- &nvme_ns_id_attr_group);
- if (ns->ndev)
- nvme_nvm_unregister_sysfs(ns);
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
if (blk_get_integrity(ns->disk))
@@ -3357,8 +3397,7 @@ static void nvme_scan_work(struct work_struct *work)
mutex_lock(&ctrl->scan_lock);
nn = le32_to_cpu(id->nn);
- if (ctrl->vs >= NVME_VS(1, 1, 0) &&
- !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
+ if (!nvme_ctrl_limited_cns(ctrl)) {
if (!nvme_scan_ns_list(ctrl, nn))
goto out_free_id;
}
@@ -3556,6 +3595,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
queue_work(nvme_wq, &ctrl->async_event_work);
nvme_start_queues(ctrl);
}
+ ctrl->created = true;
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
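
The nvme_dev_open()/nvme_dev_release() pair added above follows a common char-device rule: open takes both a controller reference and a reference on the transport module, undoing the first if the second fails, and release drops them again, so the ops module cannot be unloaded while the device node is held open. Stripped to the pattern, with invented my_get_ctrl()/my_put_ctrl() refcount helpers:

#include <linux/errno.h>
#include <linux/module.h>

struct my_ops {
	struct module *module;
};

struct my_ctrl {
	const struct my_ops *ops;
};

void my_get_ctrl(struct my_ctrl *ctrl);	/* hypothetical */
void my_put_ctrl(struct my_ctrl *ctrl);	/* hypothetical */

static int my_open(struct my_ctrl *ctrl)
{
	my_get_ctrl(ctrl);
	if (!try_module_get(ctrl->ops->module)) {
		my_put_ctrl(ctrl);	/* undo on failure */
		return -EINVAL;
	}
	return 0;
}

static void my_release(struct my_ctrl *ctrl)
{
	module_put(ctrl->ops->module);
	my_put_ctrl(ctrl);
}
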
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index bcd09d3a44da..05dd46f98441 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -577,7 +577,6 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
* which is require to set the queue live in the appropinquate states.
*/
switch (ctrl->state) {
- case NVME_CTRL_NEW:
case NVME_CTRL_CONNECTING:
if (req->cmd->common.opcode == nvme_fabrics_command &&
req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 1875f6b8a907..ed88d5021772 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -342,8 +342,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
!template->ls_req || !template->fcp_io ||
!template->ls_abort || !template->fcp_abort ||
!template->max_hw_queues || !template->max_sgl_segments ||
- !template->max_dif_sgl_segments || !template->dma_boundary ||
- !template->module) {
+ !template->max_dif_sgl_segments || !template->dma_boundary) {
ret = -EINVAL;
goto out_reghost_failed;
}
@@ -1717,7 +1716,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
dev_err(ctrl->dev,
"FCP Op failed - cmdiu dma mapping failed.\n");
- ret = EFAULT;
+ ret = -EFAULT;
goto out_on_error;
}
@@ -1727,7 +1726,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
dev_err(ctrl->dev,
"FCP Op failed - rspiu dma mapping failed.\n");
- ret = EFAULT;
+ ret = -EFAULT;
}
atomic_set(&op->state, FCPOP_STATE_IDLE);
@@ -1792,6 +1791,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_fcp_op *aen_op;
int i;
+ cancel_work_sync(&ctrl->ctrl.async_event_work);
aen_op = ctrl->aen_ops;
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
if (!aen_op->fcp_req.private)
@@ -1987,7 +1987,6 @@ nvme_fc_ctrl_free(struct kref *ref)
{
struct nvme_fc_ctrl *ctrl =
container_of(ref, struct nvme_fc_ctrl, ref);
- struct nvme_fc_lport *lport = ctrl->lport;
unsigned long flags;
if (ctrl->ctrl.tagset) {
@@ -2013,7 +2012,6 @@ nvme_fc_ctrl_free(struct kref *ref)
if (ctrl->ctrl.opts)
nvmf_free_options(ctrl->ctrl.opts);
kfree(ctrl);
- module_put(lport->ops->module);
}
static void
@@ -3055,15 +3053,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail;
}
- if (!try_module_get(lport->ops->module)) {
- ret = -EUNATCH;
- goto out_free_ctrl;
- }
-
idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
- goto out_mod_put;
+ goto out_free_ctrl;
}
ctrl->ctrl.opts = opts;
@@ -3205,8 +3198,6 @@ out_free_queues:
out_free_ida:
put_device(ctrl->dev);
ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
-out_mod_put:
- module_put(lport->ops->module);
out_free_ctrl:
kfree(ctrl);
out_fail:
@@ -3303,12 +3294,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
spin_lock_irqsave(&nvme_fc_lock, flags);
list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
if (lport->localport.node_name != laddr.nn ||
- lport->localport.port_name != laddr.pn)
+ lport->localport.port_name != laddr.pn ||
+ lport->localport.port_state != FC_OBJSTATE_ONLINE)
continue;
list_for_each_entry(rport, &lport->endp_list, endp_list) {
if (rport->remoteport.node_name != raddr.nn ||
- rport->remoteport.port_name != raddr.pn)
+ rport->remoteport.port_name != raddr.pn ||
+ rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
continue;
/* if fail to get reference fall through. Will error */
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index a69553e75f38..d10257b9c523 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -1193,10 +1193,29 @@ static NVM_DEV_ATTR_12_RO(multiplane_modes);
static NVM_DEV_ATTR_12_RO(media_capabilities);
static NVM_DEV_ATTR_12_RO(max_phys_secs);
-static struct attribute *nvm_dev_attrs_12[] = {
+/* 2.0 values */
+static NVM_DEV_ATTR_20_RO(groups);
+static NVM_DEV_ATTR_20_RO(punits);
+static NVM_DEV_ATTR_20_RO(chunks);
+static NVM_DEV_ATTR_20_RO(clba);
+static NVM_DEV_ATTR_20_RO(ws_min);
+static NVM_DEV_ATTR_20_RO(ws_opt);
+static NVM_DEV_ATTR_20_RO(maxoc);
+static NVM_DEV_ATTR_20_RO(maxocpu);
+static NVM_DEV_ATTR_20_RO(mw_cunits);
+static NVM_DEV_ATTR_20_RO(write_typ);
+static NVM_DEV_ATTR_20_RO(write_max);
+static NVM_DEV_ATTR_20_RO(reset_typ);
+static NVM_DEV_ATTR_20_RO(reset_max);
+
+static struct attribute *nvm_dev_attrs[] = {
+ /* version agnostic attrs */
&dev_attr_version.attr,
&dev_attr_capabilities.attr,
+ &dev_attr_read_typ.attr,
+ &dev_attr_read_max.attr,
+ /* 1.2 attrs */
&dev_attr_vendor_opcode.attr,
&dev_attr_device_mode.attr,
&dev_attr_media_manager.attr,
@@ -1211,8 +1230,6 @@ static struct attribute *nvm_dev_attrs_12[] = {
&dev_attr_page_size.attr,
&dev_attr_hw_sector_size.attr,
&dev_attr_oob_sector_size.attr,
- &dev_attr_read_typ.attr,
- &dev_attr_read_max.attr,
&dev_attr_prog_typ.attr,
&dev_attr_prog_max.attr,
&dev_attr_erase_typ.attr,
@@ -1221,33 +1238,7 @@ static struct attribute *nvm_dev_attrs_12[] = {
&dev_attr_media_capabilities.attr,
&dev_attr_max_phys_secs.attr,
- NULL,
-};
-
-static const struct attribute_group nvm_dev_attr_group_12 = {
- .name = "lightnvm",
- .attrs = nvm_dev_attrs_12,
-};
-
-/* 2.0 values */
-static NVM_DEV_ATTR_20_RO(groups);
-static NVM_DEV_ATTR_20_RO(punits);
-static NVM_DEV_ATTR_20_RO(chunks);
-static NVM_DEV_ATTR_20_RO(clba);
-static NVM_DEV_ATTR_20_RO(ws_min);
-static NVM_DEV_ATTR_20_RO(ws_opt);
-static NVM_DEV_ATTR_20_RO(maxoc);
-static NVM_DEV_ATTR_20_RO(maxocpu);
-static NVM_DEV_ATTR_20_RO(mw_cunits);
-static NVM_DEV_ATTR_20_RO(write_typ);
-static NVM_DEV_ATTR_20_RO(write_max);
-static NVM_DEV_ATTR_20_RO(reset_typ);
-static NVM_DEV_ATTR_20_RO(reset_max);
-
-static struct attribute *nvm_dev_attrs_20[] = {
- &dev_attr_version.attr,
- &dev_attr_capabilities.attr,
-
+ /* 2.0 attrs */
&dev_attr_groups.attr,
&dev_attr_punits.attr,
&dev_attr_chunks.attr,
@@ -1258,8 +1249,6 @@ static struct attribute *nvm_dev_attrs_20[] = {
&dev_attr_maxocpu.attr,
&dev_attr_mw_cunits.attr,
- &dev_attr_read_typ.attr,
- &dev_attr_read_max.attr,
&dev_attr_write_typ.attr,
&dev_attr_write_max.attr,
&dev_attr_reset_typ.attr,
@@ -1268,44 +1257,38 @@ static struct attribute *nvm_dev_attrs_20[] = {
NULL,
};
-static const struct attribute_group nvm_dev_attr_group_20 = {
- .name = "lightnvm",
- .attrs = nvm_dev_attrs_20,
-};
-
-int nvme_nvm_register_sysfs(struct nvme_ns *ns)
+static umode_t nvm_dev_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct gendisk *disk = dev_to_disk(dev);
+ struct nvme_ns *ns = disk->private_data;
struct nvm_dev *ndev = ns->ndev;
- struct nvm_geo *geo = &ndev->geo;
+ struct device_attribute *dev_attr =
+ container_of(attr, typeof(*dev_attr), attr);
if (!ndev)
- return -EINVAL;
-
- switch (geo->major_ver_id) {
- case 1:
- return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group_12);
- case 2:
- return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group_20);
- }
-
- return -EINVAL;
-}
+ return 0;
-void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
-{
- struct nvm_dev *ndev = ns->ndev;
- struct nvm_geo *geo = &ndev->geo;
+ if (dev_attr->show == nvm_dev_attr_show)
+ return attr->mode;
- switch (geo->major_ver_id) {
+ switch (ndev->geo.major_ver_id) {
case 1:
- sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group_12);
+ if (dev_attr->show == nvm_dev_attr_show_12)
+ return attr->mode;
break;
case 2:
- sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group_20);
+ if (dev_attr->show == nvm_dev_attr_show_20)
+ return attr->mode;
break;
}
+
+ return 0;
}
+
+const struct attribute_group nvme_nvm_attr_group = {
+ .name = "lightnvm",
+ .attrs = nvm_dev_attrs,
+ .is_visible = nvm_dev_attrs_visible,
+};
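
The lightnvm conversion above folds the separate 1.2 and 2.0 sysfs groups into a single attribute_group whose .is_visible callback decides, per device, which attributes appear; that is what lets the group array simply be handed to device_add_disk() elsewhere in the series. A reduced sketch, where the attribute array and the predicate are placeholders:

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/types.h>

static struct attribute my_attr_version = {
	.name = "version",
	.mode = 0444,
};

static struct attribute *my_attrs[] = {
	&my_attr_version,
	NULL,
};

bool my_dev_wants(struct device *dev, struct attribute *attr);	/* hypothetical */

static umode_t my_attrs_visible(struct kobject *kobj,
				struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);

	if (!my_dev_wants(dev, attr))
		return 0;		/* hide this attribute on this device */
	return attr->mode;		/* otherwise keep its default mode */
}

static const struct attribute_group my_group = {
	.name		= "lightnvm",
	.attrs		= my_attrs,
	.is_visible	= my_attrs_visible,
};
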
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index e8bc25aed44c..64f699a1afd7 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -11,6 +11,7 @@
* more details.
*/
+#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"
@@ -72,17 +73,12 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
}
}
-void nvme_failover_req(struct request *req)
+bool nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
u16 status = nvme_req(req)->status;
unsigned long flags;
- spin_lock_irqsave(&ns->head->requeue_lock, flags);
- blk_steal_bios(&ns->head->requeue_list, req);
- spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
- blk_mq_end_request(req, 0);
-
switch (status & 0x7ff) {
case NVME_SC_ANA_TRANSITION:
case NVME_SC_ANA_INACCESSIBLE:
@@ -110,15 +106,17 @@ void nvme_failover_req(struct request *req)
nvme_mpath_clear_current_path(ns);
break;
default:
- /*
- * Reset the controller for any non-ANA error as we don't know
- * what caused the error.
- */
- nvme_reset_ctrl(ns->ctrl);
- break;
+ /* This was a non-ANA error so follow the normal error path. */
+ return false;
}
+ spin_lock_irqsave(&ns->head->requeue_lock, flags);
+ blk_steal_bios(&ns->head->requeue_list, req);
+ spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
+ blk_mq_end_request(req, 0);
+
kblockd_schedule_work(&ns->head->requeue_work);
+ return true;
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
@@ -315,13 +313,9 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
if (!head->disk)
return;
- if (!(head->disk->flags & GENHD_FL_UP)) {
- device_add_disk(&head->subsys->dev, head->disk);
- if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
- &nvme_ns_id_attr_group))
- dev_warn(&head->subsys->dev,
- "failed to create id group.\n");
- }
+ if (!(head->disk->flags & GENHD_FL_UP))
+ device_add_disk(&head->subsys->dev, head->disk,
+ nvme_ns_id_attr_groups);
synchronize_srcu(&ns->head->srcu);
kblockd_schedule_work(&ns->head->requeue_work);
@@ -402,7 +396,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
if (!nr_nsids)
return 0;
- down_write(&ctrl->namespaces_rwsem);
+ down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
unsigned nsid = le32_to_cpu(desc->nsids[n]);
@@ -413,7 +407,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
if (++n == nr_nsids)
break;
}
- up_write(&ctrl->namespaces_rwsem);
+ up_read(&ctrl->namespaces_rwsem);
return 0;
}
@@ -495,43 +489,60 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
}
DEVICE_ATTR_RO(ana_state);
-static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
+static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
struct nvme_ana_group_desc *desc, void *data)
{
- struct nvme_ns *ns = data;
+ struct nvme_ana_group_desc *dst = data;
- if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
- nvme_update_ns_ana_state(desc, ns);
- return -ENXIO; /* just break out of the loop */
- }
+ if (desc->grpid != dst->grpid)
+ return 0;
- return 0;
+ *dst = *desc;
+ return -ENXIO; /* just break out of the loop */
}
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
if (nvme_ctrl_use_ana(ns->ctrl)) {
+ struct nvme_ana_group_desc desc = {
+ .grpid = id->anagrpid,
+ .state = 0,
+ };
+
mutex_lock(&ns->ctrl->ana_lock);
ns->ana_grpid = le32_to_cpu(id->anagrpid);
- nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
+ nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
mutex_unlock(&ns->ctrl->ana_lock);
+ if (desc.state) {
+ /* found the group desc: update */
+ nvme_update_ns_ana_state(&desc, ns);
+ } else {
+ /* group desc not found: trigger a re-read */
+ set_bit(NVME_NS_ANA_PENDING, &ns->flags);
+ queue_work(nvme_wq, &ns->ctrl->ana_work);
+ }
} else {
mutex_lock(&ns->head->lock);
ns->ana_state = NVME_ANA_OPTIMIZED;
nvme_mpath_set_live(ns);
mutex_unlock(&ns->head->lock);
}
+
+ if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
+ struct gendisk *disk = ns->head->disk;
+
+ if (disk)
+ disk->queue->backing_dev_info->capabilities |=
+ BDI_CAP_STABLE_WRITES;
+ }
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- if (head->disk->flags & GENHD_FL_UP) {
- sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
- &nvme_ns_id_attr_group);
+ if (head->disk->flags & GENHD_FL_UP)
del_gendisk(head->disk);
- }
blk_set_queue_dying(head->disk->queue);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cc4273f11989..276975506709 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -206,6 +206,7 @@ struct nvme_ctrl {
struct nvme_command ka_cmd;
struct work_struct fw_act_work;
unsigned long events;
+ bool created;
#ifdef CONFIG_NVME_MULTIPATH
/* asymmetric namespace access: */
@@ -463,7 +464,7 @@ int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
void *log, size_t size, u64 offset);
-extern const struct attribute_group nvme_ns_id_attr_group;
+extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
@@ -477,7 +478,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
-void nvme_failover_req(struct request *req);
+bool nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
@@ -503,6 +504,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
kblockd_schedule_work(&head->requeue_work);
}
+static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
+{
+ struct block_device *bdev = bdget_disk(disk, 0);
+
+ if (bdev) {
+ bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT);
+ bdput(bdev);
+ }
+}
+
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
@@ -521,8 +532,9 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}
-static inline void nvme_failover_req(struct request *req)
+static inline bool nvme_failover_req(struct request *req)
{
+ return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
@@ -568,14 +580,16 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
+static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
+{
+}
#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
-int nvme_nvm_register_sysfs(struct nvme_ns *ns);
-void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
+extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
@@ -586,11 +600,6 @@ static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
}
static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
-static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
-{
- return 0;
-}
-static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
unsigned long arg)
{
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3c68a5b35ec1..82d87d2e280c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -276,9 +276,21 @@ static void nvme_dbbuf_init(struct nvme_dev *dev,
nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}
+static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
+{
+ if (!nvmeq->qid)
+ return;
+
+ nvmeq->dbbuf_sq_db = NULL;
+ nvmeq->dbbuf_cq_db = NULL;
+ nvmeq->dbbuf_sq_ei = NULL;
+ nvmeq->dbbuf_cq_ei = NULL;
+}
+
static void nvme_dbbuf_set(struct nvme_dev *dev)
{
struct nvme_command c;
+ unsigned int i;
if (!dev->dbbuf_dbs)
return;
@@ -292,6 +304,9 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
/* Free memory and continue on */
nvme_dbbuf_dma_free(dev);
+
+ for (i = 1; i <= dev->online_queues; i++)
+ nvme_dbbuf_free(&dev->queues[i]);
}
}
@@ -2718,6 +2733,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
{ 0, }
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 9711bfbdf431..8798274dc3ba 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -118,6 +118,7 @@ struct nvme_rdma_ctrl {
struct sockaddr_storage src_addr;
struct nvme_ctrl ctrl;
+ struct mutex teardown_lock;
bool use_inline_data;
};
@@ -447,7 +448,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
* Spread I/O queue completion vectors according to their queue index.
* Admin queues can always go on completion vector 0.
*/
- comp_vector = idx == 0 ? idx : idx - 1;
+ comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
/* +1 for ib_stop_cq */
queue->ib_cq = ib_alloc_cq(ibdev, queue,
@@ -643,8 +644,11 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
return ret;
ctrl->ctrl.queue_count = nr_io_queues + 1;
- if (ctrl->ctrl.queue_count < 2)
- return 0;
+ if (ctrl->ctrl.queue_count < 2) {
+ dev_err(ctrl->ctrl.device,
+ "unable to set any I/O queues\n");
+ return -ENOMEM;
+ }
dev_info(ctrl->ctrl.device,
"creating %d I/O queues.\n", nr_io_queues);
@@ -739,6 +743,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
}
if (ctrl->async_event_sqe.data) {
+ cancel_work_sync(&ctrl->ctrl.async_event_work);
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
ctrl->async_event_sqe.data = NULL;
@@ -880,6 +885,7 @@ out_free_io_queues:
static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
+ mutex_lock(&ctrl->teardown_lock);
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
if (ctrl->ctrl.admin_tagset)
@@ -887,11 +893,13 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
nvme_cancel_request, &ctrl->ctrl);
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_destroy_admin_queue(ctrl, remove);
+ mutex_unlock(&ctrl->teardown_lock);
}
static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
+ mutex_lock(&ctrl->teardown_lock);
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
@@ -902,6 +910,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, remove);
}
+ mutex_unlock(&ctrl->teardown_lock);
}
static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
@@ -1634,7 +1643,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
complete(&queue->cm_done);
return 0;
case RDMA_CM_EVENT_REJECTED:
- nvme_rdma_destroy_queue_ib(queue);
cm_error = nvme_rdma_conn_rejected(queue, ev);
break;
case RDMA_CM_EVENT_ROUTE_ERROR:
@@ -1955,6 +1963,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
return ERR_PTR(-ENOMEM);
ctrl->ctrl.opts = opts;
INIT_LIST_HEAD(&ctrl->list);
+ mutex_init(&ctrl->teardown_lock);
if (opts->mask & NVMF_OPT_TRSVCID)
port = opts->trsvcid;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 776b7e9e23b9..1a35d73c39c3 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -307,6 +307,9 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
pr_debug("ctrl %d start keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
@@ -316,6 +319,9 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
cancel_delayed_work_sync(&ctrl->ka_work);
@@ -764,9 +770,20 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
lockdep_assert_held(&ctrl->lock);
- if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
- nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
- nvmet_cc_mps(ctrl->cc) != 0 ||
+ /*
+ * Only I/O controllers should verify iosqes,iocqes.
+ * Strictly speaking, the spec says a discovery controller
+ * should verify that iosqes and iocqes are zeroed; however, that
+ * would break backwards compatibility, so don't enforce it.
+ */
+ if (ctrl->subsys->type != NVME_NQN_DISC &&
+ (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
+ nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
+ ctrl->csts = NVME_CSTS_CFS;
+ return;
+ }
+
+ if (nvmet_cc_mps(ctrl->cc) != 0 ||
nvmet_cc_ams(ctrl->cc) != 0 ||
nvmet_cc_css(ctrl->cc) != 0) {
ctrl->csts = NVME_CSTS_CFS;
@@ -781,7 +798,8 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
* in case a host died before it enabled the controller. Hence, simply
* reset the keep alive timer when the controller is enabled.
*/
- mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ if (ctrl->kato)
+ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 29b4b236afd8..77e4d184bc99 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1986,9 +1986,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
return;
if (fcpreq->fcp_error ||
fcpreq->transferred_length != fcpreq->transfer_length) {
- spin_lock(&fod->flock);
+ spin_lock_irqsave(&fod->flock, flags);
fod->abort = true;
- spin_unlock(&fod->flock);
+ spin_unlock_irqrestore(&fod->flock, flags);
nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
return;
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index f0536d341f2f..291f4121f516 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -825,7 +825,6 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
static struct nvme_fc_port_template fctemplate = {
- .module = THIS_MODULE,
.localport_delete = fcloop_localport_delete,
.remoteport_delete = fcloop_remoteport_delete,
.create_queue = fcloop_create_queue,
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 39d972e2595f..ad6263cf7303 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -38,9 +38,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
ns->file = filp_open(ns->device_path, flags, 0);
if (IS_ERR(ns->file)) {
- pr_err("failed to open file %s: (%ld)\n",
- ns->device_path, PTR_ERR(ns->file));
- return PTR_ERR(ns->file);
+ ret = PTR_ERR(ns->file);
+ pr_err("failed to open file %s: (%d)\n",
+ ns->device_path, ret);
+ ns->file = NULL;
+ return ret;
}
ret = vfs_getattr(&ns->file->f_path,
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 08f997a390d5..cfd26437aeae 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -83,6 +83,7 @@ enum nvmet_rdma_queue_state {
struct nvmet_rdma_queue {
struct rdma_cm_id *cm_id;
+ struct ib_qp *qp;
struct nvmet_port *port;
struct ib_cq *cq;
atomic_t sq_wr_avail;
@@ -471,7 +472,7 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
if (ndev->srq)
ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
else
- ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
+ ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
if (unlikely(ret))
pr_err("post_recv cmd failed\n");
@@ -510,7 +511,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
if (rsp->n_rdma) {
- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+ rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
queue->cm_id->port_num, rsp->req.sg,
rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
}
@@ -594,7 +595,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(rsp->n_rdma <= 0);
atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+ rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
queue->cm_id->port_num, rsp->req.sg,
rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
rsp->n_rdma = 0;
@@ -737,7 +738,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
}
if (nvmet_rdma_need_data_in(rsp)) {
- if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
+ if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
queue->cm_id->port_num, &rsp->read_cqe, NULL))
nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
} else {
@@ -1020,6 +1021,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
pr_err("failed to create_qp ret= %d\n", ret);
goto err_destroy_cq;
}
+ queue->qp = queue->cm_id->qp;
atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
@@ -1048,11 +1050,10 @@ err_destroy_cq:
static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
- struct ib_qp *qp = queue->cm_id->qp;
-
- ib_drain_qp(qp);
- rdma_destroy_id(queue->cm_id);
- ib_destroy_qp(qp);
+ ib_drain_qp(queue->qp);
+ if (queue->cm_id)
+ rdma_destroy_id(queue->cm_id);
+ ib_destroy_qp(queue->qp);
ib_free_cq(queue->cq);
}
@@ -1286,9 +1287,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
if (ret) {
- schedule_work(&queue->release_work);
- /* Destroying rdma_cm id is not needed here */
- return 0;
+ /*
+ * Don't destroy the cm_id in free path, as we implicitly
+ * destroy the cm_id here with non-zero ret code.
+ */
+ queue->cm_id = NULL;
+ goto free_queue;
}
mutex_lock(&nvmet_rdma_queue_mutex);
@@ -1297,6 +1301,8 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
return 0;
+free_queue:
+ nvmet_rdma_free_queue(queue);
put_device:
kref_put(&ndev->ref, nvmet_rdma_free_dev);
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index fbb1f1df6fc7..e62926b295db 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -35,25 +35,11 @@ static int qfprom_reg_read(void *context,
return 0;
}
-static int qfprom_reg_write(void *context,
- unsigned int reg, void *_val, size_t bytes)
-{
- struct qfprom_priv *priv = context;
- u8 *val = _val;
- int i = 0, words = bytes;
-
- while (words--)
- writeb(*val++, priv->base + reg + i++);
-
- return 0;
-}
-
static struct nvmem_config econfig = {
.name = "qfprom",
.stride = 1,
.word_size = 1,
.reg_read = qfprom_reg_read,
- .reg_write = qfprom_reg_write,
};
static int qfprom_probe(struct platform_device *pdev)
diff --git a/drivers/of/address.c b/drivers/of/address.c
index c42aebba35ab..30806dd35735 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -975,11 +975,13 @@ EXPORT_SYMBOL_GPL(of_dma_get_range);
*/
bool of_dma_is_coherent(struct device_node *np)
{
- struct device_node *node = of_node_get(np);
+ struct device_node *node;
if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
return true;
+ node = of_node_get(np);
+
while (node) {
if (of_property_read_bool(node, "dma-coherent")) {
of_node_put(node);
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
index c72eef988041..a32e60b024b8 100644
--- a/drivers/of/kobj.c
+++ b/drivers/of/kobj.c
@@ -134,8 +134,6 @@ int __of_attach_node_sysfs(struct device_node *np)
if (!name)
return -ENOMEM;
- of_node_get(np);
-
rc = kobject_add(&np->kobj, parent, "%s", name);
kfree(name);
if (rc)
@@ -144,6 +142,7 @@ int __of_attach_node_sysfs(struct device_node *np)
for_each_property_of_node(np, pp)
__of_add_property_sysfs(np, pp);
+ of_node_get(np);
return 0;
}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index af7572fe090f..100adacfdca9 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -267,10 +267,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
child, addr);
if (of_mdiobus_child_is_phy(child)) {
+ /* -ENODEV is the return code that PHYLIB has
+ * standardized on to indicate that bus
+ * scanning should continue.
+ */
rc = of_mdiobus_register_phy(mdio, child, addr);
- if (rc && rc != -ENODEV)
+ if (!rc)
+ break;
+ if (rc != -ENODEV)
goto unregister;
- break;
}
}
}
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 895c83e0c7b6..19f95552da4d 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -218,6 +218,16 @@ static int __init __rmem_cmp(const void *a, const void *b)
if (ra->base > rb->base)
return 1;
+ /*
+ * Put the dynamic allocations (address == 0, size == 0) before static
+ * allocations at address 0x0 so that overlap detection works
+ * correctly.
+ */
+ if (ra->size < rb->size)
+ return -1;
+ if (ra->size > rb->size)
+ return 1;
+
return 0;
}
@@ -235,8 +245,7 @@ static void __init __rmem_check_for_overlap(void)
this = &reserved_mem[i];
next = &reserved_mem[i + 1];
- if (!(this->base && next->base))
- continue;
+
if (this->base + this->size > next->base) {
phys_addr_t this_end, next_end;
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 514528b3566f..a77bfeac867d 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -261,6 +261,8 @@ static struct property *dup_and_fixup_symbol_prop(
of_property_set_flag(new_prop, OF_DYNAMIC);
+ kfree(target_path);
+
return new_prop;
err_free_new_prop:
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 808571f7f6ef..29f17c3449aa 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -772,6 +772,10 @@ static void __init of_unittest_changeset(void)
unittest(!of_changeset_revert(&chgset), "revert failed\n");
of_changeset_destroy(&chgset);
+
+ of_node_put(n1);
+ of_node_put(n2);
+ of_node_put(n21);
#endif
}
@@ -1055,10 +1059,13 @@ static void __init of_unittest_platform_populate(void)
of_platform_populate(np, match, NULL, &test_bus->dev);
for_each_child_of_node(np, child) {
- for_each_child_of_node(child, grandchild)
- unittest(of_find_device_by_node(grandchild),
+ for_each_child_of_node(child, grandchild) {
+ pdev = of_find_device_by_node(grandchild);
+ unittest(pdev,
"Could not create device for node '%pOFn'\n",
grandchild);
+ of_dev_put(pdev);
+ }
}
of_platform_depopulate(&test_bus->dev);
@@ -2441,8 +2448,11 @@ static __init void of_unittest_overlay_high_level(void)
goto err_unlock;
}
if (__of_add_property(of_symbols, new_prop)) {
+ kfree(new_prop->name);
+ kfree(new_prop->value);
+ kfree(new_prop);
/* "name" auto-generated by unflatten */
- if (!strcmp(new_prop->name, "name"))
+ if (!strcmp(prop->name, "name"))
continue;
unittest(0, "duplicate property '%s' in overlay_base node __symbols__",
prop->name);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 6dd1780a5885..0f19cc75cc0c 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1291,7 +1291,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
** (one that doesn't overlap memory or LMMIO space) in the
** IBASE and IMASK registers.
*/
- ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
+ ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL;
iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index a3ad2fe185b9..3c8ffd62dc00 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -204,17 +204,13 @@ EXPORT_SYMBOL(pci_bus_set_ops);
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);
static noinline void pci_wait_cfg(struct pci_dev *dev)
+ __must_hold(&pci_lock)
{
- DECLARE_WAITQUEUE(wait, current);
-
- __add_wait_queue(&pci_cfg_wait, &wait);
do {
- set_current_state(TASK_UNINTERRUPTIBLE);
raw_spin_unlock_irq(&pci_lock);
- schedule();
+ wait_event(pci_cfg_wait, !dev->block_cfg_access);
raw_spin_lock_irq(&pci_lock);
} while (dev->block_cfg_access);
- __remove_wait_queue(&pci_cfg_wait, &wait);
}
/* Returns 0 on success, negative values indicate error. */
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 5cb40b2518f9..87a2829dffd4 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -323,12 +323,8 @@ void pci_bus_add_device(struct pci_dev *dev)
dev->match_driver = true;
retval = device_attach(&dev->dev);
- if (retval < 0 && retval != -EPROBE_DEFER) {
+ if (retval < 0 && retval != -EPROBE_DEFER)
pci_warn(dev, "device attach failed (%d)\n", retval);
- pci_proc_detach_device(dev);
- pci_remove_sysfs_dev_files(dev);
- return;
- }
pci_dev_assign_added(dev, true);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 6d4ef0101ef6..be62f654c8eb 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -285,6 +285,8 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
return -ENOMEM;
}
+ irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
+
pp->msi_domain = pci_msi_create_irq_domain(fwnode,
&dw_pcie_msi_domain_info,
pp->irq_domain);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index e292801fff7f..33e510393976 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -45,7 +45,13 @@
#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
#define PCIE20_PARF_PHY_CTRL 0x40
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16)
+
#define PCIE20_PARF_PHY_REFCLK 0x4C
+#define PHY_REFCLK_SSP_EN BIT(16)
+#define PHY_REFCLK_USE_PAD BIT(12)
+
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
@@ -76,6 +82,18 @@
#define DBI_RO_WR_EN 1
#define PERST_DELAY_US 1000
+/* PARF registers */
+#define PCIE20_PARF_PCS_DEEMPH 0x34
+#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0)
+
+#define PCIE20_PARF_PCS_SWING 0x38
+#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8)
+#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0)
+
+#define PCIE20_PARF_CONFIG_BITS 0x50
+#define PHY_RX0_EQ(x) ((x) << 24)
#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
#define SLV_ADDR_SPACE_SZ 0x10000000
@@ -90,6 +108,7 @@ struct qcom_pcie_resources_2_1_0 {
struct reset_control *ahb_reset;
struct reset_control *por_reset;
struct reset_control *phy_reset;
+ struct reset_control *ext_reset;
struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
@@ -251,6 +270,10 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
if (IS_ERR(res->por_reset))
return PTR_ERR(res->por_reset);
+ res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
+ if (IS_ERR(res->ext_reset))
+ return PTR_ERR(res->ext_reset);
+
res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
return PTR_ERR_OR_ZERO(res->phy_reset);
}
@@ -263,6 +286,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
reset_control_assert(res->axi_reset);
reset_control_assert(res->ahb_reset);
reset_control_assert(res->por_reset);
+ reset_control_assert(res->ext_reset);
reset_control_assert(res->pci_reset);
clk_disable_unprepare(res->iface_clk);
clk_disable_unprepare(res->core_clk);
@@ -275,6 +299,7 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
u32 val;
int ret;
@@ -314,14 +339,42 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
goto err_deassert_ahb;
}
+ ret = reset_control_deassert(res->ext_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ext reset\n");
+ goto err_deassert_ahb;
+ }
+
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
val &= ~BIT(0);
writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+ writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
+ PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
+ PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
+ pcie->parf + PCIE20_PARF_PCS_DEEMPH);
+ writel(PCS_SWING_TX_SWING_FULL(120) |
+ PCS_SWING_TX_SWING_LOW(120),
+ pcie->parf + PCIE20_PARF_PCS_SWING);
+ writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
+ }
+
+ if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+ /* set TX termination offset */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
+ val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ }
+
/* enable external reference clock */
val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
- val |= BIT(16);
+ /* USE_PAD is required only for ipq806x */
+ if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
+ val &= ~PHY_REFCLK_USE_PAD;
+ val |= PHY_REFCLK_SSP_EN;
writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
ret = reset_control_deassert(res->phy_reset);
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 6b4555ff2548..0235b6e7dcd1 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -321,10 +321,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_pcie_wait_for_link(pcie);
- reg = PCIE_CORE_LINK_L0S_ENTRY |
- (1 << PCIE_CORE_LINK_WIDTH_SHIFT);
- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
-
reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
PCIE_CORE_CMD_IO_ACCESS_EN |
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 6f86583605a4..097c02197ec8 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -2400,7 +2400,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
err = pm_runtime_get_sync(pcie->dev);
if (err < 0) {
dev_err(dev, "fail to enable pcie controller: %d\n", err);
- goto teardown_msi;
+ goto pm_runtime_put;
}
err = tegra_pcie_request_resources(pcie);
@@ -2440,7 +2440,6 @@ free_resources:
pm_runtime_put:
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
-teardown_msi:
tegra_pcie_msi_teardown(pcie);
put_resources:
tegra_pcie_put_resources(pcie);
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index 32d1d7b81ef4..18715d2ce022 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -116,7 +116,7 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
* the config space access window. Since we are working with
* the high-order 32 bits, shift everything down by 32 bits.
*/
- node_bits = (cfg->res.start >> 32) & (1 << 12);
+ node_bits = upper_32_bits(cfg->res.start) & (1 << 12);
v |= node_bits;
set_val(v, where, size, val);
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index f127ce8bd4ef..1650ec2c35f9 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -11,6 +11,7 @@
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "../pci.h"
#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
@@ -314,9 +315,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
* structure here for the BAR.
*/
bar4_start = res_pem->start + 0xf00000;
- pem_pci->ea_entry[0] = (u32)bar4_start | 2;
- pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
- pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
+ pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2;
+ pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u;
+ pem_pci->ea_entry[2] = upper_32_bits(bar4_start);
cfg->priv = pem_pci;
return 0;
@@ -324,9 +325,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-#define PEM_RES_BASE 0x87e0c0000000UL
-#define PEM_NODE_MASK GENMASK(45, 44)
-#define PEM_INDX_MASK GENMASK(26, 24)
+#define PEM_RES_BASE 0x87e0c0000000ULL
+#define PEM_NODE_MASK GENMASK_ULL(45, 44)
+#define PEM_INDX_MASK GENMASK_ULL(26, 24)
#define PEM_MIN_DOM_IN_NODE 4
#define PEM_MAX_DOM_IN_NODE 10
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index d219404bad92..9a86bb7448ac 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -743,7 +743,7 @@ static int v3_pci_probe(struct platform_device *pdev)
int ret;
LIST_HEAD(res);
- host = pci_alloc_host_bridge(sizeof(*v3));
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*v3));
if (!host)
return -ENOMEM;
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index f4c02da84e59..0bfa5065b440 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -384,13 +384,9 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
if (!msi_group->gic_irq)
continue;
- irq_set_chained_handler(msi_group->gic_irq,
- xgene_msi_isr);
- err = irq_set_handler_data(msi_group->gic_irq, msi_group);
- if (err) {
- pr_err("failed to register GIC IRQ handler\n");
- return -EINVAL;
- }
+ irq_set_chained_handler_and_data(msi_group->gic_irq,
+ xgene_msi_isr, msi_group);
+
/*
* Statically allocate MSI GIC IRQs to each CPU core.
* With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c
index ec394f6a19c8..ae7affcb1a81 100644
--- a/drivers/pci/controller/pcie-cadence-host.c
+++ b/drivers/pci/controller/pcie-cadence-host.c
@@ -102,6 +102,7 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
u32 value, ctrl;
+ u32 id;
/*
* Set the root complex BAR configuration register:
@@ -121,8 +122,12 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
/* Set root port configuration space */
- if (rc->vendor_id != 0xffff)
- cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
+ if (rc->vendor_id != 0xffff) {
+ id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
+ CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
+ }
+
if (rc->device_id != 0xffff)
cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 9deb56989d72..dc953c73cb56 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -209,15 +209,20 @@ static int iproc_msi_irq_set_affinity(struct irq_data *data,
struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
int target_cpu = cpumask_first(mask);
int curr_cpu;
+ int ret;
curr_cpu = hwirq_to_cpu(msi, data->hwirq);
if (curr_cpu == target_cpu)
- return IRQ_SET_MASK_OK_DONE;
+ ret = IRQ_SET_MASK_OK_DONE;
+ else {
+ /* steer MSI to the target CPU */
+ data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
+ ret = IRQ_SET_MASK_OK;
+ }
- /* steer MSI to the target CPU */
- data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
+ irq_data_update_effective_affinity(data, cpumask_of(target_cpu));
- return IRQ_SET_MASK_OK;
+ return ret;
}
static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
@@ -266,7 +271,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
NULL, NULL);
}
- return hwirq;
+ return 0;
}
static void iproc_msi_irq_domain_free(struct irq_domain *domain,
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index ec86414216f9..f2d79e0235bc 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -300,7 +300,7 @@ enum iproc_pcie_reg {
};
/* iProc PCIe PAXB BCMA registers */
-static const u16 iproc_pcie_reg_paxb_bcma[] = {
+static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
[IPROC_PCIE_CFG_IND_DATA] = 0x124,
@@ -311,7 +311,7 @@ static const u16 iproc_pcie_reg_paxb_bcma[] = {
};
/* iProc PCIe PAXB registers */
-static const u16 iproc_pcie_reg_paxb[] = {
+static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
[IPROC_PCIE_CFG_IND_DATA] = 0x124,
@@ -327,7 +327,7 @@ static const u16 iproc_pcie_reg_paxb[] = {
};
/* iProc PCIe PAXB v2 registers */
-static const u16 iproc_pcie_reg_paxb_v2[] = {
+static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
[IPROC_PCIE_CFG_IND_DATA] = 0x124,
@@ -355,7 +355,7 @@ static const u16 iproc_pcie_reg_paxb_v2[] = {
};
/* iProc PCIe PAXC v1 registers */
-static const u16 iproc_pcie_reg_paxc[] = {
+static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
[IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
@@ -364,7 +364,7 @@ static const u16 iproc_pcie_reg_paxc[] = {
};
/* iProc PCIe PAXC v2 registers */
-static const u16 iproc_pcie_reg_paxc_v2[] = {
+static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_MSI_GIC_MODE] = 0x050,
[IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
[IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 1bfbceb9f445..066e9e00de11 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -72,6 +72,7 @@
#define PCIE_MSI_VECTOR 0x0c0
#define PCIE_CONF_VEND_ID 0x100
+#define PCIE_CONF_DEVICE_ID 0x102
#define PCIE_CONF_CLASS_ID 0x106
#define PCIE_INT_MASK 0x420
@@ -134,12 +135,16 @@ struct mtk_pcie_port;
/**
* struct mtk_pcie_soc - differentiate between host generations
* @need_fix_class_id: whether this host's class ID needs to be fixed or not
+ * @need_fix_device_id: whether this host's device ID needs to be fixed or not
+ * @device_id: device ID to be programmed for this host
* @ops: pointer to configuration access functions
* @startup: pointer to controller setting functions
* @setup_irq: pointer to initialize IRQ functions
*/
struct mtk_pcie_soc {
bool need_fix_class_id;
+ bool need_fix_device_id;
+ unsigned int device_id;
struct pci_ops *ops;
int (*startup)(struct mtk_pcie_port *port);
int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
@@ -678,6 +683,9 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
writew(val, port->base + PCIE_CONF_CLASS_ID);
}
+ if (soc->need_fix_device_id)
+ writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID);
+
/* 100ms timeout value should be enough for Gen1/2 training */
err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
!!(val & PCIE_PORT_LINKUP_V2), 20,
@@ -1081,14 +1089,14 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
err = of_pci_get_devfn(child);
if (err < 0) {
dev_err(dev, "failed to parse devfn: %d\n", err);
- return err;
+ goto error_put_node;
}
slot = PCI_SLOT(err);
err = mtk_pcie_parse_port(pcie, child, slot);
if (err)
- return err;
+ goto error_put_node;
}
err = mtk_pcie_subsys_powerup(pcie);
@@ -1104,6 +1112,9 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
mtk_pcie_subsys_powerdown(pcie);
return 0;
+error_put_node:
+ of_node_put(child);
+ return err;
}
static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
@@ -1213,11 +1224,21 @@ static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
.setup_irq = mtk_pcie_setup_irq,
};
+static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = {
+ .need_fix_class_id = true,
+ .need_fix_device_id = true,
+ .device_id = PCI_DEVICE_ID_MEDIATEK_7629,
+ .ops = &mtk_pcie_ops_v2,
+ .startup = mtk_pcie_startup_port_v2,
+ .setup_irq = mtk_pcie_setup_irq,
+};
+
static const struct of_device_id mtk_pcie_ids[] = {
{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
+ { .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
{},
};
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index 333ab6092f17..00296c5eacb9 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -335,11 +335,12 @@ static struct pci_ops rcar_pcie_ops = {
};
static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
- struct resource *res)
+ struct resource_entry *window)
{
/* Setup PCIe address space mappings for each resource */
resource_size_t size;
resource_size_t res_start;
+ struct resource *res = window->res;
u32 mask;
rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
@@ -353,9 +354,9 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
if (res->flags & IORESOURCE_IO)
- res_start = pci_pio_to_address(res->start);
+ res_start = pci_pio_to_address(res->start) - window->offset;
else
- res_start = res->start;
+ res_start = res->start - window->offset;
rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
@@ -384,7 +385,7 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
switch (resource_type(res)) {
case IORESOURCE_IO:
case IORESOURCE_MEM:
- rcar_pcie_setup_window(i, pci, res);
+ rcar_pcie_setup_window(i, pci, win);
i++;
break;
case IORESOURCE_BUS:
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index ab36e5ca1aca..3d1b004a58f8 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -617,9 +617,11 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
if (!membar2)
return -ENOMEM;
offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
- readq(membar2 + MB2_SHADOW_OFFSET);
+ (readq(membar2 + MB2_SHADOW_OFFSET) &
+ PCI_BASE_ADDRESS_MEM_MASK);
offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
- readq(membar2 + MB2_SHADOW_OFFSET + 8);
+ (readq(membar2 + MB2_SHADOW_OFFSET + 8) &
+ PCI_BASE_ADDRESS_MEM_MASK);
pci_iounmap(vmd->dev, membar2);
}
}
@@ -702,9 +704,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
x86_vector_domain);
- irq_domain_free_fwnode(fn);
- if (!vmd->irq_domain)
+ if (!vmd->irq_domain) {
+ irq_domain_free_fwnode(fn);
return -ENODEV;
+ }
pci_add_resource(&resources, &vmd->resources[0]);
pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
@@ -715,6 +718,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
if (!vmd->bus) {
pci_free_resource_list(&resources);
irq_domain_remove(vmd->irq_domain);
+ irq_domain_free_fwnode(fn);
return -ENODEV;
}
@@ -817,6 +821,7 @@ static void vmd_cleanup_srcu(struct vmd_dev *vmd)
static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
+ struct fwnode_handle *fn = vmd->irq_domain->fwnode;
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
@@ -825,6 +830,7 @@ static void vmd_remove(struct pci_dev *dev)
vmd_teardown_dma_ops(vmd);
vmd_detach_resources(vmd);
irq_domain_remove(vmd->irq_domain);
+ irq_domain_free_fwnode(fn);
}
#ifdef CONFIG_PM_SLEEP
@@ -866,6 +872,8 @@ static const struct pci_device_id vmd_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
+ .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 4bbd26e8a9e2..09a1e449cd1c 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -572,6 +572,7 @@ static int __init pci_epf_test_init(void)
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
ret = pci_epf_register_driver(&test_driver);
if (ret) {
+ destroy_workqueue(kpcitest_workqueue);
pr_err("Failed to register pci epf test driver --> %d\n", ret);
return ret;
}
@@ -582,6 +583,8 @@ module_init(pci_epf_test_init);
static void __exit pci_epf_test_exit(void)
{
+ if (kpcitest_workqueue)
+ destroy_workqueue(kpcitest_workqueue);
pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index 2bf8bd1f0563..0471643cf536 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -79,6 +79,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size,
mem->page_size = page_size;
mem->pages = pages;
mem->size = size;
+ mutex_init(&mem->lock);
epc->mem = mem;
@@ -122,7 +123,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size)
{
int pageno;
- void __iomem *virt_addr;
+ void __iomem *virt_addr = NULL;
struct pci_epc_mem *mem = epc->mem;
unsigned int page_shift = ilog2(mem->page_size);
int order;
@@ -130,15 +131,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
if (pageno < 0)
- return NULL;
+ goto ret;
*phys_addr = mem->phys_base + (pageno << page_shift);
virt_addr = ioremap(*phys_addr, size);
if (!virt_addr)
bitmap_release_region(mem->bitmap, pageno, order);
+ret:
+ mutex_unlock(&mem->lock);
return virt_addr;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
@@ -164,7 +168,9 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
pageno = (phys_addr - mem->phys_base) >> page_shift;
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
bitmap_release_region(mem->bitmap, pageno, order);
+ mutex_unlock(&mem->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index c94c13525447..3d8844e7090a 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -122,13 +122,21 @@ static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev)
struct acpiphp_context *context;
acpi_lock_hp_context();
+
context = acpiphp_get_context(adev);
- if (!context || context->func.parent->is_going_away) {
- acpi_unlock_hp_context();
- return NULL;
+ if (!context)
+ goto unlock;
+
+ if (context->func.parent->is_going_away) {
+ acpiphp_put_context(context);
+ context = NULL;
+ goto unlock;
}
+
get_bridge(context->func.parent);
acpiphp_put_context(context);
+
+unlock:
acpi_unlock_hp_context();
return context;
}
@@ -532,6 +540,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
slot->flags &= ~SLOT_ENABLED;
continue;
}
+ pci_dev_put(dev);
}
}
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index c3e3f5358b3b..005817e40ad3 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -530,7 +530,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
struct device *parent = pdev->dev.parent;
- u16 status, events;
+ u16 status, events = 0;
/*
* Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4).
@@ -553,6 +553,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
}
}
+read_status:
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
if (status == (u16) ~0) {
ctrl_info(ctrl, "%s: no response from device\n", __func__);
@@ -565,24 +566,37 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
* Slot Status contains plain status bits as well as event
* notification bits; right now we only want the event bits.
*/
- events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
- PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
- PCI_EXP_SLTSTA_DLLSC);
+ status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
+ PCI_EXP_SLTSTA_DLLSC;
/*
* If we've already reported a power fault, don't report it again
* until we've done something to handle it.
*/
if (ctrl->power_fault_detected)
- events &= ~PCI_EXP_SLTSTA_PFD;
+ status &= ~PCI_EXP_SLTSTA_PFD;
+ events |= status;
if (!events) {
if (parent)
pm_runtime_put(parent);
return IRQ_NONE;
}
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+ if (status) {
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+
+ /*
+ * In MSI mode, all event bits must be zero before the port
+ * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
+ * So re-read the Slot Status register in case a bit was set
+ * between read and write.
+ */
+ if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
+ goto read_status;
+ }
+
ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
if (parent)
pm_runtime_put(parent);
@@ -627,17 +641,15 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
ret = pciehp_isr(irq, dev_id);
enable_irq(irq);
- if (ret != IRQ_WAKE_THREAD) {
- pci_config_pm_runtime_put(pdev);
- return ret;
- }
+ if (ret != IRQ_WAKE_THREAD)
+ goto out;
}
synchronize_hardirq(irq);
events = atomic_xchg(&ctrl->pending_events, 0);
if (!events) {
- pci_config_pm_runtime_put(pdev);
- return IRQ_NONE;
+ ret = IRQ_NONE;
+ goto out;
}
/* Check Attention Button Pressed */
@@ -666,10 +678,12 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
pciehp_handle_presence_or_link_change(slot, events);
up_read(&ctrl->reset_lock);
+ ret = IRQ_HANDLED;
+out:
pci_config_pm_runtime_put(pdev);
ctrl->ist_running = false;
wake_up(&ctrl->requester);
- return IRQ_HANDLED;
+ return ret;
}
static int pciehp_poll(void *data)
diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
index cdbfa5df3a51..dbfa0b55d31a 100644
--- a/drivers/pci/hotplug/rpadlpar_sysfs.c
+++ b/drivers/pci/hotplug/rpadlpar_sysfs.c
@@ -34,12 +34,11 @@ static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr,
if (nbytes >= MAX_DRC_NAME_LEN)
return 0;
- memcpy(drc_name, buf, nbytes);
+ strscpy(drc_name, buf, nbytes + 1);
end = strchr(drc_name, '\n');
- if (!end)
- end = &drc_name[nbytes];
- *end = '\0';
+ if (end)
+ *end = '\0';
rc = dlpar_add_slot(drc_name);
if (rc)
@@ -65,12 +64,11 @@ static ssize_t remove_slot_store(struct kobject *kobj,
if (nbytes >= MAX_DRC_NAME_LEN)
return 0;
- memcpy(drc_name, buf, nbytes);
+ strscpy(drc_name, buf, nbytes + 1);
end = strchr(drc_name, '\n');
- if (!end)
- end = &drc_name[nbytes];
- *end = '\0';
+ if (end)
+ *end = '\0';
rc = dlpar_remove_slot(drc_name);
if (rc)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index f7218c1673ce..2c46f7dcd2f5 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -587,7 +587,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
while (bus->parent) {
if (acpi_pm_device_can_wakeup(&bus->self->dev))
- return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable);
+ return acpi_pm_set_device_wakeup(&bus->self->dev, enable);
bus = bus->parent;
}
@@ -595,7 +595,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
/* We have reached the root bus. */
if (bus->bridge) {
if (acpi_pm_device_can_wakeup(bus->bridge))
- return acpi_pm_set_bridge_wakeup(bus->bridge, enable);
+ return acpi_pm_set_device_wakeup(bus->bridge, enable);
}
return 0;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 57a87a001b4f..3d59bbe4a5d5 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1585,20 +1585,10 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
int err;
int i, bars = 0;
- /*
- * Power state could be unknown at this point, either due to a fresh
- * boot or a device removal call. So get the current power state
- * so that things like MSI message writing will behave as expected
- * (e.g. if the device really is in D0 at enable time).
- */
- if (dev->pm_cap) {
- u16 pmcsr;
- pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
- dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
- }
-
- if (atomic_inc_return(&dev->enable_cnt) > 1)
+ if (atomic_inc_return(&dev->enable_cnt) > 1) {
+ pci_update_current_state(dev, dev->current_state);
return 0; /* already enabled */
+ }
bridge = pci_upstream_bridge(dev);
if (bridge)
@@ -3361,7 +3351,14 @@ u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
return 0;
pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
- return (cap & PCI_REBAR_CAP_SIZES) >> 4;
+ cap &= PCI_REBAR_CAP_SIZES;
+
+ /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
+ if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
+ bar == 0 && cap == 0x7000)
+ cap = 0x3f000;
+
+ return cap >> 4;
}
/**
@@ -3810,6 +3807,10 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
ret = logic_pio_register_range(range);
if (ret)
kfree(range);
+
+ /* Ignore duplicates due to deferred probing */
+ if (ret == -EEXIST)
+ ret = 0;
#endif
return ret;
@@ -5840,19 +5841,21 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
while (*p) {
count = 0;
if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
- p[count] == '@') {
+ p[count] == '@') {
p += count + 1;
+ if (align_order > 63) {
+ pr_err("PCI: Invalid requested alignment (order %d)\n",
+ align_order);
+ align_order = PAGE_SHIFT;
+ }
} else {
- align_order = -1;
+ align_order = PAGE_SHIFT;
}
ret = pci_dev_str_match(dev, p, &p);
if (ret == 1) {
*resize = true;
- if (align_order == -1)
- align = PAGE_SIZE;
- else
- align = 1 << align_order;
+ align = 1ULL << align_order;
break;
} else if (ret < 0) {
pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index e9ede82ee2c2..39725b71300f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -473,6 +473,12 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
struct resource *res);
+#else
+static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
+ u16 segment, struct resource *res)
+{
+ return -ENODEV;
+}
#endif
u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 1117b25fbe0b..279f9f0197b0 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -67,6 +67,7 @@ struct pcie_link_state {
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+ u32 clkpm_disable:1; /* Clock PM disabled */
/* Exit latencies */
struct aspm_latency latency_up; /* Upstream direction exit latency */
@@ -164,8 +165,11 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
- /* Don't enable Clock PM if the link is not Clock PM capable */
- if (!link->clkpm_capable)
+ /*
+ * Don't enable Clock PM if the link is not Clock PM capable
+ * or Clock PM is disabled
+ */
+ if (!link->clkpm_capable || link->clkpm_disable)
enable = 0;
/* Need nothing if the specified equals to current state */
if (link->clkpm_enabled == enable)
@@ -195,7 +199,8 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
}
link->clkpm_enabled = enabled;
link->clkpm_default = enabled;
- link->clkpm_capable = (blacklist) ? 0 : capable;
+ link->clkpm_capable = capable;
+ link->clkpm_disable = blacklist ? 1 : 0;
}
static bool pcie_retrain_link(struct pcie_link_state *link)
@@ -628,16 +633,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Setup initial capable state. Will be updated later */
link->aspm_capable = link->aspm_support;
- /*
- * If the downstream component has pci bridge function, don't
- * do ASPM for now.
- */
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) {
- link->aspm_disable = ASPM_STATE_ALL;
- break;
- }
- }
/* Get and check endpoint acceptable latencies */
list_for_each_entry(child, &linkbus->devices, bus_list) {
@@ -747,9 +742,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
/* Enable what we need to enable */
pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CAP_L1_PM_SS, val);
+ PCI_L1SS_CTL1_L1SS_MASK, val);
pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CAP_L1_PM_SS, val);
+ PCI_L1SS_CTL1_L1SS_MASK, val);
}
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
@@ -1106,10 +1101,9 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
link->aspm_disable |= ASPM_STATE_L1;
pcie_config_aspm_link(link, policy_to_aspm_state(link));
- if (state & PCIE_LINK_STATE_CLKPM) {
- link->clkpm_capable = 0;
- pcie_set_clkpm(link, 0);
- }
+ if (state & PCIE_LINK_STATE_CLKPM)
+ link->clkpm_disable = 1;
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
if (sem)
up_read(&pci_bus_sem);
@@ -1170,6 +1164,7 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
else
cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
+ cnt += sprintf(buffer + cnt, "\n");
return cnt;
}
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 9361f3aa26ab..357a454cafa0 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -39,10 +39,6 @@ void pci_ptm_init(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!pos)
- return;
-
/*
* Enable PTM only on interior devices (root ports, switch ports,
* etc.) on the assumption that it causes no link traffic until an
@@ -52,6 +48,23 @@ void pci_ptm_init(struct pci_dev *dev)
pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
return;
+ /*
+ * Switch Downstream Ports are not permitted to have a PTM
+ * capability; their PTM behavior is controlled by the Upstream
+ * Port (PCIe r5.0, sec 7.9.16).
+ */
+ ups = pci_upstream_bridge(dev);
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM &&
+ ups && ups->ptm_enabled) {
+ dev->ptm_granularity = ups->ptm_granularity;
+ dev->ptm_enabled = 1;
+ return;
+ }
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
+ if (!pos)
+ return;
+
pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
@@ -61,7 +74,6 @@ void pci_ptm_init(struct pci_dev *dev)
* the spec recommendation (PCIe r3.1, sec 7.32.3), select the
* furthest upstream Time Source as the PTM Root.
*/
- ups = pci_upstream_bridge(dev);
if (ups && ups->ptm_enabled) {
ctrl = PCI_PTM_CTRL_ENABLE;
if (ups->ptm_granularity == 0)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index fa4c386c8cd8..113b7bdf86dd 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -348,6 +348,57 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
}
}
+static void pci_read_bridge_windows(struct pci_dev *bridge)
+{
+ u16 io;
+ u32 pmem, tmp;
+
+ pci_read_config_word(bridge, PCI_IO_BASE, &io);
+ if (!io) {
+ pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
+ pci_read_config_word(bridge, PCI_IO_BASE, &io);
+ pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
+ }
+ if (io)
+ bridge->io_window = 1;
+
+ /*
+ * DECchip 21050 pass 2 errata: the bridge may miss an address
+ * disconnect boundary by one PCI data phase. Workaround: do not
+ * use prefetching on this device.
+ */
+ if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
+ return;
+
+ pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
+ if (!pmem) {
+ pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
+ 0xffe0fff0);
+ pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
+ pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
+ }
+ if (!pmem)
+ return;
+
+ bridge->pref_window = 1;
+
+ if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
+
+ /*
+ * Bridge claims to have a 64-bit prefetchable memory
+ * window; verify that the upper bits are actually
+ * writable.
+ */
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ 0xffffffff);
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem);
+ if (tmp)
+ bridge->pref_64_window = 1;
+ }
+}
+
static void pci_read_bridge_io(struct pci_bus *child)
{
struct pci_dev *dev = child->self;
@@ -818,9 +869,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
goto free;
err = device_register(&bridge->dev);
- if (err)
+ if (err) {
put_device(&bridge->dev);
-
+ goto free;
+ }
bus->bridge = get_device(&bridge->dev);
device_enable_async_suspend(bus->bridge);
pci_set_bus_of_node(bus);
@@ -1634,7 +1686,7 @@ int pci_setup_device(struct pci_dev *dev)
/* Device class may be changed after fixup */
class = dev->class >> 8;
- if (dev->non_compliant_bars) {
+ if (dev->non_compliant_bars && !dev->mmio_always_on) {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
pci_info(dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
@@ -1711,6 +1763,7 @@ int pci_setup_device(struct pci_dev *dev)
pci_read_irq(dev);
dev->transparent = ((dev->class & 0xff) == 1);
pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
+ pci_read_bridge_windows(dev);
set_pcie_hotplug_bridge(dev);
pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
if (pos) {
@@ -1748,13 +1801,33 @@ static void pci_configure_mps(struct pci_dev *dev)
struct pci_dev *bridge = pci_upstream_bridge(dev);
int mps, mpss, p_mps, rc;
- if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
+ if (!pci_is_pcie(dev))
return;
/* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
if (dev->is_virtfn)
return;
+ /*
+ * For Root Complex Integrated Endpoints, program the maximum
+ * supported value unless limited by the PCIE_BUS_PEER2PEER case.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
+ if (pcie_bus_config == PCIE_BUS_PEER2PEER)
+ mps = 128;
+ else
+ mps = 128 << dev->pcie_mpss;
+ rc = pcie_set_mps(dev, mps);
+ if (rc) {
+ pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ mps);
+ }
+ return;
+ }
+
+ if (!bridge || !pci_is_pcie(bridge))
+ return;
+
mps = pcie_get_mps(dev);
p_mps = pcie_get_mps(bridge);
@@ -2286,6 +2359,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
pci_set_of_node(dev);
if (pci_setup_device(dev)) {
+ pci_release_of_node(dev);
pci_bus_put(dev->bus);
kfree(dev);
return NULL;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 419dda6dbd16..70f05595da60 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1947,26 +1947,92 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk
/*
* IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no
* 300641-004US, section 5.7.3.
+ *
+ * Core IO on Xeon E5 1600/2600/4600, see Intel order no 326509-003.
+ * Core IO on Xeon E5 v2, see Intel order no 329188-003.
+ * Core IO on Xeon E7 v2, see Intel order no 329595-002.
+ * Core IO on Xeon E5 v3, see Intel order no 330784-003.
+ * Core IO on Xeon E7 v3, see Intel order no 332315-001US.
+ * Core IO on Xeon E5 v4, see Intel order no 333810-002US.
+ * Core IO on Xeon E7 v4, see Intel order no 332315-001US.
+ * Core IO on Xeon D-1500, see Intel order no 332051-001.
+ * Core IO on Xeon Scalable, see Intel order no 610950.
*/
-#define INTEL_6300_IOAPIC_ABAR 0x40
+#define INTEL_6300_IOAPIC_ABAR 0x40 /* Bus 0, Dev 29, Func 5 */
#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14)
+#define INTEL_CIPINTRC_CFG_OFFSET 0x14C /* Bus 0, Dev 5, Func 0 */
+#define INTEL_CIPINTRC_DIS_INTX_ICH (1<<25)
+
static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
{
u16 pci_config_word;
+ u32 pci_config_dword;
if (noioapicquirk)
return;
- pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
- pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
- pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
-
+ switch (dev->device) {
+ case PCI_DEVICE_ID_INTEL_ESB_10:
+ pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR,
+ &pci_config_word);
+ pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
+ pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR,
+ pci_config_word);
+ break;
+ case 0x3c28: /* Xeon E5 1600/2600/4600 */
+ case 0x0e28: /* Xeon E5/E7 V2 */
+ case 0x2f28: /* Xeon E5/E7 V3,V4 */
+ case 0x6f28: /* Xeon D-1500 */
+ case 0x2034: /* Xeon Scalable Family */
+ pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
+ &pci_config_dword);
+ pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH;
+ pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
+ pci_config_dword);
+ break;
+ default:
+ return;
+ }
pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+/*
+ * Device 29 Func 5 Device IDs of IO-APIC
+ * containing ABAR (APIC1 Alternate Base Address Register)
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
+ quirk_disable_intel_boot_interrupt);
+
+/*
+ * Device 5 Func 0 Device IDs of Core IO modules/hubs
+ * containing Coherent Interface Protocol Interrupt Control
+ *
+ * Device IDs obtained from the volume 2 datasheets of the
+ * families commented above.
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034,
+ quirk_disable_intel_boot_interrupt);
/* Disable boot interrupts on HT-1000 */
#define BC_HT1000_FEATURE_REG 0x64
@@ -2241,6 +2307,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
+{
+ pci_info(dev, "Disabling ASPM L0s/L1\n");
+ pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+}
+
+/*
+ * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the
+ * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
+ * disable both L0s and L1 for now to be safe.
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
+
/*
* Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
* Link bit cleared after starting the link retrain process to allow this
@@ -3882,6 +3961,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c135 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215,
+ quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
quirk_dma_func1_alias);
@@ -4198,6 +4280,24 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
quirk_chelsio_T5_disable_root_port_attributes);
/*
+ * pci_acs_ctrl_enabled - compare desired ACS controls with those provided
+ * by a device
+ * @acs_ctrl_req: Bitmask of desired ACS controls
+ * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by
+ * the hardware design
+ *
+ * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included
+ * in @acs_ctrl_ena, i.e., the device provides all the access controls the
+ * caller desires. Return 0 otherwise.
+ */
+static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
+{
+ if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
+ return 1;
+ return 0;
+}
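[Editor's note] To illustrate the subset check above (hypothetical calls, not part of the patch): a quirk whose hardware provides SV/RR/CR/UF satisfies a request for RR|CR, but not one that also asks for Translation Blocking:

	/* all requested controls are provided -> returns 1 */
	pci_acs_ctrl_enabled(PCI_ACS_RR | PCI_ACS_CR,
			     PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);

	/* PCI_ACS_TB is requested but not provided -> returns 0 */
	pci_acs_ctrl_enabled(PCI_ACS_RR | PCI_ACS_TB,
			     PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);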
+
+/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
* IOMMU in the system. Multifunction devices that do not support
@@ -4237,10 +4337,12 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
if (ACPI_FAILURE(status))
return -ENODEV;
+ acpi_put_table(header);
+
/* Filter out flags not applicable to multifunction */
acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
- return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
#else
return -ENODEV;
#endif
@@ -4267,20 +4369,19 @@ static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
+ if (!pci_quirk_cavium_acs_match(dev))
+ return -ENOTTY;
+
/*
- * Cavium root ports don't advertise an ACS capability. However,
+ * Cavium Root Ports don't advertise an ACS capability. However,
* the RTL internally implements similar protection as if ACS had
- * Request Redirection, Completion Redirection, Source Validation,
+ * Source Validation, Request Redirection, Completion Redirection,
* and Upstream Forwarding features enabled. Assert that the
* hardware implements and enables equivalent ACS functionality for
* these flags.
*/
- acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
-
- if (!pci_quirk_cavium_acs_match(dev))
- return -ENOTTY;
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4290,13 +4391,12 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
* transactions with others, allowing masking out these bits as if they
* were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
/*
- * Many Intel PCH root ports do provide ACS-like features to disable peer
+ * Many Intel PCH Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. This is the list of device IDs known to fall
* into that category as provided by Intel in Red Hat bugzilla 1037684.
@@ -4344,37 +4444,32 @@ static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
return false;
}
-#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
-
static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
- INTEL_PCH_ACS_FLAGS : 0;
-
if (!pci_quirk_intel_pch_acs_match(dev))
return -ENOTTY;
- return acs_flags & ~flags ? 0 : 1;
+ if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+
+ return pci_acs_ctrl_enabled(acs_flags, 0);
}
/*
- * These QCOM root ports do provide ACS-like features to disable peer
+ * These QCOM Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. Hardware supports source validation but it
* will report the issue as Completer Abort instead of ACS Violation.
- * Hardware doesn't support peer-to-peer and each root port is a root
- * complex with unique segment numbers. It is not possible for one root
- * port to pass traffic to another root port. All PCIe transactions are
- * terminated inside the root port.
+ * Hardware doesn't support peer-to-peer and each Root Port is a Root
+ * Complex with unique segment numbers. It is not possible for one Root
+ * Port to pass traffic to another Root Port. All PCIe transactions are
+ * terminated inside the Root Port.
*/
static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV);
- int ret = acs_flags & ~flags ? 0 : 1;
-
- pci_info(dev, "Using QCOM ACS Quirk (%d)\n", ret);
-
- return ret;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
/*
@@ -4457,7 +4552,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
- return acs_flags & ~ctrl ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, ctrl);
}
static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4471,10 +4566,35 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
* perform peer-to-peer with other functions, allowing us to mask out
* these bits as if they were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
- PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+ PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+}
- return acs_flags ? 0 : 1;
+static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+ * Intel RCiEPs are required to allow p2p only on translated
+ * addresses. Refer to Intel VT-d specification, r3.1, sec 3.16,
+ * "Root-Complex Peer to Peer Considerations".
+ */
+ if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END)
+ return -ENOTTY;
+
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
+
+static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+ * iProc PAXB Root Ports don't advertise an ACS capability, but
+ * they do not allow peer-to-peer transactions between Root Ports.
+ * Allow each Root Port to be in a separate IOMMU group by masking
+ * SV/RR/CR/UF bits.
+ */
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static const struct pci_dev_acs_enabled {
@@ -4547,6 +4667,7 @@ static const struct pci_dev_acs_enabled {
/* I219 */
{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs },
/* QCOM QDF2xxx root ports */
{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
@@ -4568,9 +4689,21 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
{ 0 }
};
+/*
+ * pci_dev_specific_acs_enabled - check whether device provides ACS controls
+ * @dev: PCI device
+ * @acs_flags: Bitmask of desired ACS controls
+ *
+ * Returns:
+ * -ENOTTY: No quirk applies to this device; we can't tell whether the
+ * device provides the desired controls
+ * 0: Device does not provide all the desired controls
+ * >0: Device provides all the controls in @acs_flags
+ */
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
const struct pci_dev_acs_enabled *i;
@@ -4890,13 +5023,25 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
-/* FLR may cause some 82579 devices to hang */
-static void quirk_intel_no_flr(struct pci_dev *dev)
+/*
+ * FLR may cause the following devices to hang:
+ *
+ * AMD Starship/Matisse HD Audio Controller 0x1487
+ * AMD Starship USB 3.0 Host Controller 0x148c
+ * AMD Matisse USB 3.0 Host Controller 0x149c
+ * Intel 82579LM Gigabit Ethernet Controller 0x1502
+ * Intel 82579V Gigabit Ethernet Controller 0x1503
+ */
+static void quirk_no_flr(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
static void quirk_no_ext_tags(struct pci_dev *pdev)
{
@@ -4926,7 +5071,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
*/
static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
- if (pdev->device == 0x7340 && pdev->revision != 0xc5)
+ if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
+ (pdev->device == 0x7340 && pdev->revision != 0xc5))
return;
pci_info(pdev, "disabling ATS\n");
@@ -4937,6 +5083,8 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
/* AMD Iceland dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
+/* AMD Navi10 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
/* AMD Navi14 dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
@@ -4950,35 +5098,49 @@ static void quirk_fsl_no_msi(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
/*
- * GPUs with integrated HDA controller for streaming audio to attached displays
- * need a device link from the HDA controller (consumer) to the GPU (supplier)
- * so that the GPU is powered up whenever the HDA controller is accessed.
- * The GPU and HDA controller are functions 0 and 1 of the same PCI device.
- * The device link stays in place until shutdown (or removal of the PCI device
- * if it's hotplugged). Runtime PM is allowed by default on the HDA controller
- * to prevent it from permanently keeping the GPU awake.
+ * Although not allowed by the spec, some multi-function devices have
+ * dependencies of one function (consumer) on another (supplier). For the
+ * consumer to work in D0, the supplier must also be in D0. Create a
+ * device link from the consumer to the supplier to enforce this
+ * dependency. Runtime PM is allowed by default on the consumer to prevent
+ * it from permanently keeping the supplier awake.
*/
-static void quirk_gpu_hda(struct pci_dev *hda)
+static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer,
+ unsigned int supplier, unsigned int class,
+ unsigned int class_shift)
{
- struct pci_dev *gpu;
+ struct pci_dev *supplier_pdev;
- if (PCI_FUNC(hda->devfn) != 1)
+ if (PCI_FUNC(pdev->devfn) != consumer)
return;
- gpu = pci_get_domain_bus_and_slot(pci_domain_nr(hda->bus),
- hda->bus->number,
- PCI_DEVFN(PCI_SLOT(hda->devfn), 0));
- if (!gpu || (gpu->class >> 16) != PCI_BASE_CLASS_DISPLAY) {
- pci_dev_put(gpu);
+ supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier));
+ if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) {
+ pci_dev_put(supplier_pdev);
return;
}
- if (!device_link_add(&hda->dev, &gpu->dev,
- DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME))
- pci_err(hda, "cannot link HDA to GPU %s\n", pci_name(gpu));
+ if (device_link_add(&pdev->dev, &supplier_pdev->dev,
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME))
+ pci_info(pdev, "D0 power state depends on %s\n",
+ pci_name(supplier_pdev));
+ else
+ pci_err(pdev, "Cannot enforce power dependency on %s\n",
+ pci_name(supplier_pdev));
- pm_runtime_allow(&hda->dev);
- pci_dev_put(gpu);
+ pm_runtime_allow(&pdev->dev);
+ pci_dev_put(supplier_pdev);
+}
+
+/*
+ * Create device link for GPUs with integrated HDA controller for streaming
+ * audio to attached displays.
+ */
+static void quirk_gpu_hda(struct pci_dev *hda)
+{
+ pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
@@ -4988,6 +5150,62 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
/*
+ * Create device link for NVIDIA GPU with integrated USB xHCI Host
+ * controller to VGA.
+ */
+static void quirk_gpu_usb(struct pci_dev *usb)
+{
+ pci_create_device_link(usb, 2, 0, PCI_BASE_CLASS_DISPLAY, 16);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
+
+/*
+ * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
+ * to VGA. Currently there is no class code defined for UCSI device over PCI
+ * so using UNKNOWN class for now and it will be updated when UCSI
+ * over PCI gets a class code.
+ */
+#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80
+static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
+{
+ pci_create_device_link(ucsi, 3, 0, PCI_BASE_CLASS_DISPLAY, 16);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_UNKNOWN, 8,
+ quirk_gpu_usb_typec_ucsi);
+
+/*
+ * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
+ * disabled. https://devtalk.nvidia.com/default/topic/1024022
+ */
+static void quirk_nvidia_hda(struct pci_dev *gpu)
+{
+ u8 hdr_type;
+ u32 val;
+
+ /* There was no integrated HDA controller before MCP89 */
+ if (gpu->device < PCI_DEVICE_ID_NVIDIA_GEFORCE_320M)
+ return;
+
+ /* Bit 25 at offset 0x488 enables the HDA controller */
+ pci_read_config_dword(gpu, 0x488, &val);
+ if (val & BIT(25))
+ return;
+
+ pci_info(gpu, "Enabling HDA controller\n");
+ pci_write_config_dword(gpu, 0x488, val | BIT(25));
+
+ /* The GPU becomes a multi-function device when the HDA is enabled */
+ pci_read_config_byte(gpu, PCI_HEADER_TYPE, &hdr_type);
+ gpu->multifunction = !!(hdr_type & 0x80);
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
+DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
+
+/*
* Some IDT switches incorrectly flag an ACS Source Validation error on
* completions for config read requests even though PCIe r4.0, sec
* 6.12.1.1, says that completions are never affected by ACS Source
@@ -5216,3 +5434,34 @@ out_disable:
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
PCI_CLASS_DISPLAY_VGA, 8,
quirk_reset_lenovo_thinkpad_p50_nvgpu);
+
+/*
+ * Device [1b21:2142]
+ * When in D0, PME# doesn't get asserted when plugging in a USB 3.0 device.
+ */
+static void pci_fixup_no_d0_pme(struct pci_dev *dev)
+{
+ pci_info(dev, "PME# does not work under D0, disabling it\n");
+ dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
+
+/*
+ * Devices [12d8:400e] and [12d8:400f]
+ * These devices advertise PME# support in all power states but don't
+ * reliably assert it.
+ */
+static void pci_fixup_no_pme(struct pci_dev *dev)
+{
+ pci_info(dev, "PME# is unreliable, disabling it\n");
+ dev->pme_support = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_pme);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_pme);
+
+static void apex_pci_fixup_class(struct pci_dev *pdev)
+{
+ pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a,
+ PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
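[Editor's note] A worked illustration of the class override above, assuming the device reports class 0x000000 (PCI_CLASS_NOT_DEFINED with prog-if 0); PCI_CLASS_SYSTEM_OTHER is 0x0880:

	new_class = (PCI_CLASS_SYSTEM_OTHER << 8) | old_class;
	/* = (0x0880 << 8) | 0x000000 = 0x088000
	 * -> base class 0x08 (system peripheral), sub-class 0x80 (other),
	 *    prog-if 0x00; any non-zero prog-if in old_class is preserved. */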
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 137bf0cee897..8fc9a4e911e3 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -195,20 +195,3 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
pci_disable_rom(pdev);
}
EXPORT_SYMBOL(pci_unmap_rom);
-
-/**
- * pci_platform_rom - provides a pointer to any ROM image provided by the
- * platform
- * @pdev: pointer to pci device struct
- * @size: pointer to receive size of pci window over ROM
- */
-void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size)
-{
- if (pdev->rom && pdev->romlen) {
- *size = pdev->romlen;
- return phys_to_virt((phys_addr_t)pdev->rom);
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(pci_platform_rom);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8e5b00a420a5..87c8190de622 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -735,58 +735,21 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
base/limit registers must be read-only and read as 0. */
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
- u16 io;
- u32 pmem;
struct pci_dev *bridge = bus->self;
- struct resource *b_res;
+ struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
- b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
b_res[1].flags |= IORESOURCE_MEM;
- pci_read_config_word(bridge, PCI_IO_BASE, &io);
- if (!io) {
- pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
- pci_read_config_word(bridge, PCI_IO_BASE, &io);
- pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
- }
- if (io)
+ if (bridge->io_window)
b_res[0].flags |= IORESOURCE_IO;
- /* DECchip 21050 pass 2 errata: the bridge may miss an address
- disconnect boundary by one PCI data phase.
- Workaround: do not use prefetching on this device. */
- if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
- return;
-
- pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
- if (!pmem) {
- pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
- 0xffe0fff0);
- pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
- pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
- }
- if (pmem) {
+ if (bridge->pref_window) {
b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
- PCI_PREF_RANGE_TYPE_64) {
+ if (bridge->pref_64_window) {
b_res[2].flags |= IORESOURCE_MEM_64;
b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
}
}
-
- /* double check if bridge does support 64 bit pref */
- if (b_res[2].flags & IORESOURCE_MEM_64) {
- u32 mem_base_hi, tmp;
- pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
- &mem_base_hi);
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
- 0xffffffff);
- pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
- if (!tmp)
- b_res[2].flags &= ~IORESOURCE_MEM_64;
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
- mem_base_hi);
- }
}
/* Helper function for sizing routines: find first available
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index d8ca40a97693..d21fa04fa44d 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -439,10 +439,11 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
res->end = res->start + pci_rebar_size_to_bytes(size) - 1;
/* Check if the new config works by trying to assign everything. */
- ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
- if (ret)
- goto error_resize;
-
+ if (dev->bus->self) {
+ ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
+ if (ret)
+ goto error_resize;
+ }
return 0;
error_resize:
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index a32897f83ee5..dfbe9cbf292c 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -303,16 +303,19 @@ placeholder:
slot_name = make_slot_name(name);
if (!slot_name) {
err = -ENOMEM;
+ kfree(slot);
goto err;
}
+ INIT_LIST_HEAD(&slot->list);
+ list_add(&slot->list, &parent->slots);
+
err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
"%s", slot_name);
- if (err)
+ if (err) {
+ kobject_put(&slot->kobj);
goto err;
-
- INIT_LIST_HEAD(&slot->list);
- list_add(&slot->list, &parent->slots);
+ }
down_read(&pci_bus_sem);
list_for_each_entry(dev, &parent->devices, bus_list)
@@ -328,7 +331,6 @@ out:
mutex_unlock(&pci_slot_mutex);
return slot;
err:
- kfree(slot);
slot = ERR_PTR(err);
goto out;
}
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 43431816412c..291c0074ad6f 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -147,7 +147,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser)
kref_get(&stuser->kref);
stuser->read_len = sizeof(stuser->data);
stuser_set_state(stuser, MRPC_QUEUED);
- init_completion(&stuser->comp);
+ reinit_completion(&stuser->comp);
list_add_tail(&stuser->list, &stdev->mrpc_queue);
mrpc_cmd_submit(stdev);
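[Editor's note] On the init_completion() -> reinit_completion() change above: reinit_completion() only resets the completion's done counter, while init_completion() also re-initializes the internal wait queue, which is unsafe on an object that may already have waiters. A hedged sketch of the intended reuse pattern (fragment for illustration only):

	/* done once, when the stuser object is created */
	init_completion(&stuser->comp);

	/* done before each re-queue of the same object: reset only the
	 * 'done' counter, leaving the wait queue and any waiters intact */
	reinit_completion(&stuser->comp);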
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index d96626c614f5..a7bdd10fccf3 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -19,7 +19,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
u16 word;
u32 dword;
long err;
- long cfg_ret;
+ int cfg_ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -45,7 +45,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
}
err = -EIO;
- if (cfg_ret != PCIBIOS_SUCCESSFUL)
+ if (cfg_ret)
goto error;
switch (len) {
@@ -103,7 +103,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
if (err)
break;
err = pci_user_write_config_byte(dev, off, byte);
- if (err != PCIBIOS_SUCCESSFUL)
+ if (err)
err = -EIO;
break;
@@ -112,7 +112,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
if (err)
break;
err = pci_user_write_config_word(dev, off, word);
- if (err != PCIBIOS_SUCCESSFUL)
+ if (err)
err = -EIO;
break;
@@ -121,7 +121,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
if (err)
break;
err = pci_user_write_config_dword(dev, off, dword);
- if (err != PCIBIOS_SUCCESSFUL)
+ if (err)
err = -EIO;
break;
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 96075cecb0ae..199293450acf 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -236,7 +236,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
ret = armpmu_register(pmu);
if (ret)
- goto out_free;
+ goto out_free_irqs;
return 0;
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 443906e0aff3..0393c4471227 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -290,7 +290,7 @@ static struct attribute *hisi_hha_pmu_events_attr[] = {
HISI_PMU_EVENT_ATTR(rx_wbip, 0x05),
HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11),
HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c),
- HISI_PMU_EVENT_ATTR(wr_dr_64b, 0x1d),
+ HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d),
HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e),
HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f),
HISI_PMU_EVENT_ATTR(spill_num, 0x20),
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 0bde5d919b2e..4aff69cbb903 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -38,7 +38,7 @@
/* L3C has 8-counters */
#define L3C_NR_COUNTERS 0x8
-#define L3C_PERF_CTRL_EN 0x20000
+#define L3C_PERF_CTRL_EN 0x10000
#define L3C_EVTYPE_NONE 0xff
/*
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 0e31f1392a53..949b07e29c06 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1474,17 +1474,6 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
}
#if defined(CONFIG_ACPI)
-static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
-{
- struct resource *res = data;
-
- if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
- acpi_dev_resource_memory(ares, res);
-
- /* Always tell the ACPI core to skip this resource */
- return 1;
-}
-
static struct
xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
struct acpi_device *adev, u32 type)
@@ -1496,6 +1485,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
struct hw_pmu_info *inf;
void __iomem *dev_csr;
struct resource res;
+ struct resource_entry *rentry;
int enable_bit;
int rc;
@@ -1504,11 +1494,23 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
return NULL;
INIT_LIST_HEAD(&resource_list);
- rc = acpi_dev_get_resources(adev, &resource_list,
- acpi_pmu_dev_add_resource, &res);
+ rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+ if (rc <= 0) {
+ dev_err(dev, "PMU type %d: No resources found\n", type);
+ return NULL;
+ }
+
+ list_for_each_entry(rentry, &resource_list, node) {
+ if (resource_type(rentry->res) == IORESOURCE_MEM) {
+ res = *rentry->res;
+ rentry = NULL;
+ break;
+ }
+ }
acpi_dev_free_resource_list(&resource_list);
- if (rc < 0) {
- dev_err(dev, "PMU type %d: No resource address found\n", type);
+
+ if (rentry) {
+ dev_err(dev, "PMU type %d: No memory resource found\n", type);
return NULL;
}
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 1f8809bab002..920f61eff9cd 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -550,13 +550,14 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
struct sun4i_usb_phy_data *data =
container_of(work, struct sun4i_usb_phy_data, detect.work);
struct phy *phy0 = data->phys[0].phy;
- struct sun4i_usb_phy *phy = phy_get_drvdata(phy0);
+ struct sun4i_usb_phy *phy;
bool force_session_end, id_notify = false, vbus_notify = false;
int id_det, vbus_det;
- if (phy0 == NULL)
+ if (!phy0)
return;
+ phy = phy_get_drvdata(phy0);
id_det = sun4i_usb_phy0_get_id_det(data);
vbus_det = sun4i_usb_phy0_get_vbus_det(data);
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 68e321225400..ed4d3904e53f 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -2,8 +2,8 @@
# Phy drivers for Marvell platforms
#
config ARMADA375_USBCLUSTER_PHY
- def_bool y
- depends on MACH_ARMADA_375 || COMPILE_TEST
+ bool "Armada 375 USB cluster PHY support" if COMPILE_TEST
+ default y if MACH_ARMADA_375
depends on OF && HAS_IOMEM
select GENERIC_PHY
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index 593c77dbde2e..106f53f33324 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -623,35 +623,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev)
generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
if (IS_ERR(generic_phy)) {
error = PTR_ERR(generic_phy);
- return PTR_ERR(generic_phy);
+ goto out_reg_disable;
}
phy_set_drvdata(generic_phy, ddata);
phy_provider = devm_of_phy_provider_register(ddata->dev,
of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
+ if (IS_ERR(phy_provider)) {
+ error = PTR_ERR(phy_provider);
+ goto out_reg_disable;
+ }
error = cpcap_usb_init_optional_pins(ddata);
if (error)
- return error;
+ goto out_reg_disable;
cpcap_usb_init_optional_gpios(ddata);
error = cpcap_usb_init_iio(ddata);
if (error)
- return error;
+ goto out_reg_disable;
error = cpcap_usb_init_interrupts(pdev, ddata);
if (error)
- return error;
+ goto out_reg_disable;
usb_add_phy_dev(&ddata->phy);
atomic_set(&ddata->active, 1);
schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
return 0;
+
+out_reg_disable:
+ regulator_disable(ddata->vusb);
+
+ return error;
}
static int cpcap_usb_phy_remove(struct platform_device *pdev)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index cf515928fed0..68107611c70a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -311,8 +311,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
@@ -338,7 +338,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa),
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
@@ -347,7 +346,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7),
};
static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
@@ -355,6 +353,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_TX_EMP_POST1_LVL, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a),
};
static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
@@ -365,7 +365,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4),
};
static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
@@ -818,6 +817,9 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.mask_pcs_ready = PHYSTATUS,
};
+static const char * const ipq8074_pciephy_clk_l[] = {
+ "aux", "cfg_ahb",
+};
/* list of resets */
static const char * const ipq8074_pciephy_reset_l[] = {
"phy", "common",
@@ -835,8 +837,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
.rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
.pcs_tbl = ipq8074_pcie_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
- .clk_list = NULL,
- .num_clks = 0,
+ .clk_list = ipq8074_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
.reset_list = ipq8074_pciephy_reset_l,
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = NULL,
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 5d78d43ba9fc..6b3aaf521e58 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -77,6 +77,8 @@
#define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc
/* Only for QMP V2 PHY - TX registers */
+#define QSERDES_TX_EMP_POST1_LVL 0x018
+#define QSERDES_TX_SLEW_CNTL 0x040
#define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054
#define QSERDES_TX_DEBUG_BUS_SEL 0x064
#define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index b8b226a20014..1feb1e1bf85e 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -717,7 +717,9 @@ static int exynos5_usbdrd_phy_calibrate(struct phy *phy)
struct phy_usb_instance *inst = phy_get_drvdata(phy);
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
- return exynos5420_usbdrd_phy_calibrate(phy_drd);
+ if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI)
+ return exynos5420_usbdrd_phy_calibrate(phy_drd);
+ return 0;
}
static const struct phy_ops exynos5_usbdrd_phy_ops = {
diff --git a/drivers/phy/samsung/phy-s5pv210-usb2.c b/drivers/phy/samsung/phy-s5pv210-usb2.c
index f6f72339bbc3..bb7fdf491c1c 100644
--- a/drivers/phy/samsung/phy-s5pv210-usb2.c
+++ b/drivers/phy/samsung/phy-s5pv210-usb2.c
@@ -142,6 +142,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
udelay(10);
rst &= ~rstbits;
writel(rst, drv->reg_phy + S5PV210_UPHYRST);
+ /* The following delay is necessary for the reset sequence to be
+ * completed
+ */
+ udelay(80);
} else {
pwr = readl(drv->reg_phy + S5PV210_UPHYPWR);
pwr |= phypwr;
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index de1b4ebe4de2..39c01ef57d83 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -899,6 +899,7 @@ remove_pads:
reset:
reset_control_assert(padctl->rst);
remove:
+ platform_set_drvdata(pdev, NULL);
soc->ops->remove(padctl);
return err;
}
diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
index c267afb68f07..ea7564392108 100644
--- a/drivers/phy/ti/phy-twl4030-usb.c
+++ b/drivers/phy/ti/phy-twl4030-usb.c
@@ -801,7 +801,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
usb_remove_phy(&twl->phy);
pm_runtime_get_sync(twl->dev);
- cancel_delayed_work(&twl->id_workaround_work);
+ cancel_delayed_work_sync(&twl->id_workaround_work);
device_remove_file(twl->dev, &dev_attr_vbus);
/* set transceiver mode to power on defaults */
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index aefe3c33dffd..8dec302dc067 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -458,13 +458,14 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
{
/*
- * The signal type is GPIO if the signal name has "GPIO" as a prefix.
+ * The signal type is GPIO if the signal name has "GPI" as a prefix.
* strncmp (rather than strcmp) is used to implement the prefix
* requirement.
*
- * expr->signal might look like "GPIOT3" in the GPIO case.
+ * expr->signal might look like "GPIOB1" in the GPIO case.
+ * expr->signal might look like "GPIT0" in the GPI case.
*/
- return strncmp(expr->signal, "GPIO", 4) == 0;
+ return strncmp(expr->signal, "GPI", 3) == 0;
}
static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 0f38d51f47c6..e6cd314919de 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -21,6 +21,7 @@ config PINCTRL_BCM2835
select PINMUX
select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
select GPIOLIB_IRQCHIP
config PINCTRL_IPROC_GPIO
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index b04edc22dad7..90d414dd792c 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -662,16 +662,6 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
return 0;
}
-/*
- * imx_free_resources() - free memory used by this driver
- * @info: info driver instance
- */
-static void imx_free_resources(struct imx_pinctrl *ipctl)
-{
- if (ipctl->pctl)
- pinctrl_unregister(ipctl->pctl);
-}
-
int imx_pinctrl_probe(struct platform_device *pdev,
const struct imx_pinctrl_soc_info *info)
{
@@ -762,21 +752,16 @@ int imx_pinctrl_probe(struct platform_device *pdev,
&ipctl->pctl);
if (ret) {
dev_err(&pdev->dev, "could not register IMX pinctrl driver\n");
- goto free;
+ return ret;
}
ret = imx_pinctrl_probe_dt(pdev, ipctl);
if (ret) {
dev_err(&pdev->dev, "fail to probe dt properties\n");
- goto free;
+ return ret;
}
dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
return pinctrl_enable(ipctl->pctl);
-
-free:
- imx_free_resources(ipctl);
-
- return ret;
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index deb7870b3d1a..961c24e0cc8f 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -638,7 +638,6 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev,
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
if (ret) {
- pinctrl_unregister(ipctl->pctl);
dev_err(&pdev->dev, "Failed to populate subdevices\n");
return ret;
}
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index a760d8bda0af..b3d478edbbb1 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1009,6 +1009,21 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev,
pm_runtime_put(&vg->pdev->dev);
}
+static void byt_gpio_direct_irq_check(struct byt_gpio *vg,
+ unsigned int offset)
+{
+ void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+
+ /*
+ * Before making any direction modifications, check whether the GPIO is
+ * set for direct IRQ. On Bay Trail, setting such a GPIO to output does
+ * not make sense, so let's at least inform the caller before they shoot
+ * themselves in the foot.
+ */
+ if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
+ dev_info_once(&vg->pdev->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+}
+
static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
struct pinctrl_gpio_range *range,
unsigned int offset,
@@ -1016,7 +1031,6 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
{
struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
- void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
unsigned long flags;
u32 value;
@@ -1027,14 +1041,8 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
if (input)
value |= BYT_OUTPUT_EN;
else
- /*
- * Before making any direction modifications, do a check if gpio
- * is set for direct IRQ. On baytrail, setting GPIO to output
- * does not make sense, so let's at least warn the caller before
- * they shoot themselves in the foot.
- */
- WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN,
- "Potential Error: Setting GPIO with direct_irq_en to output");
+ byt_gpio_direct_irq_check(vg, offset);
+
writel(value, val_reg);
raw_spin_unlock_irqrestore(&byt_lock, flags);
@@ -1250,7 +1258,6 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
break;
case PIN_CONFIG_INPUT_DEBOUNCE:
debounce = readl(db_reg);
- debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
if (arg)
conf |= BYT_DEBOUNCE_EN;
@@ -1259,24 +1266,31 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
switch (arg) {
case 375:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_375US;
break;
case 750:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_750US;
break;
case 1500:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_1500US;
break;
case 3000:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_3MS;
break;
case 6000:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_6MS;
break;
case 12000:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_12MS;
break;
case 24000:
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
debounce |= BYT_DEBOUNCE_PULSE_24MS;
break;
default:
@@ -1374,19 +1388,50 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
- return pinctrl_gpio_direction_input(chip->base + offset);
+ struct byt_gpio *vg = gpiochip_get_data(chip);
+ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ unsigned long flags;
+ u32 reg;
+
+ raw_spin_lock_irqsave(&byt_lock, flags);
+
+ reg = readl(val_reg);
+ reg &= ~BYT_DIR_MASK;
+ reg |= BYT_OUTPUT_EN;
+ writel(reg, val_reg);
+
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
+ return 0;
}
+/*
+ * Note: despite the temptation, this MUST NOT be converted into a call to
+ * pinctrl_gpio_direction_output() + byt_gpio_set(); that does not work. This
+ * MUST be done as a single BYT_VAL_REG register write.
+ * See the commit message of the commit adding this comment for details.
+ */
static int byt_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
- int ret = pinctrl_gpio_direction_output(chip->base + offset);
+ struct byt_gpio *vg = gpiochip_get_data(chip);
+ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ unsigned long flags;
+ u32 reg;
- if (ret)
- return ret;
+ raw_spin_lock_irqsave(&byt_lock, flags);
- byt_gpio_set(chip, offset, value);
+ byt_gpio_direct_irq_check(vg, offset);
+ reg = readl(val_reg);
+ reg &= ~BYT_DIR_MASK;
+ if (value)
+ reg |= BYT_LEVEL;
+ else
+ reg &= ~BYT_LEVEL;
+
+ writel(reg, val_reg);
+
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
@@ -1495,6 +1540,7 @@ static const struct gpio_chip byt_gpio_chip = {
.direction_output = byt_gpio_direction_output,
.get = byt_gpio_get,
.set = byt_gpio_set,
+ .set_config = gpiochip_generic_config,
.dbg_show = byt_gpio_dbg_show,
};
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index f16baf9b8696..25932d2a7154 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1485,11 +1485,15 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long pending;
+ unsigned long flags;
u32 intr_line;
chained_irq_enter(chip, desc);
+ raw_spin_lock_irqsave(&chv_lock, flags);
pending = readl(pctrl->regs + CHV_INTSTAT);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
unsigned irq, offset;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 89ff2795a8b5..5e0adb00b430 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -621,6 +621,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin,
value |= PADCFG1_TERM_UP;
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 5000;
+
switch (arg) {
case 20000:
value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
@@ -643,6 +647,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin,
case PIN_CONFIG_BIAS_PULL_DOWN:
value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK);
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 5000;
+
switch (arg) {
case 20000:
value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
index dc32c22bf19f..8388aa671b21 100644
--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
+++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
@@ -297,9 +297,9 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
static const struct intel_community lbg_communities[] = {
LBG_COMMUNITY(0, 0, 71),
LBG_COMMUNITY(1, 72, 132),
- LBG_COMMUNITY(3, 133, 144),
- LBG_COMMUNITY(4, 145, 180),
- LBG_COMMUNITY(5, 181, 246),
+ LBG_COMMUNITY(3, 133, 143),
+ LBG_COMMUNITY(4, 144, 178),
+ LBG_COMMUNITY(5, 179, 246),
};
static const struct intel_pinctrl_soc_data lbg_soc_data = {
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 4fa69f988c7b..6b2312e73f23 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -729,6 +729,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin,
mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
bits |= BUFCFG_PU_EN;
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 20000;
+
switch (arg) {
case 50000:
bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
@@ -749,6 +753,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin,
mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
bits |= BUFCFG_PD_EN;
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 20000;
+
switch (arg) {
case 50000:
bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index 43231fd065a1..1a9450ef932b 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -418,7 +418,7 @@ static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = {
MPP_VAR_FUNCTION(0x1, "i2c0", "sck", V_98DX3236_PLUS)),
MPP_MODE(15,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x4, "i2c0", "sda", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x1, "i2c0", "sda", V_98DX3236_PLUS)),
MPP_MODE(16,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
MPP_VAR_FUNCTION(0x4, "dev", "oe", V_98DX3236_PLUS)),
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index b1ffdd3f6d07..d9b9c11c7f8f 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -157,7 +157,7 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
} else if (debounce < 250000) {
- time = debounce / 15600;
+ time = debounce / 15625;
pin_reg |= time & DB_TMR_OUT_MASK;
pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
pin_reg |= BIT(DB_TMR_LARGE_OFF);
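[Editor's note] For reference on the divisor change above (15600 -> 15625): in this branch the requested debounce time in microseconds is converted into hardware timer ticks, and 15625 µs (15.625 ms) divides the 250 ms upper bound into exactly 16 steps, which presumably matches what the timer field encodes. Worked values of the computation the code now performs (illustrative only):

	debounce = 125000 µs  ->  125000 / 15625 = 8 ticks (exact)
	debounce = 100000 µs  ->  100000 / 15625 = 6 ticks (6.4, truncated)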
@@ -167,14 +167,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
pin_reg |= BIT(DB_TMR_LARGE_OFF);
} else {
- pin_reg &= ~DB_CNTRl_MASK;
+ pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
ret = -EINVAL;
}
} else {
pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
pin_reg &= ~DB_TMR_OUT_MASK;
- pin_reg &= ~DB_CNTRl_MASK;
+ pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
}
writel(pin_reg, gpio_dev->base + offset * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -439,7 +439,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg &= ~BIT(LEVEL_TRIG_OFF);
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_edge_irq);
break;
@@ -447,7 +446,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg &= ~BIT(LEVEL_TRIG_OFF);
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_edge_irq);
break;
@@ -455,7 +453,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg &= ~BIT(LEVEL_TRIG_OFF);
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_edge_irq);
break;
@@ -463,8 +460,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
- pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
- pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_level_irq);
break;
@@ -472,8 +467,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
- pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
- pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_level_irq);
break;
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 22af7edfdb38..91da7527f002 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -256,7 +256,7 @@ static const struct amd_pingroup kerncz_groups[] = {
{
.name = "uart0",
.pins = uart0_pins,
- .npins = 9,
+ .npins = 5,
},
{
.name = "uart1",
diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
index fb73dcbb5ef3..68dcf53aaac3 100644
--- a/drivers/pinctrl/pinctrl-falcon.c
+++ b/drivers/pinctrl/pinctrl-falcon.c
@@ -438,24 +438,28 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
/* load and remap the pad resources of the different banks */
for_each_compatible_node(np, NULL, "lantiq,pad-falcon") {
- struct platform_device *ppdev = of_find_device_by_node(np);
const __be32 *bank = of_get_property(np, "lantiq,bank", NULL);
struct resource res;
+ struct platform_device *ppdev;
u32 avail;
int pins;
if (!of_device_is_available(np))
continue;
- if (!ppdev) {
- dev_err(&pdev->dev, "failed to find pad pdev\n");
- continue;
- }
if (!bank || *bank >= PORTS)
continue;
if (of_address_to_resource(np, 0, &res))
continue;
+
+ ppdev = of_find_device_by_node(np);
+ if (!ppdev) {
+ dev_err(&pdev->dev, "failed to find pad pdev\n");
+ continue;
+ }
+
falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL);
+ put_device(&ppdev->dev);
if (IS_ERR(falcon_info.clk[*bank])) {
dev_err(&ppdev->dev, "failed to get clock\n");
return PTR_ERR(falcon_info.clk[*bank]);
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index a5accffbc8c9..babf6d011264 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -642,7 +642,8 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
default:
- unreachable();
+ /* unreachable */
+ break;
}
}
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 33c3eca0ece9..5b5a4323ae63 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -120,7 +120,7 @@ static const struct regmap_config mcp23x08_regmap = {
.max_register = MCP_OLAT,
};
-static const struct reg_default mcp23x16_defaults[] = {
+static const struct reg_default mcp23x17_defaults[] = {
{.reg = MCP_IODIR << 1, .def = 0xffff},
{.reg = MCP_IPOL << 1, .def = 0x0000},
{.reg = MCP_GPINTEN << 1, .def = 0x0000},
@@ -131,23 +131,23 @@ static const struct reg_default mcp23x16_defaults[] = {
{.reg = MCP_OLAT << 1, .def = 0x0000},
};
-static const struct regmap_range mcp23x16_volatile_range = {
+static const struct regmap_range mcp23x17_volatile_range = {
.range_min = MCP_INTF << 1,
.range_max = MCP_GPIO << 1,
};
-static const struct regmap_access_table mcp23x16_volatile_table = {
- .yes_ranges = &mcp23x16_volatile_range,
+static const struct regmap_access_table mcp23x17_volatile_table = {
+ .yes_ranges = &mcp23x17_volatile_range,
.n_yes_ranges = 1,
};
-static const struct regmap_range mcp23x16_precious_range = {
- .range_min = MCP_GPIO << 1,
+static const struct regmap_range mcp23x17_precious_range = {
+ .range_min = MCP_INTCAP << 1,
.range_max = MCP_GPIO << 1,
};
-static const struct regmap_access_table mcp23x16_precious_table = {
- .yes_ranges = &mcp23x16_precious_range,
+static const struct regmap_access_table mcp23x17_precious_table = {
+ .yes_ranges = &mcp23x17_precious_range,
.n_yes_ranges = 1,
};
@@ -157,10 +157,10 @@ static const struct regmap_config mcp23x17_regmap = {
.reg_stride = 2,
.max_register = MCP_OLAT << 1,
- .volatile_table = &mcp23x16_volatile_table,
- .precious_table = &mcp23x16_precious_table,
- .reg_defaults = mcp23x16_defaults,
- .num_reg_defaults = ARRAY_SIZE(mcp23x16_defaults),
+ .volatile_table = &mcp23x17_volatile_table,
+ .precious_table = &mcp23x17_precious_table,
+ .reg_defaults = mcp23x17_defaults,
+ .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
.cache_type = REGCACHE_FLAT,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 8d83817935da..d0d5b8bdbc10 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -507,8 +507,8 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
}
map_num += grp->npins;
- new_map = devm_kcalloc(pctldev->dev, map_num, sizeof(*new_map),
- GFP_KERNEL);
+
+ new_map = kcalloc(map_num, sizeof(*new_map), GFP_KERNEL);
if (!new_map)
return -ENOMEM;
@@ -518,7 +518,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
/* create mux map */
parent = of_get_parent(np);
if (!parent) {
- devm_kfree(pctldev->dev, new_map);
+ kfree(new_map);
return -EINVAL;
}
new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
@@ -545,6 +545,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
static void rockchip_dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps)
{
+ kfree(map);
}
static const struct pinctrl_ops rockchip_pctrl_ops = {
@@ -2777,7 +2778,9 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
if (!bank->domain)
return -ENXIO;
+ clk_enable(bank->clk);
virq = irq_create_mapping(bank->domain, offset);
+ clk_disable(bank->clk);
return (virq) ? : -ENXIO;
}
@@ -3350,12 +3353,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev)
static int __maybe_unused rockchip_pinctrl_resume(struct device *dev)
{
struct rockchip_pinctrl *info = dev_get_drvdata(dev);
- int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
- rk3288_grf_gpio6c_iomux |
- GPIO6C6_SEL_WRITE_ENABLE);
+ int ret;
- if (ret)
- return ret;
+ if (info->ctrl->type == RK3288) {
+ ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
+ rk3288_grf_gpio6c_iomux |
+ GPIO6C6_SEL_WRITE_ENABLE);
+ if (ret)
+ return ret;
+ }
return pinctrl_force_default(info->pctl_dev);
}
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index f76edf664539..021c19eaf12d 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -421,7 +421,7 @@ static const struct rza1_bidir_entry rza1l_bidir_entries[RZA1_NPORTS] = {
};
static const struct rza1_swio_entry rza1l_swio_entries[] = {
- [0] = { ARRAY_SIZE(rza1h_swio_pins), rza1h_swio_pins },
+ [0] = { ARRAY_SIZE(rza1l_swio_pins), rza1l_swio_pins },
};
/* RZ/A1L (r7s72102x) pinmux flags table */
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 7ec72ff2419a..04a4e761e9a9 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -916,7 +916,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
/* If pinconf isn't supported, don't parse the properties below. */
if (!PCS_HAS_PINCONF)
- return 0;
+ return -ENOTSUPP;
/* calculate how many properties are supported in the current node */
for (i = 0; i < ARRAY_SIZE(prop2); i++) {
@@ -928,7 +928,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
nconfs++;
}
if (!nconfs)
- return 0;
+ return -ENOTSUPP;
func->conf = devm_kcalloc(pcs->dev,
nconfs, sizeof(struct pcs_conf_vals),
@@ -1056,9 +1056,12 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
if (PCS_HAS_PINCONF && function) {
res = pcs_parse_pinconf(pcs, np, function, map);
- if (res)
+ if (res == 0)
+ *num_maps = 2;
+ else if (res == -ENOTSUPP)
+ *num_maps = 1;
+ else
goto free_pingroups;
- *num_maps = 2;
} else {
*num_maps = 1;
}
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index e87ee43efa16..7f764f751c4f 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -40,6 +40,8 @@ struct exynos_irq_chip {
u32 eint_pend;
u32 eint_wake_mask_value;
u32 eint_wake_mask_reg;
+ void (*set_eint_wakeup_mask)(struct samsung_pinctrl_drv_data *drvdata,
+ struct exynos_irq_chip *irq_chip);
};
static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip)
@@ -53,7 +55,7 @@ static void exynos_irq_mask(struct irq_data *irqd)
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
- unsigned long mask;
+ unsigned int mask;
unsigned long flags;
spin_lock_irqsave(&bank->slock, flags);
@@ -81,7 +83,7 @@ static void exynos_irq_unmask(struct irq_data *irqd)
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
- unsigned long mask;
+ unsigned int mask;
unsigned long flags;
/*
@@ -265,6 +267,7 @@ struct exynos_eint_gpio_save {
u32 eint_con;
u32 eint_fltcon0;
u32 eint_fltcon1;
+ u32 eint_mask;
};
/*
@@ -350,6 +353,47 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
return 0;
}
+static void
+exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
+ struct exynos_irq_chip *irq_chip)
+{
+ struct regmap *pmu_regs;
+
+ if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
+ dev_warn(drvdata->dev,
+ "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n");
+ return;
+ }
+
+ pmu_regs = drvdata->retention_ctrl->priv;
+ dev_info(drvdata->dev,
+ "Setting external wakeup interrupt mask: 0x%x\n",
+ irq_chip->eint_wake_mask_value);
+
+ regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg,
+ irq_chip->eint_wake_mask_value);
+}
+
+static void
+s5pv210_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
+ struct exynos_irq_chip *irq_chip)
+
+{
+ void __iomem *clk_base;
+
+ if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
+ dev_warn(drvdata->dev,
+ "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n");
+ return;
+ }
+
+
+ clk_base = (void __iomem *) drvdata->retention_ctrl->priv;
+
+ __raw_writel(irq_chip->eint_wake_mask_value,
+ clk_base + irq_chip->eint_wake_mask_reg);
+}
+
/*
* irq_chip for wakeup interrupts
*/
@@ -368,8 +412,9 @@ static const struct exynos_irq_chip s5pv210_wkup_irq_chip __initconst = {
.eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
.eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
.eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
- /* Only difference with exynos4210_wkup_irq_chip: */
+ /* Only differences with exynos4210_wkup_irq_chip: */
.eint_wake_mask_reg = S5PV210_EINT_WAKEUP_MASK,
+ .set_eint_wakeup_mask = s5pv210_pinctrl_set_eint_wakeup_mask,
};
static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
@@ -388,6 +433,7 @@ static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
.eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
.eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
.eint_wake_mask_reg = EXYNOS_EINT_WAKEUP_MASK,
+ .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
};
static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
@@ -406,6 +452,7 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
.eint_pend = EXYNOS7_WKUP_EPEND_OFFSET,
.eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
.eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK,
+ .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
};
/* list of external wakeup controllers supported */
@@ -435,7 +482,7 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static inline void exynos_irq_demux_eint(unsigned long pend,
+static inline void exynos_irq_demux_eint(unsigned int pend,
struct irq_domain *domain)
{
unsigned int irq;
@@ -452,8 +499,8 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
- unsigned long pend;
- unsigned long mask;
+ unsigned int pend;
+ unsigned int mask;
int i;
chained_irq_enter(chip, desc);
@@ -582,27 +629,6 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
return 0;
}
-static void
-exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
- struct exynos_irq_chip *irq_chip)
-{
- struct regmap *pmu_regs;
-
- if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
- dev_warn(drvdata->dev,
- "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n");
- return;
- }
-
- pmu_regs = drvdata->retention_ctrl->priv;
- dev_info(drvdata->dev,
- "Setting external wakeup interrupt mask: 0x%x\n",
- irq_chip->eint_wake_mask_value);
-
- regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg,
- irq_chip->eint_wake_mask_value);
-}
-
static void exynos_pinctrl_suspend_bank(
struct samsung_pinctrl_drv_data *drvdata,
struct samsung_pin_bank *bank)
@@ -616,10 +642,13 @@ static void exynos_pinctrl_suspend_bank(
+ 2 * bank->eint_offset);
save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ 2 * bank->eint_offset + 4);
+ save->eint_mask = readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
pr_debug("%s: save con %#010x\n", bank->name, save->eint_con);
pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0);
pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1);
+ pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask);
}
void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
@@ -634,8 +663,8 @@ void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
else if (bank->eint_type == EINT_TYPE_WKUP) {
if (!irq_chip) {
irq_chip = bank->irq_chip;
- exynos_pinctrl_set_eint_wakeup_mask(drvdata,
- irq_chip);
+ irq_chip->set_eint_wakeup_mask(drvdata,
+ irq_chip);
} else if (bank->irq_chip != irq_chip) {
dev_warn(drvdata->dev,
"More than one external wakeup interrupt chip configured (bank: %s). This is not supported by hardware nor by driver.\n",
@@ -661,6 +690,9 @@ static void exynos_pinctrl_resume_bank(
pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ 2 * bank->eint_offset + 4), save->eint_fltcon1);
+ pr_debug("%s: mask %#010x => %#010x\n", bank->name,
+ readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset), save->eint_mask);
writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
+ bank->eint_offset);
@@ -668,6 +700,8 @@ static void exynos_pinctrl_resume_bank(
+ 2 * bank->eint_offset);
writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ 2 * bank->eint_offset + 4);
+ writel(save->eint_mask, regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
}
void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 61aaaf58c599..ff9c2758d25e 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -1001,20 +1001,22 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
if (bank == pctl->desc->irq_banks)
return;
+ chained_irq_enter(chip, desc);
+
reg = sunxi_irq_status_reg_from_bank(pctl->desc, bank);
val = readl(pctl->membase + reg);
if (val) {
int irqoffset;
- chained_irq_enter(chip, desc);
for_each_set_bit(irqoffset, &val, IRQ_PER_BANK) {
int pin_irq = irq_find_mapping(pctl->domain,
bank * IRQ_PER_BANK + irqoffset);
generic_handle_irq(pin_irq);
}
- chained_irq_exit(chip, desc);
}
+
+ chained_irq_exit(chip, desc);
}
static int sunxi_pinctrl_add_function(struct sunxi_pinctrl *pctl,
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 1e2524de6a63..a13bb4ddd0cf 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -235,6 +235,7 @@ config FUJITSU_LAPTOP
depends on BACKLIGHT_CLASS_DEVICE
depends on ACPI_VIDEO || ACPI_VIDEO = n
select INPUT_SPARSEKMAP
+ select NEW_LEDS
select LEDS_CLASS
---help---
This is a driver for laptops built by Fujitsu:
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index fcfeadd1301f..bd25c8a156d2 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -43,6 +43,7 @@
#include <linux/input/sparse-keymap.h>
#include <acpi/video.h>
+ACPI_MODULE_NAME(KBUILD_MODNAME);
MODULE_AUTHOR("Carlos Corbacho");
MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver");
MODULE_LICENSE("GPL");
@@ -93,7 +94,7 @@ MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
enum acer_wmi_event_ids {
WMID_HOTKEY_EVENT = 0x1,
- WMID_ACCEL_EVENT = 0x5,
+ WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5,
};
static const struct key_entry acer_wmi_keymap[] __initconst = {
@@ -124,6 +125,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
{KE_IGNORE, 0x81, {KEY_SLEEP} },
{KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad Toggle */
+ {KE_IGNORE, 0x84, {KEY_KBDILLUMTOGGLE} }, /* Automatic Keyboard background light toggle */
{KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} },
{KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} },
{KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
@@ -140,7 +142,9 @@ struct event_return_value {
u8 function;
u8 key_num;
u16 device_state;
- u32 reserved;
+ u16 reserved1;
+ u8 kbd_dock_state;
+ u8 reserved2;
} __attribute__((packed));
/*
@@ -218,14 +222,13 @@ struct hotkey_function_type_aa {
/*
* Interface capability flags
*/
-#define ACER_CAP_MAILLED (1<<0)
-#define ACER_CAP_WIRELESS (1<<1)
-#define ACER_CAP_BLUETOOTH (1<<2)
-#define ACER_CAP_BRIGHTNESS (1<<3)
-#define ACER_CAP_THREEG (1<<4)
-#define ACER_CAP_ACCEL (1<<5)
-#define ACER_CAP_RFBTN (1<<6)
-#define ACER_CAP_ANY (0xFFFFFFFF)
+#define ACER_CAP_MAILLED BIT(0)
+#define ACER_CAP_WIRELESS BIT(1)
+#define ACER_CAP_BLUETOOTH BIT(2)
+#define ACER_CAP_BRIGHTNESS BIT(3)
+#define ACER_CAP_THREEG BIT(4)
+#define ACER_CAP_SET_FUNCTION_MODE BIT(5)
+#define ACER_CAP_KBD_DOCK BIT(6)
/*
* Interface type flags
@@ -248,6 +251,7 @@ static int mailled = -1;
static int brightness = -1;
static int threeg = -1;
static int force_series;
+static int force_caps = -1;
static bool ec_raw_mode;
static bool has_type_aa;
static u16 commun_func_bitmap;
@@ -257,11 +261,13 @@ module_param(mailled, int, 0444);
module_param(brightness, int, 0444);
module_param(threeg, int, 0444);
module_param(force_series, int, 0444);
+module_param(force_caps, int, 0444);
module_param(ec_raw_mode, bool, 0444);
MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
MODULE_PARM_DESC(force_series, "Force a different laptop series");
+MODULE_PARM_DESC(force_caps, "Force the capability bitmask to this value");
MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode");
struct acer_data {
@@ -332,6 +338,15 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
return 1;
}
+static int __init set_force_caps(const struct dmi_system_id *dmi)
+{
+ if (force_caps == -1) {
+ force_caps = (uintptr_t)dmi->driver_data;
+ pr_info("Found %s, set force_caps to 0x%x\n", dmi->ident, force_caps);
+ }
+ return 1;
+}
+
static struct quirk_entry quirk_unknown = {
};
@@ -510,6 +525,33 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
},
.driver_data = &quirk_acer_travelmate_2490,
},
+ {
+ .callback = set_force_caps,
+ .ident = "Acer Aspire Switch 10E SW3-016",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW3-016"),
+ },
+ .driver_data = (void *)ACER_CAP_KBD_DOCK,
+ },
+ {
+ .callback = set_force_caps,
+ .ident = "Acer Aspire Switch 10 SW5-012",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+ },
+ .driver_data = (void *)ACER_CAP_KBD_DOCK,
+ },
+ {
+ .callback = set_force_caps,
+ .ident = "Acer One 10 (S1003)",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
+ },
+ .driver_data = (void *)ACER_CAP_KBD_DOCK,
+ },
{}
};
@@ -1265,10 +1307,8 @@ static void __init type_aa_dmi_decode(const struct dmi_header *header, void *d)
interface->capability |= ACER_CAP_THREEG;
if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH)
interface->capability |= ACER_CAP_BLUETOOTH;
- if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_RFBTN) {
- interface->capability |= ACER_CAP_RFBTN;
+ if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_RFBTN)
commun_func_bitmap &= ~ACER_WMID3_GDS_RFBTN;
- }
commun_fn_key_number = type_aa->commun_fn_key_number;
}
@@ -1529,7 +1569,7 @@ static int acer_gsensor_event(void)
struct acpi_buffer output;
union acpi_object out_obj[5];
- if (!has_cap(ACER_CAP_ACCEL))
+ if (!acer_wmi_accel_dev)
return -1;
output.length = sizeof(out_obj);
@@ -1553,6 +1593,71 @@ static int acer_gsensor_event(void)
}
/*
+ * Switch series keyboard dock status
+ */
+static int acer_kbd_dock_state_to_sw_tablet_mode(u8 kbd_dock_state)
+{
+ switch (kbd_dock_state) {
+ case 0x01: /* Docked, traditional clamshell laptop mode */
+ return 0;
+ case 0x04: /* Stand-alone tablet */
+ case 0x40: /* Docked, tent mode, keyboard not usable */
+ return 1;
+ default:
+ pr_warn("Unknown kbd_dock_state 0x%02x\n", kbd_dock_state);
+ }
+
+ return 0;
+}
+
+static void acer_kbd_dock_get_initial_state(void)
+{
+ u8 *output, input[8] = { 0x05, 0x00, };
+ struct acpi_buffer input_buf = { sizeof(input), input };
+ struct acpi_buffer output_buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ int sw_tablet_mode;
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input_buf, &output_buf);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Error getting keyboard-dock initial status"));
+ return;
+ }
+
+ obj = output_buf.pointer;
+ if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length != 8) {
+ pr_err("Unexpected output format getting keyboard-dock initial status\n");
+ goto out_free_obj;
+ }
+
+ output = obj->buffer.pointer;
+ if (output[0] != 0x00 || (output[3] != 0x05 && output[3] != 0x45)) {
+ pr_err("Unexpected output [0]=0x%02x [3]=0x%02x getting keyboard-dock initial status\n",
+ output[0], output[3]);
+ goto out_free_obj;
+ }
+
+ sw_tablet_mode = acer_kbd_dock_state_to_sw_tablet_mode(output[4]);
+ input_report_switch(acer_wmi_input_dev, SW_TABLET_MODE, sw_tablet_mode);
+
+out_free_obj:
+ kfree(obj);
+}
+
+static void acer_kbd_dock_event(const struct event_return_value *event)
+{
+ int sw_tablet_mode;
+
+ if (!has_cap(ACER_CAP_KBD_DOCK))
+ return;
+
+ sw_tablet_mode = acer_kbd_dock_state_to_sw_tablet_mode(event->kbd_dock_state);
+ input_report_switch(acer_wmi_input_dev, SW_TABLET_MODE, sw_tablet_mode);
+ input_sync(acer_wmi_input_dev);
+}
+
+/*
* Rfkill devices
*/
static void acer_rfkill_update(struct work_struct *ignored);
@@ -1779,8 +1884,9 @@ static void acer_wmi_notify(u32 value, void *context)
sparse_keymap_report_event(acer_wmi_input_dev, scancode, 1, true);
}
break;
- case WMID_ACCEL_EVENT:
+ case WMID_ACCEL_OR_KBD_DOCK_EVENT:
acer_gsensor_event();
+ acer_kbd_dock_event(&return_value);
break;
default:
pr_warn("Unknown function number - %d - %d\n",
@@ -1938,8 +2044,6 @@ static int __init acer_wmi_accel_setup(void)
if (err)
return err;
- interface->capability |= ACER_CAP_ACCEL;
-
acer_wmi_accel_dev = input_allocate_device();
if (!acer_wmi_accel_dev)
return -ENOMEM;
@@ -1965,11 +2069,6 @@ err_free_dev:
return err;
}
-static void acer_wmi_accel_destroy(void)
-{
- input_unregister_device(acer_wmi_accel_dev);
-}
-
static int __init acer_wmi_input_setup(void)
{
acpi_status status;
@@ -1987,6 +2086,9 @@ static int __init acer_wmi_input_setup(void)
if (err)
goto err_free_dev;
+ if (has_cap(ACER_CAP_KBD_DOCK))
+ input_set_capability(acer_wmi_input_dev, EV_SW, SW_TABLET_MODE);
+
status = wmi_install_notify_handler(ACERWMID_EVENT_GUID,
acer_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
@@ -1994,6 +2096,9 @@ static int __init acer_wmi_input_setup(void)
goto err_free_dev;
}
+ if (has_cap(ACER_CAP_KBD_DOCK))
+ acer_kbd_dock_get_initial_state();
+
err = input_register_device(acer_wmi_input_dev);
if (err)
goto err_uninstall_notifier;
@@ -2124,7 +2229,7 @@ static int acer_resume(struct device *dev)
if (has_cap(ACER_CAP_BRIGHTNESS))
set_u32(data->brightness, ACER_CAP_BRIGHTNESS);
- if (has_cap(ACER_CAP_ACCEL))
+ if (acer_wmi_accel_dev)
acer_gsensor_init();
return 0;
@@ -2239,7 +2344,7 @@ static int __init acer_wmi_init(void)
}
/* WMID always provides brightness methods */
interface->capability |= ACER_CAP_BRIGHTNESS;
- } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa) {
+ } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa && force_caps == -1) {
pr_err("No WMID device detection method found\n");
return -ENODEV;
}
@@ -2269,7 +2374,14 @@ static int __init acer_wmi_init(void)
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
interface->capability &= ~ACER_CAP_BRIGHTNESS;
- if (wmi_has_guid(WMID_GUID3)) {
+ if (wmi_has_guid(WMID_GUID3))
+ interface->capability |= ACER_CAP_SET_FUNCTION_MODE;
+
+ if (force_caps != -1)
+ interface->capability = force_caps;
+
+ if (wmi_has_guid(WMID_GUID3) &&
+ (interface->capability & ACER_CAP_SET_FUNCTION_MODE)) {
if (ACPI_FAILURE(acer_wmi_enable_rf_button()))
pr_warn("Cannot enable RF Button Driver\n");
@@ -2332,8 +2444,8 @@ error_device_alloc:
error_platform_register:
if (wmi_has_guid(ACERWMID_EVENT_GUID))
acer_wmi_input_destroy();
- if (has_cap(ACER_CAP_ACCEL))
- acer_wmi_accel_destroy();
+ if (acer_wmi_accel_dev)
+ input_unregister_device(acer_wmi_accel_dev);
return err;
}
@@ -2343,8 +2455,8 @@ static void __exit acer_wmi_exit(void)
if (wmi_has_guid(ACERWMID_EVENT_GUID))
acer_wmi_input_destroy();
- if (has_cap(ACER_CAP_ACCEL))
- acer_wmi_accel_destroy();
+ if (acer_wmi_accel_dev)
+ input_unregister_device(acer_wmi_accel_dev);
remove_debugfs();
platform_device_unregister(acer_platform_device);
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 59f3a37a44d7..8db2dc05b8cf 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -517,9 +517,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = {
.detect_quirks = asus_nb_wmi_quirks,
};
+static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = {
+ {
+ /*
+ * asus-nb-wmi adds no functionality. The T100TA has a detachable
+ * USB keyboard, so no hotkeys, and it has no WMI rfkill; loading
+ * asus-nb-wmi causes the camera LED to turn on and _stay_ on.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ },
+ },
+ {
+ /* The Asus T200TA has the same issue as the T100TA */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+ },
+ },
+ {} /* Terminating entry */
+};
static int __init asus_nb_wmi_init(void)
{
+ if (dmi_check_system(asus_nb_wmi_blacklist))
+ return -ENODEV;
+
return asus_wmi_register_driver(&asus_nb_wmi_driver);
}
diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell-smbios-base.c
index 0537d44d45a6..9e9fc5155789 100644
--- a/drivers/platform/x86/dell-smbios-base.c
+++ b/drivers/platform/x86/dell-smbios-base.c
@@ -597,6 +597,7 @@ static int __init dell_smbios_init(void)
if (wmi && smm) {
pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n",
wmi, smm);
+ ret = -ENODEV;
goto fail_create_group;
}
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
index cf2229ece9ff..ccccce9b67ef 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -274,7 +274,8 @@ int init_dell_smbios_wmi(void)
void exit_dell_smbios_wmi(void)
{
- wmi_driver_unregister(&dell_smbios_wmi_driver);
+ if (wmi_supported)
+ wmi_driver_unregister(&dell_smbios_wmi_driver);
}
MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID);
diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c
index b471b86c28fe..5b516e4c2bfb 100644
--- a/drivers/platform/x86/gpd-pocket-fan.c
+++ b/drivers/platform/x86/gpd-pocket-fan.c
@@ -128,7 +128,7 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(temp_limits); i++) {
if (temp_limits[i] < 20000 || temp_limits[i] > 90000) {
- dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 40000 and 70000)\n",
+ dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 20000 and 90000)\n",
temp_limits[i]);
temp_limits[0] = TEMP_LIMIT0_DEFAULT;
temp_limits[1] = TEMP_LIMIT1_DEFAULT;
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
index d6ea5e998fb8..bb95bec0b110 100644
--- a/drivers/platform/x86/hp-wireless.c
+++ b/drivers/platform/x86/hp-wireless.c
@@ -30,12 +30,14 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
MODULE_ALIAS("acpi*:HPQ6001:*");
MODULE_ALIAS("acpi*:WSTADEF:*");
+MODULE_ALIAS("acpi*:AMDI0051:*");
static struct input_dev *hpwl_input_dev;
static const struct acpi_device_id hpwl_ids[] = {
{"HPQ6001", 0},
{"WSTADEF", 0},
+ {"AMDI0051", 0},
{"", 0},
};
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 06a3c1ef8eee..93fadd4abf14 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -45,6 +45,10 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+static int enable_tablet_mode_sw = -1;
+module_param(enable_tablet_mode_sw, int, 0444);
+MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
+
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
@@ -474,8 +478,14 @@ static ssize_t postcode_show(struct device *dev, struct device_attribute *attr,
static ssize_t als_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- u32 tmp = simple_strtoul(buf, NULL, 10);
- int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp,
+ u32 tmp;
+ int ret;
+
+ ret = kstrtou32(buf, 10, &tmp);
+ if (ret)
+ return ret;
+
+ ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp,
sizeof(tmp), sizeof(tmp));
if (ret)
return ret < 0 ? ret : -EINVAL;
@@ -650,10 +660,12 @@ static int __init hp_wmi_input_setup(void)
}
/* Tablet mode */
- val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
- if (!(val < 0)) {
- __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ if (enable_tablet_mode_sw > 0) {
+ val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
+ if (val >= 0) {
+ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ }
}
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 7b12abe86b94..9c3c83ef445b 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -101,6 +101,9 @@ MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
{
struct acpi_device *dev = lis3->bus_priv;
+ if (!lis3->init_required)
+ return 0;
+
if (acpi_evaluate_object(dev->handle, METHOD_NAME__INI,
NULL, NULL) != AE_OK)
return -EINVAL;
@@ -367,6 +370,7 @@ static int lis3lv02d_add(struct acpi_device *device)
}
/* call the core layer do its init */
+ lis3_dev.init_required = true;
ret = lis3lv02d_init_device(&lis3_dev);
if (ret)
return ret;
@@ -414,11 +418,27 @@ static int lis3lv02d_suspend(struct device *dev)
static int lis3lv02d_resume(struct device *dev)
{
+ lis3_dev.init_required = false;
+ lis3lv02d_poweron(&lis3_dev);
+ return 0;
+}
+
+static int lis3lv02d_restore(struct device *dev)
+{
+ lis3_dev.init_required = true;
lis3lv02d_poweron(&lis3_dev);
return 0;
}
-static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
+static const struct dev_pm_ops hp_accel_pm = {
+ .suspend = lis3lv02d_suspend,
+ .resume = lis3lv02d_resume,
+ .freeze = lis3lv02d_suspend,
+ .thaw = lis3lv02d_resume,
+ .poweroff = lis3lv02d_suspend,
+ .restore = lis3lv02d_restore,
+};
+
#define HP_ACCEL_PM (&hp_accel_pm)
#else
#define HP_ACCEL_PM NULL
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 3201a83073b5..fa3cda69cec9 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -87,6 +87,20 @@ static const struct dmi_system_id button_array_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Wacom MobileStudio Pro 16"),
},
},
+ {
+ .ident = "HP Spectre x2 (2015)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"),
+ },
+ },
+ {
+ .ident = "Lenovo ThinkPad X1 Tablet Gen 2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
+ },
+ },
{ }
};
@@ -557,7 +571,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
if (acpi_match_device_ids(dev, ids) == 0)
- if (acpi_create_platform_device(dev, NULL))
+ if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL)))
dev_info(&dev->dev,
"intel-hid: created platform device\n");
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index a0d0cecff55f..772f9ae3d7e9 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -15,9 +15,13 @@
#include <linux/platform_device.h>
#include <linux/suspend.h>
+/* Returned when NOT in tablet mode on some HP Stream x360 11 models */
+#define VGBS_TABLET_MODE_FLAG_ALT 0x10
/* When NOT in tablet mode, VGBS returns with the flag 0x40 */
-#define TABLET_MODE_FLAG 0x40
-#define DOCK_MODE_FLAG 0x80
+#define VGBS_TABLET_MODE_FLAG 0x40
+#define VGBS_DOCK_MODE_FLAG 0x80
+
+#define VGBS_TABLET_MODE_FLAGS (VGBS_TABLET_MODE_FLAG | VGBS_TABLET_MODE_FLAG_ALT)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("AceLan Kao");
@@ -39,28 +43,59 @@ static const struct key_entry intel_vbtn_keymap[] = {
{ KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* volume-down key release */
{ KE_KEY, 0xC8, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key press */
{ KE_KEY, 0xC9, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key release */
- { KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */
- { KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */
+};
+
+static const struct key_entry intel_vbtn_switchmap[] = {
+ /*
+ * SW_DOCK should only be reported for docking stations, but DSDTs using the
+ * intel-vbtn code always seem to use this for 2-in-1s / convertibles and set
+ * SW_DOCK=1 when in laptop-mode (in tandem with setting SW_TABLET_MODE=0).
+ * This causes userspace to think the laptop is docked to a port-replicator
+ * and to disable suspend-on-lid-close, which is undesirable.
+ * Map the dock events to KEY_IGNORE to avoid this broken SW_DOCK reporting.
+ */
+ { KE_IGNORE, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */
+ { KE_IGNORE, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */
{ KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */
{ KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */
- { KE_END },
};
+#define KEYMAP_LEN \
+ (ARRAY_SIZE(intel_vbtn_keymap) + ARRAY_SIZE(intel_vbtn_switchmap) + 1)
+
struct intel_vbtn_priv {
+ struct key_entry keymap[KEYMAP_LEN];
struct input_dev *input_dev;
+ bool has_switches;
bool wakeup_mode;
};
static int intel_vbtn_input_setup(struct platform_device *device)
{
struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
- int ret;
+ int ret, keymap_len = 0;
+
+ if (true) {
+ memcpy(&priv->keymap[keymap_len], intel_vbtn_keymap,
+ ARRAY_SIZE(intel_vbtn_keymap) *
+ sizeof(struct key_entry));
+ keymap_len += ARRAY_SIZE(intel_vbtn_keymap);
+ }
+
+ if (priv->has_switches) {
+ memcpy(&priv->keymap[keymap_len], intel_vbtn_switchmap,
+ ARRAY_SIZE(intel_vbtn_switchmap) *
+ sizeof(struct key_entry));
+ keymap_len += ARRAY_SIZE(intel_vbtn_switchmap);
+ }
+
+ priv->keymap[keymap_len].type = KE_END;
priv->input_dev = devm_input_allocate_device(&device->dev);
if (!priv->input_dev)
return -ENOMEM;
- ret = sparse_keymap_setup(priv->input_dev, intel_vbtn_keymap, NULL);
+ ret = sparse_keymap_setup(priv->input_dev, priv->keymap, NULL);
if (ret)
return ret;
@@ -115,31 +150,86 @@ out_unknown:
static void detect_tablet_mode(struct platform_device *device)
{
- const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
acpi_handle handle = ACPI_HANDLE(&device->dev);
- struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
+ unsigned long long vgbs;
acpi_status status;
int m;
- if (!(chassis_type && strcmp(chassis_type, "31") == 0))
- goto out;
-
- status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output);
+ status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
if (ACPI_FAILURE(status))
- goto out;
-
- obj = vgbs_output.pointer;
- if (!(obj && obj->type == ACPI_TYPE_INTEGER))
- goto out;
+ return;
- m = !(obj->integer.value & TABLET_MODE_FLAG);
+ m = !(vgbs & VGBS_TABLET_MODE_FLAGS);
input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
- m = (obj->integer.value & DOCK_MODE_FLAG) ? 1 : 0;
+ m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 1 : 0;
input_report_switch(priv->input_dev, SW_DOCK, m);
-out:
- kfree(vgbs_output.pointer);
+}
+
+/*
+ * There are several laptop (non 2-in-1) models out there which support VGBS,
+ * but simply always return 0, which we translate to SW_TABLET_MODE=1. This in
+ * turn causes userspace (libinput) to suppress events from the builtin
+ * keyboard and touchpad, making the laptop essentially unusable.
+ *
+ * Since wrongly reporting SW_TABLET_MODE=1 in combination with libinput leads
+ * to an unusable system, whereas many people will not even notice when
+ * SW_TABLET_MODE is not being reported, a DMI-based allow list is used here.
+ * This list mainly matches on the chassis-type of 2-in-1s.
+ *
+ * There are also some 2-in-1s which use the intel-vbtn ACPI interface to report
+ * SW_TABLET_MODE with a chassis-type of 8 ("Portable") or 10 ("Notebook"),
+ * these are matched on a per model basis, since many normal laptops with a
+ * possible broken VGBS ACPI-method also use these chassis-types.
+ */
+static const struct dmi_system_id dmi_switches_allow_list[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+ },
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
+ },
+ },
+ {} /* Array terminator */
+};
+
+static bool intel_vbtn_has_switches(acpi_handle handle)
+{
+ unsigned long long vgbs;
+ acpi_status status;
+
+ if (!dmi_check_system(dmi_switches_allow_list))
+ return false;
+
+ status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
+ return ACPI_SUCCESS(status);
}
static int intel_vbtn_probe(struct platform_device *device)
@@ -160,13 +250,16 @@ static int intel_vbtn_probe(struct platform_device *device)
return -ENOMEM;
dev_set_drvdata(&device->dev, priv);
+ priv->has_switches = intel_vbtn_has_switches(handle);
+
err = intel_vbtn_input_setup(device);
if (err) {
pr_err("Failed to setup Intel Virtual Button\n");
return err;
}
- detect_tablet_mode(device);
+ if (priv->has_switches)
+ detect_tablet_mode(device);
status = acpi_install_notify_handler(handle,
ACPI_DEVICE_NOTIFY,
@@ -237,7 +330,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
if (acpi_match_device_ids(dev, ids) == 0)
- if (acpi_create_platform_device(dev, NULL))
+ if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL)))
dev_info(&dev->dev,
"intel-vbtn: created platform device\n");
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index 2efeab650345..d6a7039a0591 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -331,6 +331,7 @@ static const struct acpi_device_id punit_ipc_acpi_ids[] = {
{ "INT34D4", 0 },
{ }
};
+MODULE_DEVICE_TABLE(acpi, punit_ipc_acpi_ids);
static struct platform_driver intel_punit_ipc_driver = {
.probe = intel_punit_ipc_probe,
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 69e28c12d591..b2a196a2b6c7 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -212,24 +212,6 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
};
/* Platform hotplug devices */
-static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
- {
- I2C_BOARD_INFO("24c02", 0x51),
- },
- {
- I2C_BOARD_INFO("24c02", 0x50),
- },
-};
-
-static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = {
- {
- I2C_BOARD_INFO("24c32", 0x51),
- },
- {
- I2C_BOARD_INFO("24c32", 0x50),
- },
-};
-
static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
{
I2C_BOARD_INFO("dps460", 0x59),
@@ -260,15 +242,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = {
.label = "psu1",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(0),
- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[0],
- .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "psu2",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(1),
- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[1],
- .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
@@ -335,7 +315,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
.aggr_mask = MLXPLAT_CPLD_AGGR_PSU_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = MLXPLAT_CPLD_PSU_MASK,
- .count = ARRAY_SIZE(mlxplat_mlxcpld_psu),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_psu_items_data),
.inversed = 1,
.health = false,
},
@@ -344,7 +324,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
.aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = MLXPLAT_CPLD_PWR_MASK,
- .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_items_data),
.inversed = 0,
.health = false,
},
@@ -353,7 +333,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
.aggr_mask = MLXPLAT_CPLD_AGGR_FAN_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = MLXPLAT_CPLD_FAN_MASK,
- .count = ARRAY_SIZE(mlxplat_mlxcpld_fan),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_items_data),
.inversed = 1,
.health = false,
},
@@ -431,15 +411,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn274x_psu_items_data[] = {
.label = "psu1",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(0),
- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[0],
- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "psu2",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(1),
- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[1],
- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
@@ -589,15 +567,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = {
.label = "psu1",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(0),
- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0],
- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "psu2",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(1),
- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1],
- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 26351e9e0aaf..682fc49d172c 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -423,34 +423,10 @@ static const struct dmi_system_id critclk_systems[] = {
},
{
/* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Beckhoff CB3163",
+ .ident = "Beckhoff Baytrail",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
- DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
- },
- },
- {
- /* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Beckhoff CB4063",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
- DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
- },
- },
- {
- /* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Beckhoff CB6263",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
- DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
- },
- },
- {
- /* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Beckhoff CB6363",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
- DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
},
},
{
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 8f85bb4fe784..35c7d3185fea 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2597,7 +2597,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
*/
static int hotkey_kthread(void *data)
{
- struct tp_nvram_state s[2];
+ struct tp_nvram_state s[2] = { 0 };
u32 poll_mask, event_mask;
unsigned int si, so;
unsigned long t;
@@ -3242,7 +3242,14 @@ static int hotkey_init_tablet_mode(void)
in_tablet_mode = hotkey_gmms_get_tablet_mode(res,
&has_tablet_mode);
- if (has_tablet_mode)
+ /*
+ * The Yoga 11e series has 2 accelerometers described by a
+ * BOSC0200 ACPI node. This setup relies on a Windows service
+ * which calls special ACPI methods on this node to report
+ * the laptop/tent/tablet mode to the EC. The bmc150 iio driver
+ * does not support this, so skip the hotkey on these models.
+ */
+ if (has_tablet_mode && !acpi_dev_present("BOSC0200", "1", -1))
tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
type = "GMMS";
} else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
@@ -4095,13 +4102,19 @@ static bool hotkey_notify_6xxx(const u32 hkey,
case TP_HKEY_EV_KEY_NUMLOCK:
case TP_HKEY_EV_KEY_FN:
- case TP_HKEY_EV_KEY_FN_ESC:
/* key press events, we just ignore them as long as the EC
* is still reporting them in the normal keyboard stream */
*send_acpi_ev = false;
*ignore_acpi_ev = true;
return true;
+ case TP_HKEY_EV_KEY_FN_ESC:
+ /* Get the media key status to force the status LED to update */
+ acpi_evalf(hkey_handle, NULL, "GMKS", "v");
+ *send_acpi_ev = false;
+ *ignore_acpi_ev = true;
+ return true;
+
case TP_HKEY_EV_TABLET_CHANGED:
tpacpi_input_send_tabletsw();
hotkey_tablet_mode_notify_change();
@@ -4251,6 +4264,7 @@ static void hotkey_resume(void)
pr_err("error while attempting to reset the event firmware interface\n");
tpacpi_send_radiosw_update();
+ tpacpi_input_send_tabletsw();
hotkey_tablet_mode_notify_change();
hotkey_wakeup_reason_notify_change();
hotkey_wakeup_hotunplug_complete_notify_change();
@@ -6286,6 +6300,7 @@ enum thermal_access_mode {
enum { /* TPACPI_THERMAL_TPEC_* */
TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
+ TP_EC_FUNCREV = 0xEF, /* ACPI EC Functional revision */
TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
@@ -6484,7 +6499,7 @@ static const struct attribute_group thermal_temp_input8_group = {
static int __init thermal_init(struct ibm_init_struct *iibm)
{
- u8 t, ta1, ta2;
+ u8 t, ta1, ta2, ver = 0;
int i;
int acpi_tmp7;
int res;
@@ -6499,7 +6514,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
* 0x78-0x7F, 0xC0-0xC7. Registers return 0x00 for
* non-implemented, thermal sensors return 0x80 when
* not available
+ * The above rule is unfortunately flawed. This has been seen with
+ * 0xC2 (power supply ID) causing thermal control problems.
+ * The EC version can be determined by offset 0xEF and at least for
+ * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7
+ * are not thermal registers.
*/
+ if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
+ pr_warn("Thinkpad ACPI EC unable to access EC version\n");
ta1 = ta2 = 0;
for (i = 0; i < 8; i++) {
@@ -6509,11 +6531,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
ta1 = 0;
break;
}
- if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
- ta2 |= t;
- } else {
- ta1 = 0;
- break;
+ if (ver < 3) {
+ if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
+ ta2 |= t;
+ } else {
+ ta1 = 0;
+ break;
+ }
}
}
if (ta1 == 0) {
@@ -6526,9 +6550,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
thermal_read_mode = TPACPI_THERMAL_NONE;
}
} else {
- thermal_read_mode =
- (ta2 != 0) ?
- TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
+ if (ver >= 3)
+ thermal_read_mode = TPACPI_THERMAL_TPEC_8;
+ else
+ thermal_read_mode =
+ (ta2 != 0) ?
+ TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
}
} else if (acpi_tmp7) {
if (tpacpi_is_ibm() &&
@@ -6879,8 +6906,10 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle)
list_for_each_entry(child, &device->children, node) {
acpi_status status = acpi_evaluate_object(child->handle, "_BCL",
NULL, &buffer);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ buffer.length = ACPI_ALLOCATE_BUFFER;
continue;
+ }
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
@@ -9687,6 +9716,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */
TPACPI_Q_LNV3('R', '0', 'C', true), /* Thinkpad 13 */
TPACPI_Q_LNV3('R', '0', 'J', true), /* Thinkpad 13 gen 2 */
+ TPACPI_Q_LNV3('R', '0', 'K', true), /* Thinkpad 11e gen 4 celeron BIOS */
};
static int __init tpacpi_battery_init(struct ibm_init_struct *ibm)
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index e366977bda41..8c3e9bac4754 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1497,7 +1497,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
char *buffer;
char *cmd;
- int lcd_out, crt_out, tv_out;
+ int lcd_out = -1, crt_out = -1, tv_out = -1;
int remain = count;
int value;
int ret;
@@ -1529,7 +1529,6 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
kfree(cmd);
- lcd_out = crt_out = tv_out = -1;
ret = get_video_status(dev, &video_out);
if (!ret) {
unsigned int new_video_out = video_out;
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index cb204f973491..f122a0263a1b 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -163,6 +163,16 @@ static const struct ts_dmi_data digma_citi_e200_data = {
.properties = digma_citi_e200_props,
};
+static const struct property_entry estar_beauty_hd_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ { }
+};
+
+static const struct ts_dmi_data estar_beauty_hd_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = estar_beauty_hd_props,
+};
+
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@@ -502,6 +512,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Estar Beauty HD (MID 7316R) */
+ .driver_data = (void *)&estar_beauty_hd_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
+ },
+ },
+ {
/* GP-electronic T701 */
.driver_data = (void *)&gp_electronic_t701_data,
.matches = {
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index d9493e893d64..7cf59e764ef2 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -36,7 +36,7 @@
#define AT91_SHDW_MR 0x04 /* Shut Down Mode Register */
#define AT91_SHDW_WKUPDBC_SHIFT 24
-#define AT91_SHDW_WKUPDBC_MASK GENMASK(31, 16)
+#define AT91_SHDW_WKUPDBC_MASK GENMASK(26, 24)
#define AT91_SHDW_WKUPDBC(x) (((x) << AT91_SHDW_WKUPDBC_SHIFT) \
& AT91_SHDW_WKUPDBC_MASK)
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index e9e749f87517..8fb43c4438e6 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -150,6 +150,7 @@ static struct platform_driver vexpress_reset_driver = {
.driver = {
.name = "vexpress-reset",
.of_match_table = vexpress_reset_of_match,
+ .suppress_bind_attrs = true,
},
};
diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c
index 63c57dc82ac1..4eda5065b5bb 100644
--- a/drivers/power/supply/88pm860x_battery.c
+++ b/drivers/power/supply/88pm860x_battery.c
@@ -436,7 +436,7 @@ static void pm860x_init_battery(struct pm860x_battery_info *info)
int ret;
int data;
int bat_remove;
- int soc;
+ int soc = 0;
/* measure enable on GPADC1 */
data = MEAS1_GP1;
@@ -499,7 +499,9 @@ static void pm860x_init_battery(struct pm860x_battery_info *info)
}
mutex_unlock(&info->lock);
- calc_soc(info, OCV_MODE_ACTIVE, &soc);
+ ret = calc_soc(info, OCV_MODE_ACTIVE, &soc);
+ if (ret < 0)
+ goto out;
data = pm860x_reg_read(info->i2c, PM8607_POWER_UP_LOG);
bat_remove = data & BAT_WU_LOG;
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index ff6dab0bf0dd..76c699b5abda 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -555,7 +555,7 @@ config CHARGER_BQ24257
tristate "TI BQ24250/24251/24257 battery charger driver"
depends on I2C
depends on GPIOLIB || COMPILE_TEST
- depends on REGMAP_I2C
+ select REGMAP_I2C
help
Say Y to enable support for the TI BQ24250, BQ24251, and BQ24257 battery
chargers.
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 46eb7716c35c..84106a9836c8 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -555,14 +555,15 @@ out:
/*
* The HP Pavilion x2 10 series comes in a number of variants:
- * Bay Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "815D"
- * Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E"
- * Cherry Trail SoC + TI PMIC, DMI_BOARD_NAME: "827C" or "82F4"
+ * Bay Trail SoC + AXP288 PMIC, Micro-USB, DMI_BOARD_NAME: "8021"
+ * Bay Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "815D"
+ * Cherry Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "813E"
+ * Cherry Trail SoC + TI PMIC, Type-C, DMI_BOARD_NAME: "827C" or "82F4"
*
- * The variants with the AXP288 PMIC are all kinds of special:
+ * The variants with the AXP288 + Type-C connector are all kinds of special:
*
- * 1. All variants use a Type-C connector which the AXP288 does not support, so
- * when using a Type-C charger it is not recognized. Unlike most AXP288 devices,
+ * 1. They use a Type-C connector which the AXP288 does not support, so when
+ * using a Type-C charger it is not recognized. Unlike most AXP288 devices,
* this model actually has mostly working ACPI AC / Battery code, the ACPI code
* "solves" this by simply setting the input_current_limit to 3A.
* There are still some issues with the ACPI code, so we use this native driver,
@@ -585,12 +586,17 @@ out:
*/
static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = {
{
- /*
- * Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry
- * Trail model has "HP", so we only match on product_name.
- */
.matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "815D"),
+ },
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "813E"),
},
},
{} /* Terminating entry */
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index ab0b6e78ca02..157cf5ec6b02 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -718,14 +718,14 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
{
/* Intel Cherry Trail Compute Stick, Windows version */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"),
},
},
{
/* Intel Cherry Trail Compute Stick, version without an OS */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"),
},
},
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index b58df04d03b3..863208928cf0 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -446,8 +446,10 @@ static ssize_t bq24190_sysfs_show(struct device *dev,
return -EINVAL;
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v);
if (ret)
@@ -1092,8 +1094,10 @@ static int bq24190_charger_get_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_CHARGE_TYPE:
@@ -1164,8 +1168,10 @@ static int bq24190_charger_set_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
@@ -1425,8 +1431,10 @@ static int bq24190_battery_get_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -1471,8 +1479,10 @@ static int bq24190_battery_set_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index f022e1b550df..b7dc88126866 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1491,27 +1491,6 @@ static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg)
}
/*
- * Read an average power register.
- * Return < 0 if something fails.
- */
-static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
-{
- int tval;
-
- tval = bq27xxx_read(di, BQ27XXX_REG_AP, false);
- if (tval < 0) {
- dev_err(di->dev, "error reading average power register %02x: %d\n",
- BQ27XXX_REG_AP, tval);
- return tval;
- }
-
- if (di->opts & BQ27XXX_O_ZERO)
- return (tval * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
- else
- return tval;
-}
-
-/*
* Returns true if a battery over temperature condition is detected
*/
static bool bq27xxx_battery_overtemp(struct bq27xxx_device_info *di, u16 flags)
@@ -1607,8 +1586,6 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
}
if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
cache.cycle_count = bq27xxx_battery_read_cyct(di);
- if (di->regs[BQ27XXX_REG_AP] != INVALID_REG_ADDR)
- cache.power_avg = bq27xxx_battery_read_pwr_avg(di);
/* We only have to read charge design full once */
if (di->charge_design_full <= 0)
@@ -1670,6 +1647,32 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
return 0;
}
+/*
+ * Get the average power in µW
+ * Return < 0 if something fails.
+ */
+static int bq27xxx_battery_pwr_avg(struct bq27xxx_device_info *di,
+ union power_supply_propval *val)
+{
+ int power;
+
+ power = bq27xxx_read(di, BQ27XXX_REG_AP, false);
+ if (power < 0) {
+ dev_err(di->dev,
+ "error reading average power register %02x: %d\n",
+ BQ27XXX_REG_AP, power);
+ return power;
+ }
+
+ if (di->opts & BQ27XXX_O_ZERO)
+ val->intval = (power * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
+ else
+ /* Other gauges return a signed value in units of 10mW */
+ val->intval = (int)((s16)power) * 10000;
+
+ return 0;
+}
+
static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
union power_supply_propval *val)
{
@@ -1680,8 +1683,6 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
status = POWER_SUPPLY_STATUS_FULL;
else if (di->cache.flags & BQ27000_FLAG_CHGS)
status = POWER_SUPPLY_STATUS_CHARGING;
- else if (power_supply_am_i_supplied(di->bat) > 0)
- status = POWER_SUPPLY_STATUS_NOT_CHARGING;
else
status = POWER_SUPPLY_STATUS_DISCHARGING;
} else {
@@ -1693,6 +1694,10 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
status = POWER_SUPPLY_STATUS_CHARGING;
}
+ if ((status == POWER_SUPPLY_STATUS_DISCHARGING) &&
+ (power_supply_am_i_supplied(di->bat) > 0))
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
val->intval = status;
return 0;
@@ -1835,7 +1840,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
ret = bq27xxx_simple_value(di->cache.energy, val);
break;
case POWER_SUPPLY_PROP_POWER_AVG:
- ret = bq27xxx_simple_value(di->cache.power_avg, val);
+ ret = bq27xxx_battery_pwr_avg(di, val);
break;
case POWER_SUPPLY_PROP_HEALTH:
ret = bq27xxx_simple_value(di->cache.health, val);
@@ -1887,7 +1892,10 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg);
if (IS_ERR(di->bat)) {
- dev_err(di->dev, "failed to register battery\n");
+ if (PTR_ERR(di->bat) == -EPROBE_DEFER)
+ dev_dbg(di->dev, "failed to register battery, deferring probe\n");
+ else
+ dev_err(di->dev, "failed to register battery\n");
return PTR_ERR(di->bat);
}
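The replacement helper above reads the average-power register on demand instead of caching it; for non-ZERO gauges the register holds a signed 16-bit quantity in 10 mW steps. A small stand-alone check of that conversion (the raw value is made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t raw = 0xff38;                  /* example register value */
            int uw = (int)((int16_t)raw) * 10000;   /* -200 * 10 mW, in microwatts */

            printf("%d uW (%.1f W)\n", uw, uw / 1000000.0);
            return 0;                               /* prints: -2000000 uW (-2.0 W) */
    }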
diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
index bc462d1ec963..97b0e873e87d 100644
--- a/drivers/power/supply/generic-adc-battery.c
+++ b/drivers/power/supply/generic-adc-battery.c
@@ -382,7 +382,7 @@ static int gab_remove(struct platform_device *pdev)
}
kfree(adc_bat->psy_desc.properties);
- cancel_delayed_work(&adc_bat->bat_work);
+ cancel_delayed_work_sync(&adc_bat->bat_work);
return 0;
}
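The one-line change above (and the identical change to s3c_adc_battery further down) switches to the synchronous cancel: cancel_delayed_work() does not wait for a handler that is already running, so remove() could free resources the work item is still using. Sketch of the teardown idiom:

    #include <linux/workqueue.h>

    static void example_teardown(struct delayed_work *work)
    {
            /* waits for a concurrently running handler to finish */
            cancel_delayed_work_sync(work);
    }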
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 0f3432795f3c..6dcabbeccde1 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -529,7 +529,7 @@ static int lp8788_set_irqs(struct platform_device *pdev,
ret = request_threaded_irq(virq, NULL,
lp8788_charger_irq_thread,
- 0, name, pchg);
+ IRQF_ONESHOT, name, pchg);
if (ret)
break;
}
@@ -600,27 +600,14 @@ static void lp8788_setup_adc_channel(struct device *dev,
return;
/* ADC channel for battery voltage */
- chan = iio_channel_get(dev, pdata->adc_vbatt);
+ chan = devm_iio_channel_get(dev, pdata->adc_vbatt);
pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? NULL : chan;
/* ADC channel for battery temperature */
- chan = iio_channel_get(dev, pdata->adc_batt_temp);
+ chan = devm_iio_channel_get(dev, pdata->adc_batt_temp);
pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? NULL : chan;
}
-static void lp8788_release_adc_channel(struct lp8788_charger *pchg)
-{
- int i;
-
- for (i = 0; i < LP8788_NUM_CHG_ADC; i++) {
- if (!pchg->chan[i])
- continue;
-
- iio_channel_release(pchg->chan[i]);
- pchg->chan[i] = NULL;
- }
-}
-
static ssize_t lp8788_show_charger_status(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -747,7 +734,6 @@ static int lp8788_charger_remove(struct platform_device *pdev)
lp8788_irq_unregister(pdev, pchg);
sysfs_remove_group(&pdev->dev.kobj, &lp8788_attr_group);
lp8788_psy_unregister(pchg);
- lp8788_release_adc_channel(pchg);
return 0;
}
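The conversion above relies on devm: channels obtained with devm_iio_channel_get() are released automatically when the consumer device is unbound, which is why the manual release helper and its call in remove() can be deleted. A hedged sketch (the channel name is illustrative):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/iio/consumer.h>

    static int example_get_channel(struct device *dev, struct iio_channel **chan)
    {
            *chan = devm_iio_channel_get(dev, "vbatt");
            if (IS_ERR(*chan))
                    return PTR_ERR(*chan);
            /* no iio_channel_release() needed: devm drops it on unbind */
            return 0;
    }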
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 33c40f79d23d..2c35c13ad546 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -109,7 +109,7 @@ static void max17040_get_vcell(struct i2c_client *client)
vcell = max17040_read_reg(client, MAX17040_VCELL);
- chip->vcell = vcell;
+ chip->vcell = (vcell >> 4) * 1250;
}
static void max17040_get_soc(struct i2c_client *client)
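The fix above converts the raw VCELL register to microvolts: the 12-bit reading sits in the upper bits of the 16-bit register with a 1.25 mV LSB (per the MAX17040 datasheet), which is what (vcell >> 4) * 1250 encodes. A quick stand-alone check with a made-up register value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t reg = 0xc7f0;                  /* example raw VCELL value */
            unsigned int uv = (reg >> 4) * 1250;    /* 3199 * 1.25 mV */

            printf("VCELL = %u uV\n", uv);          /* prints: VCELL = 3998750 uV */
            return 0;
    }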
diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c
index 78561b6884fc..9ef218d76aa9 100644
--- a/drivers/power/supply/pm2301_charger.c
+++ b/drivers/power/supply/pm2301_charger.c
@@ -1098,7 +1098,7 @@ static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
ret = request_threaded_irq(gpio_to_irq(pm2->pdata->gpio_irq_number),
NULL,
pm2xxx_charger_irq[0].isr,
- pm2->pdata->irq_type,
+ pm2->pdata->irq_type | IRQF_ONESHOT,
pm2xxx_charger_irq[0].name, pm2);
if (ret != 0) {
diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
index 3d00b35cafc9..8be31f80035c 100644
--- a/drivers/power/supply/s3c_adc_battery.c
+++ b/drivers/power/supply/s3c_adc_battery.c
@@ -394,7 +394,7 @@ static int s3c_adc_bat_remove(struct platform_device *pdev)
gpio_free(pdata->gpio_charge_finished);
}
- cancel_delayed_work(&bat_work);
+ cancel_delayed_work_sync(&bat_work);
if (pdata->exit)
pdata->exit();
diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
index 072c5189bd6d..0655dbdc7000 100644
--- a/drivers/power/supply/smb347-charger.c
+++ b/drivers/power/supply/smb347-charger.c
@@ -1141,6 +1141,7 @@ static bool smb347_volatile_reg(struct device *dev, unsigned int reg)
switch (reg) {
case IRQSTAT_A:
case IRQSTAT_C:
+ case IRQSTAT_D:
case IRQSTAT_E:
case IRQSTAT_F:
case STAT_A:
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index 57246cdbd042..925abec45380 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -344,6 +344,7 @@ static int param_set_ac_online(const char *key, const struct kernel_param *kp)
static int param_get_ac_online(char *buffer, const struct kernel_param *kp)
{
strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown"));
+ strcat(buffer, "\n");
return strlen(buffer);
}
@@ -357,6 +358,7 @@ static int param_set_usb_online(const char *key, const struct kernel_param *kp)
static int param_get_usb_online(char *buffer, const struct kernel_param *kp)
{
strcpy(buffer, map_get_key(map_ac_online, usb_online, "unknown"));
+ strcat(buffer, "\n");
return strlen(buffer);
}
@@ -371,6 +373,7 @@ static int param_set_battery_status(const char *key,
static int param_get_battery_status(char *buffer, const struct kernel_param *kp)
{
strcpy(buffer, map_get_key(map_status, battery_status, "unknown"));
+ strcat(buffer, "\n");
return strlen(buffer);
}
@@ -385,6 +388,7 @@ static int param_set_battery_health(const char *key,
static int param_get_battery_health(char *buffer, const struct kernel_param *kp)
{
strcpy(buffer, map_get_key(map_health, battery_health, "unknown"));
+ strcat(buffer, "\n");
return strlen(buffer);
}
@@ -400,6 +404,7 @@ static int param_get_battery_present(char *buffer,
const struct kernel_param *kp)
{
strcpy(buffer, map_get_key(map_present, battery_present, "unknown"));
+ strcat(buffer, "\n");
return strlen(buffer);
}
@@ -417,6 +422,7 @@ static int param_get_battery_technology(char *buffer,
{
strcpy(buffer,
map_get_key(map_technology, battery_technology, "unknown"));
+ strcat(buffer, "\n");
return strlen(buffer);
}
diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c
index 1b4b5e09538e..297bf58f0d4f 100644
--- a/drivers/power/supply/tps65090-charger.c
+++ b/drivers/power/supply/tps65090-charger.c
@@ -311,7 +311,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
if (irq != -ENXIO) {
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- tps65090_charger_isr, 0, "tps65090-charger", cdata);
+ tps65090_charger_isr, IRQF_ONESHOT, "tps65090-charger", cdata);
if (ret) {
dev_err(cdata->dev,
"Unable to register irq %d err %d\n", irq,
diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
index 814c2b81fdfe..ba33d1617e0b 100644
--- a/drivers/power/supply/tps65217_charger.c
+++ b/drivers/power/supply/tps65217_charger.c
@@ -238,7 +238,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
for (i = 0; i < NUM_CHARGER_IRQS; i++) {
ret = devm_request_threaded_irq(&pdev->dev, irq[i], NULL,
tps65217_charger_irq,
- 0, "tps65217-charger",
+ IRQF_ONESHOT, "tps65217-charger",
charger);
if (ret) {
dev_err(charger->dev,
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 9e2f274bd44f..60c8375c3c81 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -379,9 +379,9 @@ static void create_power_zone_common_attributes(
&dev_attr_max_energy_range_uj.attr;
if (power_zone->ops->get_energy_uj) {
if (power_zone->ops->reset_energy_uj)
- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
+ dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUSR;
else
- dev_attr_energy_uj.attr.mode = S_IRUGO;
+ dev_attr_energy_uj.attr.mode = S_IRUSR;
power_zone->zone_dev_attrs[count++] =
&dev_attr_energy_uj.attr;
}
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
index 8c3f5adf1bc6..2d7618375662 100644
--- a/drivers/ps3/ps3stor_lib.c
+++ b/drivers/ps3/ps3stor_lib.c
@@ -201,7 +201,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
dev->bounce_lpar = ps3_mm_phys_to_lpar(__pa(dev->bounce_buf));
dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf,
dev->bounce_size, DMA_BIDIRECTIONAL);
- if (!dev->bounce_dma) {
+ if (dma_mapping_error(&dev->sbd.core, dev->bounce_dma)) {
dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n",
__func__, __LINE__);
error = -ENODEV;
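The check above is the required form: a dma_addr_t of 0 can be a perfectly valid mapping, so failure has to be detected with dma_mapping_error() rather than by testing the handle against zero. Minimal sketch (names are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int example_map(struct device *dev, void *buf, size_t len,
                           dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, *handle))
                    return -ENOMEM;         /* 0 is not a reliable error marker */
            return 0;
    }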
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index 31b01035d0ab..8cfba3614e60 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -85,8 +85,6 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
u64 tmp, multi, rate;
u32 value, prescale;
- rate = clk_get_rate(ip->clk);
-
value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm)))
@@ -99,6 +97,13 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
else
state->polarity = PWM_POLARITY_INVERSED;
+ rate = clk_get_rate(ip->clk);
+ if (rate == 0) {
+ state->period = 0;
+ state->duty_cycle = 0;
+ return;
+ }
+
value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm);
prescale &= IPROC_PWM_PRESCALE_MAX;
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index db001cba937f..e340ad79a1ec 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -166,6 +166,7 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &bcm2835_pwm_ops;
+ pc->chip.base = -1;
pc->chip.npwm = 2;
pc->chip.of_xlate = of_pwm_xlate_with_flags;
pc->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 815f5333bb8f..3b0a097ce2ab 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -132,8 +132,10 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
duty = DIV_ROUND_UP(timebase * duty_ns, period_ns);
ret = pm_runtime_get_sync(chip->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(chip->dev);
return ret;
+ }
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm));
@@ -278,6 +280,8 @@ static int img_pwm_probe(struct platform_device *pdev)
return PTR_ERR(pwm->pwm_clk);
}
+ platform_set_drvdata(pdev, pwm);
+
pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -314,7 +318,6 @@ static int img_pwm_probe(struct platform_device *pdev)
goto err_suspend;
}
- platform_set_drvdata(pdev, pwm);
return 0;
err_suspend:
@@ -334,8 +337,10 @@ static int img_pwm_remove(struct platform_device *pdev)
int ret;
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(&pdev->dev);
return ret;
+ }
for (i = 0; i < pwm_chip->chip.npwm; i++) {
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 15b40a8bc4fb..5055ba2c6c94 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -278,6 +278,7 @@ static int lp3943_pwm_probe(struct platform_device *pdev)
lp3943_pwm->chip.dev = &pdev->dev;
lp3943_pwm->chip.ops = &lp3943_pwm_ops;
lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
+ lp3943_pwm->chip.base = -1;
platform_set_drvdata(pdev, lp3943_pwm);
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 7a4a6406cf69..69f8be065919 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -105,10 +105,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
* The equation is:
* base_unit = round(base_unit_range * freq / c)
*/
- base_unit_range = BIT(lpwm->info->base_unit_bits) - 1;
+ base_unit_range = BIT(lpwm->info->base_unit_bits);
freq *= base_unit_range;
base_unit = DIV_ROUND_CLOSEST_ULL(freq, c);
+ /* base_unit must not be 0 and we also want to avoid overflowing it */
+ base_unit = clamp_val(base_unit, 1, base_unit_range - 1);
on_time_div = 255ULL * duty_ns;
do_div(on_time_div, period_ns);
@@ -116,8 +118,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
orig_ctrl = ctrl = pwm_lpss_read(pwm);
ctrl &= ~PWM_ON_TIME_DIV_MASK;
- ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT);
- base_unit &= base_unit_range;
+ ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT);
ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
ctrl |= on_time_div;
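The change above treats base_unit_range as the full 2^bits span and then clamps base_unit into [1, range - 1], so the programmed value can neither be zero nor spill into neighbouring register bits. A stand-alone sketch of the arithmetic, assuming a 16-bit base_unit field and an illustrative 19.2 MHz input clock:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t base_unit_range = 1ULL << 16;          /* 2^base_unit_bits */
            uint64_t rate = 19200000, period_ns = 5000000;  /* 200 Hz PWM */
            uint64_t freq = 1000000000ULL / period_ns;

            /* base_unit = round(base_unit_range * freq / clock_rate) */
            uint64_t base_unit = (freq * base_unit_range + rate / 2) / rate;

            /* must not be 0 and must not overflow the register field */
            if (base_unit < 1)
                    base_unit = 1;
            if (base_unit > base_unit_range - 1)
                    base_unit = base_unit_range - 1;

            printf("base_unit = %llu\n", (unsigned long long)base_unit);
            return 0;
    }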
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index e1e5dfcb16f3..259fd58812ae 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/bitmap.h>
/*
* Because the PCA9685 has only one prescaler per chip, changing the period of
@@ -85,6 +86,7 @@ struct pca9685 {
#if IS_ENABLED(CONFIG_GPIOLIB)
struct mutex lock;
struct gpio_chip gpio;
+ DECLARE_BITMAP(pwms_inuse, PCA9685_MAXCHAN + 1);
#endif
};
@@ -94,51 +96,51 @@ static inline struct pca9685 *to_pca(struct pwm_chip *chip)
}
#if IS_ENABLED(CONFIG_GPIOLIB)
-static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
+static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx)
{
- struct pca9685 *pca = gpiochip_get_data(gpio);
- struct pwm_device *pwm;
+ bool is_inuse;
mutex_lock(&pca->lock);
-
- pwm = &pca->chip.pwms[offset];
-
- if (pwm->flags & (PWMF_REQUESTED | PWMF_EXPORTED)) {
- mutex_unlock(&pca->lock);
- return -EBUSY;
+ if (pwm_idx >= PCA9685_MAXCHAN) {
+ /*
+ * "all LEDs" channel:
+ * pretend already in use if any of the PWMs are requested
+ */
+ if (!bitmap_empty(pca->pwms_inuse, PCA9685_MAXCHAN)) {
+ is_inuse = true;
+ goto out;
+ }
+ } else {
+ /*
+ * regular channel:
+ * pretend already in use if the "all LEDs" channel is requested
+ */
+ if (test_bit(PCA9685_MAXCHAN, pca->pwms_inuse)) {
+ is_inuse = true;
+ goto out;
+ }
}
-
- pwm_set_chip_data(pwm, (void *)1);
-
+ is_inuse = test_and_set_bit(pwm_idx, pca->pwms_inuse);
+out:
mutex_unlock(&pca->lock);
- pm_runtime_get_sync(pca->chip.dev);
- return 0;
+ return is_inuse;
}
-static bool pca9685_pwm_is_gpio(struct pca9685 *pca, struct pwm_device *pwm)
+static void pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
{
- bool is_gpio = false;
-
mutex_lock(&pca->lock);
+ clear_bit(pwm_idx, pca->pwms_inuse);
+ mutex_unlock(&pca->lock);
+}
- if (pwm->hwpwm >= PCA9685_MAXCHAN) {
- unsigned int i;
-
- /*
- * Check if any of the GPIOs are requested and in that case
- * prevent using the "all LEDs" channel.
- */
- for (i = 0; i < pca->gpio.ngpio; i++)
- if (gpiochip_is_requested(&pca->gpio, i)) {
- is_gpio = true;
- break;
- }
- } else if (pwm_get_chip_data(pwm)) {
- is_gpio = true;
- }
+static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct pca9685 *pca = gpiochip_get_data(gpio);
- mutex_unlock(&pca->lock);
- return is_gpio;
+ if (pca9685_pwm_test_and_set_inuse(pca, offset))
+ return -EBUSY;
+ pm_runtime_get_sync(pca->chip.dev);
+ return 0;
}
static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset)
@@ -173,6 +175,7 @@ static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
pca9685_pwm_gpio_set(gpio, offset, 0);
pm_runtime_put(pca->chip.dev);
+ pca9685_pwm_clear_inuse(pca, offset);
}
static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip,
@@ -224,12 +227,17 @@ static int pca9685_pwm_gpio_probe(struct pca9685 *pca)
return devm_gpiochip_add_data(dev, &pca->gpio, pca);
}
#else
-static inline bool pca9685_pwm_is_gpio(struct pca9685 *pca,
- struct pwm_device *pwm)
+static inline bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca,
+ int pwm_idx)
{
return false;
}
+static inline void
+pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
+{
+}
+
static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca)
{
return 0;
@@ -413,7 +421,7 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pca9685 *pca = to_pca(chip);
- if (pca9685_pwm_is_gpio(pca, pwm))
+ if (pca9685_pwm_test_and_set_inuse(pca, pwm->hwpwm))
return -EBUSY;
pm_runtime_get_sync(chip->dev);
@@ -422,8 +430,11 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct pca9685 *pca = to_pca(chip);
+
pca9685_pwm_disable(chip, pwm);
pm_runtime_put(chip->dev);
+ pca9685_pwm_clear_inuse(pca, pwm->hwpwm);
}
static const struct pwm_ops pca9685_pwm_ops = {
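The rework above replaces per-PWM chip data and a GPIO scan with a single in-use bitmap shared by the PWM and GPIO paths: claiming the catch-all "all LEDs" channel fails if any regular channel is held, and vice versa. A small user-space model of that policy (channel count and names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAXCHAN 16                      /* bit 16 models the "all LEDs" channel */
    static unsigned int inuse;

    static bool test_and_set_inuse(int idx)
    {
            bool busy;

            if (idx >= MAXCHAN)             /* "all LEDs": any regular channel blocks it */
                    busy = (inuse & ((1u << MAXCHAN) - 1)) != 0;
            else                            /* regular: the "all LEDs" channel blocks it */
                    busy = (inuse & (1u << MAXCHAN)) != 0;

            if (!busy) {
                    busy = inuse & (1u << idx);
                    inuse |= 1u << idx;
            }
            return busy;
    }

    int main(void)
    {
            printf("%d %d %d\n", test_and_set_inuse(3),
                   test_and_set_inuse(3), test_and_set_inuse(16));
            return 0;                       /* prints: 0 1 1 */
    }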
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 748f614d5375..b7d71bf297d6 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -232,24 +232,28 @@ static int rcar_pwm_probe(struct platform_device *pdev)
rcar_pwm->chip.base = -1;
rcar_pwm->chip.npwm = 1;
+ pm_runtime_enable(&pdev->dev);
+
ret = pwmchip_add(&rcar_pwm->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM chip: %d\n", ret);
+ pm_runtime_disable(&pdev->dev);
return ret;
}
- pm_runtime_enable(&pdev->dev);
-
return 0;
}
static int rcar_pwm_remove(struct platform_device *pdev)
{
struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pwmchip_remove(&rcar_pwm->chip);
pm_runtime_disable(&pdev->dev);
- return pwmchip_remove(&rcar_pwm->chip);
+ return ret;
}
static const struct of_device_id rcar_pwm_of_table[] = {
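The reordering above (the Renesas TPU driver below gets the same treatment) ensures runtime PM is usable as soon as the chip is registered, is rolled back if registration fails, and is only disabled in remove() after the chip has been torn down. A condensed sketch of the probe side (names are illustrative):

    #include <linux/pm_runtime.h>
    #include <linux/pwm.h>

    static int example_probe(struct device *dev, struct pwm_chip *chip)
    {
            int ret;

            pm_runtime_enable(dev);

            ret = pwmchip_add(chip);
            if (ret < 0) {
                    pm_runtime_disable(dev);        /* undo on failure */
                    return ret;
            }
            return 0;
    }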
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index 29267d12fb4c..9c7962f2f0aa 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -423,16 +423,17 @@ static int tpu_probe(struct platform_device *pdev)
tpu->chip.base = -1;
tpu->chip.npwm = TPU_CHANNEL_MAX;
+ pm_runtime_enable(&pdev->dev);
+
ret = pwmchip_add(&tpu->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM chip\n");
+ pm_runtime_disable(&pdev->dev);
return ret;
}
dev_info(&pdev->dev, "TPU PWM %d registered\n", tpu->pdev->id);
- pm_runtime_enable(&pdev->dev);
-
return 0;
}
@@ -442,12 +443,10 @@ static int tpu_remove(struct platform_device *pdev)
int ret;
ret = pwmchip_remove(&tpu->chip);
- if (ret)
- return ret;
pm_runtime_disable(&pdev->dev);
- return 0;
+ return ret;
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index 4d99d468df09..48bcc853d57a 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -370,7 +370,6 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
- clk_unprepare(pc->clk);
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
goto err_pclk;
}
diff --git a/drivers/pwm/pwm-zx.c b/drivers/pwm/pwm-zx.c
index 5d27c16edfb1..0d4112410b69 100644
--- a/drivers/pwm/pwm-zx.c
+++ b/drivers/pwm/pwm-zx.c
@@ -241,6 +241,7 @@ static int zx_pwm_probe(struct platform_device *pdev)
ret = pwmchip_add(&zpc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
+ clk_disable_unprepare(zpc->pclk);
return ret;
}
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index d6d2f20c4597..21df2816def7 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -25,7 +25,7 @@ config RAPIDIO_ENABLE_RX_TX_PORTS
config RAPIDIO_DMA_ENGINE
bool "DMA Engine support for RapidIO"
depends on RAPIDIO
- select DMADEVICES
+ depends on DMADEVICES
select DMA_ENGINE
help
Say Y here if you want to use DMA Engine framework for RapidIO data
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index fa0bbda4b3f2..a136a7ae7714 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -875,9 +875,15 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
rmcd_error("get_user_pages_unlocked err=%ld",
pinned);
nr_pages = 0;
- } else
+ } else {
rmcd_error("pinned %ld out of %ld pages",
pinned, nr_pages);
+ /*
+ * Set nr_pages to mean "how many pages to unpin" in
+ * the error handler:
+ */
+ nr_pages = pinned;
+ }
ret = -EFAULT;
goto err_pg;
}
@@ -1679,6 +1685,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
struct rio_dev *rdev;
struct rio_switch *rswitch = NULL;
struct rio_mport *mport;
+ struct device *dev;
size_t size;
u32 rval;
u32 swpinfo = 0;
@@ -1693,8 +1700,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
dev_info.comptag, dev_info.destid, dev_info.hopcount);
- if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
+ dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
+ if (dev) {
rmcd_debug(RDEV, "device %s already exists", dev_info.name);
+ put_device(dev);
return -EEXIST;
}
@@ -2380,13 +2389,6 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
cdev_init(&md->cdev, &mport_fops);
md->cdev.owner = THIS_MODULE;
- ret = cdev_device_add(&md->cdev, &md->dev);
- if (ret) {
- rmcd_error("Failed to register mport %d (err=%d)",
- mport->id, ret);
- goto err_cdev;
- }
-
INIT_LIST_HEAD(&md->doorbells);
spin_lock_init(&md->db_lock);
INIT_LIST_HEAD(&md->portwrites);
@@ -2406,6 +2408,13 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
#else
md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
#endif
+
+ ret = cdev_device_add(&md->cdev, &md->dev);
+ if (ret) {
+ rmcd_error("Failed to register mport %d (err=%d)",
+ mport->id, ret);
+ goto err_cdev;
+ }
ret = rio_query_mport(mport, &attr);
if (!ret) {
md->properties.flags = attr.flags;
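The existence check above now drops the reference it implicitly took: bus_find_device_by_name() returns the matching device with its refcount raised, so a caller that only wants to know whether the name is taken must call put_device(). Sketch of the idiom:

    #include <linux/device.h>

    static bool example_name_taken(struct bus_type *bus, const char *name)
    {
            struct device *dev = bus_find_device_by_name(bus, NULL, name);

            if (!dev)
                    return false;

            put_device(dev);        /* drop the reference the lookup took */
            return true;
    }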
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index b29fc258eeba..ab57a1eb519f 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -2136,6 +2136,14 @@ static int riocm_add_mport(struct device *dev,
return -ENODEV;
}
+ cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
+ if (!cm->rx_wq) {
+ rio_release_inb_mbox(mport, cmbox);
+ rio_release_outb_mbox(mport, cmbox);
+ kfree(cm);
+ return -ENOMEM;
+ }
+
/*
* Allocate and register inbound messaging buffers to be ready
* to receive channel and system management requests
@@ -2146,15 +2154,6 @@ static int riocm_add_mport(struct device *dev,
cm->rx_slots = RIOCM_RX_RING_SIZE;
mutex_init(&cm->rx_lock);
riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
- cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
- if (!cm->rx_wq) {
- riocm_error("failed to allocate IBMBOX_%d on %s",
- cmbox, mport->name);
- rio_release_outb_mbox(mport, cmbox);
- kfree(cm);
- return -ENOMEM;
- }
-
INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
cm->tx_slot = 0;
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 91b8ff8bac15..c9e3677ac734 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -558,7 +558,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
static int axp20x_regulator_parse_dt(struct platform_device *pdev)
{
struct device_node *np, *regulators;
- int ret;
+ int ret = 0;
u32 dcdcfreq = 0;
np = of_node_get(pdev->dev.parent->of_node);
@@ -573,13 +573,12 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
ret = axp20x_set_dcdc_freq(pdev, dcdcfreq);
if (ret < 0) {
dev_err(&pdev->dev, "Error setting dcdc frequency: %d\n", ret);
- return ret;
}
-
of_node_put(regulators);
}
- return 0;
+ of_node_put(np);
+ return ret;
}
static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 workmode)
diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
index 274c5ed7cd73..713730757b14 100644
--- a/drivers/regulator/bd9571mwv-regulator.c
+++ b/drivers/regulator/bd9571mwv-regulator.c
@@ -132,7 +132,7 @@ static struct regulator_ops vid_ops = {
static struct regulator_desc regulators[] = {
BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f,
- 0x80, 600000, 10000, 0x3c),
+ 0x6f, 600000, 10000, 0x3c),
BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf,
16, 1625000, 25000, 0),
BD9571MWV_REG("VD25", "vd25", VD25, vid_ops, BD9571MWV_VD25_VID, 0xf,
@@ -141,7 +141,7 @@ static struct regulator_desc regulators[] = {
11, 2800000, 100000, 0),
BD9571MWV_REG("DVFS", "dvfs", DVFS, reg_ops,
BD9571MWV_DVFS_MONIVDAC, 0x7f,
- 0x80, 600000, 10000, 0x3c),
+ 0x6f, 600000, 10000, 0x3c),
};
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 4bab758d14b1..65e1cde13d59 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1091,7 +1091,6 @@ static int _regulator_do_enable(struct regulator_dev *rdev);
/**
* set_machine_constraints - sets regulator constraints
* @rdev: regulator source
- * @constraints: constraints to apply
*
* Allows platform initialisation code to define and constrain
* regulator circuits e.g. valid voltage/current ranges, etc. NOTE:
@@ -1099,21 +1098,11 @@ static int _regulator_do_enable(struct regulator_dev *rdev);
* regulator operations to proceed i.e. set_voltage, set_current_limit,
* set_mode.
*/
-static int set_machine_constraints(struct regulator_dev *rdev,
- const struct regulation_constraints *constraints)
+static int set_machine_constraints(struct regulator_dev *rdev)
{
int ret = 0;
const struct regulator_ops *ops = rdev->desc->ops;
- if (constraints)
- rdev->constraints = kmemdup(constraints, sizeof(*constraints),
- GFP_KERNEL);
- else
- rdev->constraints = kzalloc(sizeof(*constraints),
- GFP_KERNEL);
- if (!rdev->constraints)
- return -ENOMEM;
-
ret = machine_constraints_voltage(rdev, rdev->constraints);
if (ret != 0)
return ret;
@@ -1153,17 +1142,6 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
- /* If the constraints say the regulator should be on at this point
- * and we have control then make sure it is enabled.
- */
- if (rdev->constraints->always_on || rdev->constraints->boot_on) {
- ret = _regulator_do_enable(rdev);
- if (ret < 0 && ret != -EINVAL) {
- rdev_err(rdev, "failed to enable\n");
- return ret;
- }
- }
-
if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable)
&& ops->set_ramp_delay) {
ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay);
@@ -1209,6 +1187,27 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
+ /* If the constraints say the regulator should be on at this point
+ * and we have control then make sure it is enabled.
+ */
+ if (rdev->constraints->always_on || rdev->constraints->boot_on) {
+ if (rdev->supply) {
+ ret = regulator_enable(rdev->supply);
+ if (ret < 0) {
+ _regulator_put(rdev->supply);
+ rdev->supply = NULL;
+ return ret;
+ }
+ }
+
+ ret = _regulator_do_enable(rdev);
+ if (ret < 0 && ret != -EINVAL) {
+ rdev_err(rdev, "failed to enable\n");
+ return ret;
+ }
+ rdev->use_count++;
+ }
+
print_constraints(rdev);
return 0;
}
@@ -1257,7 +1256,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
const char *consumer_dev_name,
const char *supply)
{
- struct regulator_map *node;
+ struct regulator_map *node, *new_node;
int has_dev;
if (supply == NULL)
@@ -1268,6 +1267,22 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
else
has_dev = 0;
+ new_node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL);
+ if (new_node == NULL)
+ return -ENOMEM;
+
+ new_node->regulator = rdev;
+ new_node->supply = supply;
+
+ if (has_dev) {
+ new_node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
+ if (new_node->dev_name == NULL) {
+ kfree(new_node);
+ return -ENOMEM;
+ }
+ }
+
+ mutex_lock(&regulator_list_mutex);
list_for_each_entry(node, &regulator_map_list, list) {
if (node->dev_name && consumer_dev_name) {
if (strcmp(node->dev_name, consumer_dev_name) != 0)
@@ -1285,26 +1300,19 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
node->regulator->desc->name,
supply,
dev_name(&rdev->dev), rdev_get_name(rdev));
- return -EBUSY;
+ goto fail;
}
- node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL);
- if (node == NULL)
- return -ENOMEM;
-
- node->regulator = rdev;
- node->supply = supply;
-
- if (has_dev) {
- node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
- if (node->dev_name == NULL) {
- kfree(node);
- return -ENOMEM;
- }
- }
+ list_add(&new_node->list, &regulator_map_list);
+ mutex_unlock(&regulator_list_mutex);
- list_add(&node->list, &regulator_map_list);
return 0;
+
+fail:
+ mutex_unlock(&regulator_list_mutex);
+ kfree(new_node->dev_name);
+ kfree(new_node);
+ return -EBUSY;
}
static void unset_regulator_supplies(struct regulator_dev *rdev)
@@ -1569,13 +1577,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
{
struct regulator_dev *r;
struct device *dev = rdev->dev.parent;
- int ret;
+ int ret = 0;
/* No supply to resolve? */
if (!rdev->supply_name)
return 0;
- /* Supply already resolved? */
+ /* Supply already resolved? (fast-path without locking contention) */
if (rdev->supply)
return 0;
@@ -1585,7 +1593,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
/* Did the lookup explicitly defer for us? */
if (ret == -EPROBE_DEFER)
- return ret;
+ goto out;
if (have_full_constraints()) {
r = dummy_regulator_rdev;
@@ -1593,10 +1601,22 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
} else {
dev_err(dev, "Failed to resolve %s-supply for %s\n",
rdev->supply_name, rdev->desc->name);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out;
}
}
+ if (r == rdev) {
+ dev_err(dev, "Supply for %s (%s) resolved to itself\n",
+ rdev->desc->name, rdev->supply_name);
+ if (!have_full_constraints()) {
+ ret = -EINVAL;
+ goto out;
+ }
+ r = dummy_regulator_rdev;
+ get_device(&r->dev);
+ }
+
/*
* If the supply's parent device is not the same as the
* regulator's parent device, then ensure the parent device
@@ -1606,7 +1626,8 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (r->dev.parent && r->dev.parent != rdev->dev.parent) {
if (!device_is_bound(r->dev.parent)) {
put_device(&r->dev);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out;
}
}
@@ -1614,26 +1635,48 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
ret = regulator_resolve_supply(r);
if (ret < 0) {
put_device(&r->dev);
- return ret;
+ goto out;
+ }
+
+ /*
+ * Recheck rdev->supply with rdev->mutex lock held to avoid a race
+ * between rdev->supply null check and setting rdev->supply in
+ * set_supply() from concurrent tasks.
+ */
+ regulator_lock(rdev);
+
+ /* Supply just resolved by a concurrent task? */
+ if (rdev->supply) {
+ regulator_unlock(rdev);
+ put_device(&r->dev);
+ goto out;
}
ret = set_supply(rdev, r);
if (ret < 0) {
+ regulator_unlock(rdev);
put_device(&r->dev);
- return ret;
+ goto out;
}
- /* Cascade always-on state to supply */
- if (_regulator_is_enabled(rdev)) {
+ regulator_unlock(rdev);
+
+ /*
+ * In set_machine_constraints() we may have turned this regulator on
+ * but we couldn't propagate to the supply if it hadn't been resolved
+ * yet. Do it now.
+ */
+ if (rdev->use_count) {
ret = regulator_enable(rdev->supply);
if (ret < 0) {
_regulator_put(rdev->supply);
rdev->supply = NULL;
- return ret;
+ goto out;
}
}
- return 0;
+out:
+ return ret;
}
/* Internal regulator request function */
@@ -3396,6 +3439,8 @@ static int _regulator_get_voltage(struct regulator_dev *rdev)
ret = rdev->desc->fixed_uV;
} else if (rdev->supply) {
ret = _regulator_get_voltage(rdev->supply->rdev);
+ } else if (rdev->supply_name) {
+ return -EPROBE_DEFER;
} else {
return -EINVAL;
}
@@ -4246,7 +4291,6 @@ struct regulator_dev *
regulator_register(const struct regulator_desc *regulator_desc,
const struct regulator_config *cfg)
{
- const struct regulation_constraints *constraints = NULL;
const struct regulator_init_data *init_data;
struct regulator_config *config = NULL;
static atomic_t regulator_no = ATOMIC_INIT(-1);
@@ -4347,22 +4391,36 @@ regulator_register(const struct regulator_desc *regulator_desc,
/* set regulator constraints */
if (init_data)
- constraints = &init_data->constraints;
+ rdev->constraints = kmemdup(&init_data->constraints,
+ sizeof(*rdev->constraints),
+ GFP_KERNEL);
+ else
+ rdev->constraints = kzalloc(sizeof(*rdev->constraints),
+ GFP_KERNEL);
+ if (!rdev->constraints) {
+ ret = -ENOMEM;
+ goto wash;
+ }
if (init_data && init_data->supply_regulator)
rdev->supply_name = init_data->supply_regulator;
else if (regulator_desc->supply_name)
rdev->supply_name = regulator_desc->supply_name;
- /*
- * Attempt to resolve the regulator supply, if specified,
- * but don't return an error if we fail because we will try
- * to resolve it again later as more regulators are added.
- */
- if (regulator_resolve_supply(rdev))
- rdev_dbg(rdev, "unable to resolve supply\n");
-
- ret = set_machine_constraints(rdev, constraints);
+ ret = set_machine_constraints(rdev);
+ if (ret == -EPROBE_DEFER) {
+ /* Regulator might be in bypass mode and so needs its supply
+ * to set the constraints */
+ /* FIXME: this currently triggers a chicken-and-egg problem
+ * when creating -SUPPLY symlink in sysfs to a regulator
+ * that is just being created */
+ ret = regulator_resolve_supply(rdev);
+ if (!ret)
+ ret = set_machine_constraints(rdev);
+ else
+ rdev_dbg(rdev, "unable to resolve supply early: %pe\n",
+ ERR_PTR(ret));
+ }
if (ret < 0)
goto wash;
@@ -4375,19 +4433,16 @@ regulator_register(const struct regulator_desc *regulator_desc,
/* add consumers devices */
if (init_data) {
- mutex_lock(&regulator_list_mutex);
for (i = 0; i < init_data->num_consumer_supplies; i++) {
ret = set_consumer_device_supply(rdev,
init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
if (ret < 0) {
- mutex_unlock(&regulator_list_mutex);
dev_err(dev, "Failed to set supply %s\n",
init_data->consumer_supplies[i].supply);
goto unset_supplies;
}
}
- mutex_unlock(&regulator_list_mutex);
}
if (!rdev->desc->ops->get_voltage &&
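A large part of the regulator core change above is a classic check-then-act fix: the unlocked rdev->supply test is only a fast path, and the result of the lookup must be rechecked under the regulator lock before being published, with the losing task releasing what it resolved. A generic sketch of that shape, using a plain mutex and a kzalloc'd stand-in for the looked-up supply (all names are illustrative):

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct resource_holder {
            struct mutex lock;
            void *resolved;
    };

    static int example_resolve(struct resource_holder *h)
    {
            void *r;

            if (h->resolved)                /* unlocked fast path */
                    return 0;

            r = kzalloc(16, GFP_KERNEL);    /* stand-in for the expensive lookup */
            if (!r)
                    return -ENOMEM;

            mutex_lock(&h->lock);
            if (h->resolved) {              /* lost the race: drop our copy */
                    mutex_unlock(&h->lock);
                    kfree(r);
                    return 0;
            }
            h->resolved = r;                /* publish under the lock */
            mutex_unlock(&h->lock);
            return 0;
    }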
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 69a377ab2604..4b8306594c3f 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -196,6 +196,19 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
};
+static const struct regulator_ops pfuze3000_sw_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = pfuze100_set_ramp_delay,
+
+};
+
#define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \
[_chip ## _ ## _name] = { \
.desc = { \
@@ -305,23 +318,28 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
.stby_mask = 0x20, \
}
-
-#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \
- .desc = { \
- .name = #_name,\
- .n_voltages = ((max) - (min)) / (step) + 1, \
- .ops = &pfuze100_sw_regulator_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = _chip ## _ ## _name, \
- .owner = THIS_MODULE, \
- .min_uV = (min), \
- .uV_step = (step), \
- .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
- .vsel_mask = 0x7, \
- }, \
- .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
- .stby_mask = 0x7, \
-}
+/* No linear case for some of the PFUZE3000 switches */
+#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages) \
+ [_chip ## _ ## _name] = { \
+ .desc = { \
+ .name = #_name, \
+ .n_voltages = ARRAY_SIZE(voltages), \
+ .ops = &pfuze3000_sw_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = _chip ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ .volt_table = voltages, \
+ .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
+ .vsel_mask = (mask), \
+ .enable_reg = (base) + PFUZE100_MODE_OFFSET, \
+ .enable_mask = 0xf, \
+ .enable_val = 0x8, \
+ .enable_time = 500, \
+ }, \
+ .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
+ .stby_mask = (mask), \
+ .sw_reg = true, \
+ }
#define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \
.desc = { \
@@ -377,9 +395,9 @@ static struct pfuze_regulator pfuze200_regulators[] = {
};
static struct pfuze_regulator pfuze3000_regulators[] = {
- PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+ PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
- PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
@@ -393,8 +411,8 @@ static struct pfuze_regulator pfuze3000_regulators[] = {
};
static struct pfuze_regulator pfuze3001_regulators[] = {
- PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
- PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+ PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
@@ -737,11 +755,14 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
* the switched regulator till yet.
*/
if (pfuze_chip->flags & PFUZE_FLAG_DISABLE_SW) {
- if (pfuze_chip->regulator_descs[i].sw_reg) {
- desc->ops = &pfuze100_sw_disable_regulator_ops;
- desc->enable_val = 0x8;
- desc->disable_val = 0x0;
- desc->enable_time = 500;
+ if (pfuze_chip->chip_id == PFUZE100 ||
+ pfuze_chip->chip_id == PFUZE200) {
+ if (pfuze_chip->regulator_descs[i].sw_reg) {
+ desc->ops = &pfuze100_sw_disable_regulator_ops;
+ desc->enable_val = 0x8;
+ desc->disable_val = 0x0;
+ desc->enable_time = 500;
+ }
}
}
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index a2fd140eff81..34f3b9778ffa 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -285,7 +285,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
return ret;
}
- drvdata->state = -EINVAL;
+ drvdata->state = -ENOTRECOVERABLE;
drvdata->duty_cycle_table = duty_cycle_table;
memcpy(&drvdata->ops, &pwm_regulator_voltage_table_ops,
sizeof(drvdata->ops));
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 667d16dc83ce..7ff94695eee7 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -548,14 +548,18 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
rdata = devm_kcalloc(&pdev->dev,
pdata->num_regulators, sizeof(*rdata),
GFP_KERNEL);
- if (!rdata)
+ if (!rdata) {
+ of_node_put(regulators_np);
return -ENOMEM;
+ }
rmode = devm_kcalloc(&pdev->dev,
pdata->num_regulators, sizeof(*rmode),
GFP_KERNEL);
- if (!rmode)
+ if (!rmode) {
+ of_node_put(regulators_np);
return -ENOMEM;
+ }
pdata->regulators = rdata;
pdata->opmode = rmode;
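The two hunks above plug a device-tree node leak on the early -ENOMEM returns; the same rule drives the goto-based cleanup added to qcom_smd_parse_edge() further down. Nodes returned by the OF lookup helpers carry a reference that every exit path must drop. A small sketch (node name is illustrative):

    #include <linux/errno.h>
    #include <linux/of.h>

    static int example_count_regulators(struct device_node *parent)
    {
            struct device_node *regs_np;
            int count;

            regs_np = of_get_child_by_name(parent, "regulators");
            if (!regs_np)
                    return -ENODEV;

            count = of_get_child_count(regs_np);
            of_node_put(regs_np);           /* drop the reference on every path */
            return count;
    }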
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index 89b9314d64c9..016330f909c0 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -342,8 +342,17 @@ static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel)
return ret;
}
- /* If data is exactly the same, then just update index, no change */
info = &abb->info[sel];
+ /*
+ * When the Linux kernel is starting up, we aren't sure of the
+ * bias configuration that the bootloader has left behind.
+ * So we only learn the actual setting the first time
+ * we are asked to transition.
+ */
+ if (abb->current_info_idx == -EINVAL)
+ goto just_set_abb;
+
+ /* If data is exactly the same, then just update index, no change */
oinfo = &abb->info[abb->current_info_idx];
if (!memcmp(info, oinfo, sizeof(*info))) {
dev_dbg(dev, "%s: Same data new idx=%d, old idx=%d\n", __func__,
@@ -351,6 +360,7 @@ static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel)
goto out;
}
+just_set_abb:
ret = ti_abb_set_opp(rdev, abb, info);
out:
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index 0d33e3079f0d..ef61cb709acd 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -151,6 +151,8 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
{
int ret;
+ q6v5->running = false;
+
qcom_smem_state_update_bits(q6v5->state,
BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
index cc475dcbf27f..604828c04ddf 100644
--- a/drivers/remoteproc/qcom_q6v5_pil.c
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -340,6 +340,12 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5 *qproc = rproc->priv;
+ /* MBA is restricted to a maximum size of 1M */
+ if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
+ dev_err(qproc->dev, "MBA firmware load failed\n");
+ return -EINVAL;
+ }
+
memcpy(qproc->mba_region, fw->data, fw->size);
return 0;
@@ -739,14 +745,13 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
if (phdr->p_filesz) {
snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
- ret = request_firmware(&seg_fw, seg_name, qproc->dev);
+ ret = request_firmware_into_buf(&seg_fw, seg_name, qproc->dev,
+ ptr, phdr->p_filesz);
if (ret) {
dev_err(qproc->dev, "failed to load %s\n", seg_name);
goto release_firmware;
}
- memcpy(ptr, seg_fw->data, seg_fw->size);
-
release_firmware(seg_fw);
}
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index abbef17c97ee..e48069db1703 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -289,7 +289,7 @@ void rproc_free_vring(struct rproc_vring *rvring)
{
int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
struct rproc *rproc = rvring->rvdev->rproc;
- int idx = rvring->rvdev->vring - rvring;
+ int idx = rvring - rvring->rvdev->vring;
struct fw_rsc_vdev *rsc;
dma_free_coherent(rproc->dev.parent, size, rvring->va, rvring->dma);
@@ -1598,6 +1598,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
rproc->dev.type = &rproc_type;
rproc->dev.class = &rproc_class;
rproc->dev.driver_data = rproc;
+ idr_init(&rproc->notifyids);
/* Assign a unique device index and name */
rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
@@ -1622,8 +1623,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
mutex_init(&rproc->lock);
- idr_init(&rproc->notifyids);
-
INIT_LIST_HEAD(&rproc->carveouts);
INIT_LIST_HEAD(&rproc->mappings);
INIT_LIST_HEAD(&rproc->traces);
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index facc577ab0ac..8fa0f0eaaf43 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -857,6 +857,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
dev_err(glink->dev,
"no intent found for channel %s intent %d",
channel->name, liid);
+ ret = -ENOENT;
goto advance_rx;
}
}
@@ -970,7 +971,7 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
return -EINVAL;
}
- complete(&channel->open_ack);
+ complete_all(&channel->open_ack);
return 0;
}
@@ -1178,7 +1179,7 @@ static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
__be32 *val = defaults;
int size;
- if (glink->intentless)
+ if (glink->intentless || !completion_done(&channel->open_ack))
return 0;
prop = of_find_property(np, "qcom,intents", NULL);
@@ -1413,7 +1414,7 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
channel->rcid = ret;
spin_unlock_irqrestore(&glink->idr_lock, flags);
- complete(&channel->open_req);
+ complete_all(&channel->open_req);
if (create_device) {
rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index b2e5a6abf7d5..aa008fa11002 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -1338,7 +1338,7 @@ static int qcom_smd_parse_edge(struct device *dev,
ret = of_property_read_u32(node, key, &edge->edge_id);
if (ret) {
dev_err(dev, "edge missing %s property\n", key);
- return -EINVAL;
+ goto put_node;
}
edge->remote_pid = QCOM_SMEM_HOST_ANY;
@@ -1349,32 +1349,37 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->mbox_client.knows_txdone = true;
edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
if (IS_ERR(edge->mbox_chan)) {
- if (PTR_ERR(edge->mbox_chan) != -ENODEV)
- return PTR_ERR(edge->mbox_chan);
+ if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
+ ret = PTR_ERR(edge->mbox_chan);
+ goto put_node;
+ }
edge->mbox_chan = NULL;
syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
if (!syscon_np) {
dev_err(dev, "no qcom,ipc node\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto put_node;
}
edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
- if (IS_ERR(edge->ipc_regmap))
- return PTR_ERR(edge->ipc_regmap);
+ if (IS_ERR(edge->ipc_regmap)) {
+ ret = PTR_ERR(edge->ipc_regmap);
+ goto put_node;
+ }
key = "qcom,ipc";
ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
if (ret < 0) {
dev_err(dev, "no offset in %s\n", key);
- return -EINVAL;
+ goto put_node;
}
ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
if (ret < 0) {
dev_err(dev, "no bit in %s\n", key);
- return -EINVAL;
+ goto put_node;
}
}
@@ -1385,7 +1390,8 @@ static int qcom_smd_parse_edge(struct device *dev,
irq = irq_of_parse_and_map(node, 0);
if (irq < 0) {
dev_err(dev, "required smd interrupt missing\n");
- return -EINVAL;
+ ret = irq;
+ goto put_node;
}
ret = devm_request_irq(dev, irq,
@@ -1393,12 +1399,18 @@ static int qcom_smd_parse_edge(struct device *dev,
node->name, edge);
if (ret) {
dev_err(dev, "failed to request smd irq\n");
- return ret;
+ goto put_node;
}
edge->irq = irq;
return 0;
+
+put_node:
+ of_node_put(node);
+ edge->of_node = NULL;
+
+ return ret;
}
/*
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 28a4505a1bc8..b5845f16a3a2 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -639,6 +639,7 @@ config RTC_DRV_S5M
tristate "Samsung S2M/S5M series"
depends on MFD_SEC_CORE || COMPILE_TEST
select REGMAP_IRQ
+ select REGMAP_I2C
help
If you say yes here you will get support for the
RTC of Samsung S2MPS14 and S5M PMIC series.
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index 73697e4b18a9..9d4a59aa29a1 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -341,6 +341,10 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, info);
+ info->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(info->rtc_dev))
+ return PTR_ERR(info->rtc_dev);
+
ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
rtc_update_handler, IRQF_ONESHOT, "rtc",
info);
@@ -382,13 +386,11 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
}
}
- info->rtc_dev = devm_rtc_device_register(&pdev->dev, "88pm860x-rtc",
- &pm860x_rtc_ops, THIS_MODULE);
- ret = PTR_ERR(info->rtc_dev);
- if (IS_ERR(info->rtc_dev)) {
- dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
+ info->rtc_dev->ops = &pm860x_rtc_ops;
+
+ ret = rtc_register_device(info->rtc_dev);
+ if (ret)
return ret;
- }
/*
* enable internal XO instead of internal 3.25MHz clock since it can
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index ebd59e86a567..94d31779933f 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -434,7 +434,11 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f);
tmp = regs[DS1307_REG_HOUR] & 0x3f;
t->tm_hour = bcd2bin(tmp);
- t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
+ /* rx8130 is bit position, not BCD */
+ if (ds1307->type == rx_8130)
+ t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f);
+ else
+ t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f);
tmp = regs[DS1307_REG_MONTH] & 0x1f;
t->tm_mon = bcd2bin(tmp) - 1;
@@ -481,7 +485,11 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
regs[DS1307_REG_MIN] = bin2bcd(t->tm_min);
regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
- regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
+ /* rx8130 is bit position, not BCD */
+ if (ds1307->type == rx_8130)
+ regs[DS1307_REG_WDAY] = 1 << t->tm_wday;
+ else
+ regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
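The two hunks above special-case the RX8130 day-of-week register: most chips handled by this driver store the weekday as BCD 1..7, while the RX8130 uses one bit per day, so the value is written as 1 << tm_wday and read back with fls(). A tiny stand-alone comparison of the two encodings (tm_wday = 3, i.e. Wednesday):

    #include <stdio.h>

    int main(void)
    {
            int wday = 3;                                   /* rtc_time/tm_wday style */
            unsigned int bcd = ((wday + 1) / 10) << 4 | ((wday + 1) % 10);
            unsigned int bit = 1u << wday;

            printf("BCD register: 0x%02x, bit-position register: 0x%02x\n",
                   bcd, bit);
            return 0;       /* prints: BCD register: 0x04, bit-position register: 0x08 */
    }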
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 38a2e9e684df..77a106e90124 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -620,6 +620,10 @@ static int ds1374_probe(struct i2c_client *client,
if (!ds1374)
return -ENOMEM;
+ ds1374->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(ds1374->rtc))
+ return PTR_ERR(ds1374->rtc);
+
ds1374->client = client;
i2c_set_clientdata(client, ds1374);
@@ -641,12 +645,11 @@ static int ds1374_probe(struct i2c_client *client,
device_set_wakeup_capable(&client->dev, 1);
}
- ds1374->rtc = devm_rtc_device_register(&client->dev, client->name,
- &ds1374_rtc_ops, THIS_MODULE);
- if (IS_ERR(ds1374->rtc)) {
- dev_err(&client->dev, "unable to register the class device\n");
- return PTR_ERR(ds1374->rtc);
- }
+ ds1374->rtc->ops = &ds1374_rtc_ops;
+
+ ret = rtc_register_device(ds1374->rtc);
+ if (ret)
+ return ret;
#ifdef CONFIG_RTC_DRV_DS1374_WDT
save_client = client;
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
index a1c44d0c8557..30cbe22c57a8 100644
--- a/drivers/rtc/rtc-goldfish.c
+++ b/drivers/rtc/rtc-goldfish.c
@@ -87,6 +87,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
rtc_alarm64 = rtc_alarm * NSEC_PER_SEC;
writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
writel(rtc_alarm64, base + TIMER_ALARM_LOW);
+ writel(1, base + TIMER_IRQ_ENABLED);
} else {
/*
* if this function was called with enabled=0
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 323ff55cc165..080211dd916a 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -561,9 +561,7 @@ static const struct pinctrl_ops rtc_pinctrl_ops = {
.dt_free_map = pinconf_generic_dt_free_map,
};
-enum rtc_pin_config_param {
- PIN_CONFIG_ACTIVE_HIGH = PIN_CONFIG_END + 1,
-};
+#define PIN_CONFIG_ACTIVE_HIGH (PIN_CONFIG_END + 1)
static const struct pinconf_generic_params rtc_params[] = {
{"ti,active-high", PIN_CONFIG_ACTIVE_HIGH, 0},
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index 7ddc22eb5b0f..f4db80f9c1b1 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -428,16 +428,26 @@ static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
}
}
-static struct rtc_class_ops rx8010_rtc_ops = {
+static const struct rtc_class_ops rx8010_rtc_ops_default = {
.read_time = rx8010_get_time,
.set_time = rx8010_set_time,
.ioctl = rx8010_ioctl,
};
+static const struct rtc_class_ops rx8010_rtc_ops_alarm = {
+ .read_time = rx8010_get_time,
+ .set_time = rx8010_set_time,
+ .ioctl = rx8010_ioctl,
+ .read_alarm = rx8010_read_alarm,
+ .set_alarm = rx8010_set_alarm,
+ .alarm_irq_enable = rx8010_alarm_irq_enable,
+};
+
static int rx8010_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ const struct rtc_class_ops *rtc_ops;
struct rx8010_data *rx8010;
int err = 0;
@@ -468,16 +478,16 @@ static int rx8010_probe(struct i2c_client *client,
if (err) {
dev_err(&client->dev, "unable to request IRQ\n");
- client->irq = 0;
- } else {
- rx8010_rtc_ops.read_alarm = rx8010_read_alarm;
- rx8010_rtc_ops.set_alarm = rx8010_set_alarm;
- rx8010_rtc_ops.alarm_irq_enable = rx8010_alarm_irq_enable;
+ return err;
}
+
+ rtc_ops = &rx8010_rtc_ops_alarm;
+ } else {
+ rtc_ops = &rx8010_rtc_ops_default;
}
rx8010->rtc = devm_rtc_device_register(&client->dev, client->name,
- &rx8010_rtc_ops, THIS_MODULE);
+ rtc_ops, THIS_MODULE);
if (IS_ERR(rx8010->rtc)) {
dev_err(&client->dev, "unable to register the class device\n");
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 304d905cb23f..56f625371735 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -186,7 +186,6 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
{
- struct rtc_device *rtc;
int ret;
spin_lock_init(&info->lock);
@@ -215,15 +214,14 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
writel_relaxed(0, info->rcnr);
}
- rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops,
- THIS_MODULE);
- if (IS_ERR(rtc)) {
+ info->rtc->ops = &sa1100_rtc_ops;
+ info->rtc->max_user_freq = RTC_FREQ;
+
+ ret = rtc_register_device(info->rtc);
+ if (ret) {
clk_disable_unprepare(info->clk);
- return PTR_ERR(rtc);
+ return ret;
}
- info->rtc = rtc;
-
- rtc->max_user_freq = RTC_FREQ;
/* Fix for a nasty initialization problem in the SA11xx RTSR register.
* See also the comments in sa1100_rtc_interrupt().
@@ -272,6 +270,10 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
info->irq_1hz = irq_1hz;
info->irq_alarm = irq_alarm;
+ info->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(info->rtc))
+ return PTR_ERR(info->rtc);
+
ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0,
"rtc 1Hz", &pdev->dev);
if (ret) {
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 2cd5a7b1a2e3..e85abe805606 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -232,7 +232,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
300000000);
if (IS_ERR(rtc->int_osc)) {
pr_crit("Couldn't register the internal oscillator\n");
- return;
+ goto err;
}
parents[0] = clk_hw_get_name(rtc->int_osc);
@@ -248,7 +248,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
rtc->losc = clk_register(NULL, &rtc->hw);
if (IS_ERR(rtc->losc)) {
pr_crit("Couldn't register the LOSC clock\n");
- return;
+ goto err_register;
}
of_property_read_string_index(node, "clock-output-names", 1,
@@ -259,7 +259,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
&rtc->lock);
if (IS_ERR(rtc->ext_losc)) {
pr_crit("Couldn't register the LOSC external gate\n");
- return;
+ goto err_register;
}
clk_data->num = 2;
@@ -268,6 +268,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
return;
+err_register:
+ clk_hw_unregister_fixed_rate(rtc->int_osc);
err:
kfree(clk_data);
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index a23e7d394a0a..7beda20cf122 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2833,6 +2833,12 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
if (!block)
return -EINVAL;
+ /*
+ * If the request is an ERP request there is nothing to requeue.
+ * This will be done with the remaining original request.
+ */
+ if (cqr->refers)
+ return 0;
spin_lock_irq(&cqr->dq->lock);
req = (struct request *) cqr->callback_data;
blk_mq_requeue_request(req, false);
@@ -2934,7 +2940,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
basedev = block->base;
spin_lock_irq(&dq->lock);
- if (basedev->state < DASD_STATE_READY) {
+ if (basedev->state < DASD_STATE_READY ||
+ test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"device not ready for request %p", req);
rc = BLK_STS_IOERR;
@@ -3420,8 +3427,6 @@ void dasd_generic_remove(struct ccw_device *cdev)
struct dasd_device *device;
struct dasd_block *block;
- cdev->handler = NULL;
-
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device)) {
dasd_remove_sysfs_files(cdev);
@@ -3440,6 +3445,7 @@ void dasd_generic_remove(struct ccw_device *cdev)
* not quite down yet.
*/
dasd_set_target_state(device, DASD_STATE_NEW);
+ cdev->handler = NULL;
/* dasd_delete_device destroys the device reference. */
block = device->block;
dasd_delete_device(device);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 99f86612f775..dc78a523a69f 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -256,7 +256,6 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
return;
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&lcu->lock, flags);
- list_del_init(&device->alias_list);
/* make sure that the workers don't use this device */
if (device == lcu->suc_data.device) {
spin_unlock_irqrestore(&lcu->lock, flags);
@@ -283,6 +282,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
spin_lock_irqsave(&aliastree.lock, flags);
spin_lock(&lcu->lock);
+ list_del_init(&device->alias_list);
if (list_empty(&lcu->grouplist) &&
list_empty(&lcu->active_devices) &&
list_empty(&lcu->inactive_devices)) {
@@ -462,11 +462,19 @@ static int read_unit_address_configuration(struct dasd_device *device,
spin_unlock_irqrestore(&lcu->lock, flags);
rc = dasd_sleep_on(cqr);
- if (rc && !suborder_not_supported(cqr)) {
+ if (!rc)
+ goto out;
+
+ if (suborder_not_supported(cqr)) {
+ /* suborder not supported or device unusable for IO */
+ rc = -EOPNOTSUPP;
+ } else {
+ /* IO failed but should be retried */
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
}
+out:
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
@@ -503,6 +511,14 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
return rc;
spin_lock_irqsave(&lcu->lock, flags);
+ /*
+ * If there is another update needed, skip the remaining handling:
+ * the data might already be outdated, and in particular the
+ * device must not be added to an LCU that still has an
+ * update pending.
+ */
+ if (lcu->flags & NEED_UAC_UPDATE)
+ goto out;
lcu->pav = NO_PAV;
for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
switch (lcu->uac->unit[i].ua_type) {
@@ -521,6 +537,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
alias_list) {
_add_device_to_lcu(lcu, device, refdev);
}
+out:
spin_unlock_irqrestore(&lcu->lock, flags);
return 0;
}
@@ -625,6 +642,7 @@ int dasd_alias_add_device(struct dasd_device *device)
}
if (lcu->flags & UPDATE_PENDING) {
list_move(&device->alias_list, &lcu->active_devices);
+ private->pavgroup = NULL;
_schedule_lcu_update(lcu, device);
}
spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 56007a3e7f11..fab09455ba94 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -40,6 +40,7 @@
MODULE_LICENSE("GPL");
static struct dasd_discipline dasd_fba_discipline;
+static void *dasd_fba_zero_page;
struct dasd_fba_private {
struct dasd_fba_characteristics rdc_data;
@@ -270,7 +271,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count)
ccw->cmd_code = DASD_FBA_CCW_WRITE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = count;
- ccw->cda = (__u32) (addr_t) page_to_phys(ZERO_PAGE(0));
+ ccw->cda = (__u32) (addr_t) dasd_fba_zero_page;
}
/*
@@ -811,6 +812,11 @@ dasd_fba_init(void)
int ret;
ASCEBC(dasd_fba_discipline.ebcname, 4);
+
+ dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!dasd_fba_zero_page)
+ return -ENOMEM;
+
ret = ccw_driver_register(&dasd_fba_driver);
if (!ret)
wait_for_device_probe();
@@ -822,6 +828,7 @@ static void __exit
dasd_fba_cleanup(void)
{
ccw_driver_unregister(&dasd_fba_driver);
+ free_page((unsigned long)dasd_fba_zero_page);
}
module_init(dasd_fba_init);
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 7036a6c6f86f..5542d9eadfe0 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -76,7 +76,7 @@ int dasd_gendisk_alloc(struct dasd_block *block)
gdp->queue = block->request_queue;
block->gdp = gdp;
set_capacity(block->gdp, 0);
- device_add_disk(&base->cdev->dev, block->gdp);
+ device_add_disk(&base->cdev->dev, block->gdp, NULL);
return 0;
}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 23e526cda5c1..4e8aedd50cb0 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -685,7 +685,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
}
get_device(&dev_info->dev);
- device_add_disk(&dev_info->dev, dev_info->gd);
+ device_add_disk(&dev_info->dev, dev_info->gd, NULL);
switch (dev_info->segment_type) {
case SEG_TYPE_SR:
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 98f66b7b6794..e01889394c84 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -500,7 +500,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
/* 512 byte sectors */
set_capacity(bdev->gendisk, scmdev->size >> 9);
- device_add_disk(&scmdev->dev, bdev->gendisk);
+ device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
return 0;
out_queue:
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index df09ed53ab45..825a8f2703b4 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -615,6 +615,11 @@ static int slow_eval_known_fn(struct subchannel *sch, void *data)
rc = css_evaluate_known_subchannel(sch, 1);
if (rc == -EAGAIN)
css_schedule_eval(sch->schid);
+ /*
+ * The loop might take a long time for platforms with lots of
+ * known devices. Allow scheduling here.
+ */
+ cond_resched();
}
return 0;
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 1540229a37bb..c9bc9a6bd73b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -827,8 +827,10 @@ static void io_subchannel_register(struct ccw_device *cdev)
* Now we know this subchannel will stay, we can throw
* our delayed uevent.
*/
- dev_set_uevent_suppress(&sch->dev, 0);
- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
/* make it known to the system */
ret = ccw_device_add(cdev);
if (ret) {
@@ -1036,8 +1038,11 @@ static int io_subchannel_probe(struct subchannel *sch)
* Throw the delayed uevent for the subchannel, register
* the ccw_device and exit.
*/
- dev_set_uevent_suppress(&sch->dev, 0);
- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ /* should always be the case for the console */
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
cdev = sch_get_cdev(sch);
rc = ccw_device_add(cdev);
if (rc) {
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index a6f7c2986b94..ed60b8d4efe6 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -377,7 +377,6 @@ static inline int multicast_outbound(struct qdio_q *q)
extern u64 last_ai_time;
/* prototypes for thin interrupt */
-void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index d040c4920ee7..b8955e20f246 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -480,7 +480,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
setup_queues(irq_ptr, init_data);
setup_qib(irq_ptr, init_data);
- qdio_setup_thinint(irq_ptr);
set_impl_params(irq_ptr, init_data->qib_param_field_format,
init_data->qib_param_field,
init_data->input_slib_elements,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 6628e0c9e70e..e6b22a58150a 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -267,17 +267,19 @@ int __init tiqdio_register_thinints(void)
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
+ int rc;
+
if (!is_thinint_irq(irq_ptr))
return 0;
- return set_subchannel_ind(irq_ptr, 0);
-}
-void qdio_setup_thinint(struct qdio_irq *irq_ptr)
-{
- if (!is_thinint_irq(irq_ptr))
- return;
irq_ptr->dsci = get_indicator();
DBF_HEX(&irq_ptr->dsci, sizeof(void *));
+
+ rc = set_subchannel_ind(irq_ptr, 0);
+ if (rc)
+ put_indicator(irq_ptr->dsci);
+
+ return rc;
}
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
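With the qdio change above, qdio_establish_thinint() acquires the DSCI indicator itself and gives it back with put_indicator() when set_subchannel_ind() fails, instead of leaving half of the setup in a separate qdio_setup_thinint(). A small C sketch of that acquire-attempt-rollback shape; the helpers here are placeholders, not the s390 primitives:

#include <stdio.h>
#include <stdlib.h>

static void *get_indicator(void)	 { return malloc(8); }
static void  put_indicator(void *p)	 { free(p); }
static int   set_subchannel_ind(int ok) { return ok ? 0 : -1; }

static int establish(int will_succeed)
{
	void *dsci = get_indicator();
	int rc;

	if (!dsci)
		return -1;

	rc = set_subchannel_ind(will_succeed);
	if (rc)
		put_indicator(dsci);	/* roll back the acquisition on failure */

	/* On success dsci intentionally stays allocated: it is now "installed". */
	return rc;
}

int main(void)
{
	printf("ok path:   %d\n", establish(1));
	printf("fail path: %d\n", establish(0));
	return 0;
}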
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index dc5ff47de3fe..6873ab5fe57a 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -341,7 +341,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
if (ret)
return ret;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
case VFIO_DEVICE_GET_REGION_INFO:
{
@@ -362,7 +362,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
if (ret)
return ret;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
case VFIO_DEVICE_GET_IRQ_INFO:
{
@@ -383,7 +383,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
if (info.count == -1)
return -EINVAL;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
case VFIO_DEVICE_SET_IRQS:
{
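The three vfio_ccw hunks above fix a common ioctl mistake: copy_to_user() returns the number of bytes it could not copy, not an errno, so its result has to be mapped to -EFAULT rather than handed back to userspace. A userspace mock of that convention; fake_copy_to_user() only simulates the kernel helper's return-value contract:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Mimics the kernel contract: returns the number of bytes NOT copied. */
static unsigned long fake_copy_to_user(void *dst, const void *src,
				       unsigned long n, int fail)
{
	if (fail)
		return n;		/* nothing copied */
	memcpy(dst, src, n);
	return 0;
}

static long ioctl_reply(void *dst, const void *src, unsigned long n, int fail)
{
	/* Returning fake_copy_to_user() directly would leak a byte count as a "success" code. */
	return fake_copy_to_user(dst, src, n, fail) ? -EFAULT : 0;
}

int main(void)
{
	char info[16] = "region info", out[16];

	printf("ok:   %ld\n", ioctl_reply(out, info, sizeof(info), 0));
	printf("fail: %ld\n", ioctl_reply(out, info, sizeof(info), 1));
	return 0;
}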
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 23c24a699cef..b7cb897cd83e 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -915,7 +915,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (!reqcnt)
return -ENOMEM;
zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
- if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+ if (copy_to_user((int __user *) arg, reqcnt,
+ sizeof(u32) * AP_DEVICES))
rc = -EFAULT;
kfree(reqcnt);
return rc;
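The zcrypt hunk above matters because reqcnt is a pointer obtained from an allocation, so sizeof(reqcnt) is the size of the pointer (4 or 8 bytes), not the size of the counter array; the copy length has to be spelled out as element size times element count. A short demonstration in plain C, with AP_DEVICES given an arbitrary value for the demo:

#include <stdio.h>
#include <stdlib.h>

#define AP_DEVICES 256		/* arbitrary value for the demo */

int main(void)
{
	unsigned int counters_array[AP_DEVICES];
	unsigned int *counters = calloc(AP_DEVICES, sizeof(*counters));

	if (!counters)
		return 1;

	/* Array object: full size.  Pointer: just the pointer itself. */
	printf("sizeof(counters_array) = %zu\n", sizeof(counters_array));
	printf("sizeof(counters)       = %zu\n", sizeof(counters));
	printf("intended copy length   = %zu\n",
	       sizeof(*counters) * AP_DEVICES);

	free(counters);
	return 0;
}

On a typical 64-bit build this prints 1024, 8 and 1024, which is exactly the gap the fix closes.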
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 5f59e2dfc7db..d0aaef937b0f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -470,12 +470,6 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
}
}
- if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
- QETH_QDIO_BUF_HANDLED_DELAYED)) {
- /* for recovery situations */
- qeth_init_qdio_out_buf(q, bidx);
- QETH_CARD_TEXT(q->card, 2, "clprecov");
- }
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index eb917e93fa72..8d30f9ac3e9d 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1463,6 +1463,10 @@ static void qeth_bridge_state_change(struct qeth_card *card,
int extrasize;
QETH_CARD_TEXT(card, 2, "brstchng");
+ if (qports->num_entries == 0) {
+ QETH_CARD_TEXT(card, 2, "BPempty");
+ return;
+ }
if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
return;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index f602b42b8343..b7afdb55a459 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -592,7 +592,10 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
ZFCP_STATUS_ERP_TIMEDOUT)) {
req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
zfcp_dbf_rec_run("erscf_1", act);
- req->erp_action = NULL;
+ /* lock-free concurrent access with
+ * zfcp_erp_timeout_handler()
+ */
+ WRITE_ONCE(req->erp_action, NULL);
}
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
zfcp_dbf_rec_run("erscf_2", act);
@@ -628,8 +631,14 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
void zfcp_erp_timeout_handler(struct timer_list *t)
{
struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
- struct zfcp_erp_action *act = fsf_req->erp_action;
+ struct zfcp_erp_action *act;
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
+ return;
+ /* lock-free concurrent access with zfcp_erp_strategy_check_fsfreq() */
+ act = READ_ONCE(fsf_req->erp_action);
+ if (!act)
+ return;
zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
}
@@ -738,7 +747,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
adapter->peer_d_id);
if (IS_ERR(port)) /* error or port already attached */
return;
- _zfcp_erp_port_reopen(port, 0, "ereptp1");
+ zfcp_erp_port_reopen(port, 0, "ereptp1");
}
static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
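The zfcp_erp hunks above turn erp_action into a lock-free handoff: the completion path publishes NULL with WRITE_ONCE(), and the timer path takes a single READ_ONCE() snapshot and re-checks it before use, so the compiler can neither reload nor tear the pointer access. The closest portable userspace analogue is a relaxed C11 atomic; a sketch with stand-in types and work:

#include <stdatomic.h>
#include <stdio.h>

struct erp_action { int id; };

static _Atomic(struct erp_action *) current_action;

/* Completion path: retire the action (the driver uses WRITE_ONCE()). */
static void complete_request(void)
{
	atomic_store_explicit(&current_action, NULL, memory_order_relaxed);
}

/* Timer path: take one snapshot and test it (the driver uses READ_ONCE()). */
static void timeout_handler(void)
{
	struct erp_action *act =
		atomic_load_explicit(&current_action, memory_order_relaxed);

	if (!act)
		return;		/* already retired, nothing to notify */
	printf("timeout on action %d\n", act->id);
}

int main(void)
{
	static struct erp_action a = { .id = 7 };

	atomic_store_explicit(&current_action, &a, memory_order_relaxed);
	timeout_handler();	/* sees the action */
	complete_request();
	timeout_handler();	/* sees NULL and bails out */
	return 0;
}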
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 91aa4bfcf8d6..5bb278a604ed 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -403,7 +403,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
return;
}
- del_timer(&req->timer);
+ del_timer_sync(&req->timer);
zfcp_fsf_protstatus_eval(req);
zfcp_fsf_fsfstatus_eval(req);
req->handler(req);
@@ -758,7 +758,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
req->issued = get_tod_clock();
if (zfcp_qdio_send(qdio, &req->qdio_req)) {
- del_timer(&req->timer);
+ del_timer_sync(&req->timer);
/* lookup request again, list might have changed */
zfcp_reqlist_find_rm(adapter->req_list, req_id);
zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 67efdf25657f..0447ae2781ba 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -103,7 +103,7 @@ struct virtio_rev_info {
};
/* the highest virtio-ccw revision we support */
-#define VIRTIO_CCW_REV_MAX 1
+#define VIRTIO_CCW_REV_MAX 2
struct virtio_ccw_vq_info {
struct virtqueue *vq;
@@ -911,7 +911,7 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
u8 old_status = *vcdev->status;
struct ccw1 *ccw;
- if (vcdev->revision < 1)
+ if (vcdev->revision < 2)
return *vcdev->status;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 0d4ffe0ae306..79b5c5457cc2 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3081,11 +3081,11 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
ccb->opcode = BLOGIC_INITIATOR_CCB_SG;
ccb->datalen = count * sizeof(struct blogic_sg_seg);
if (blogic_multimaster_type(adapter))
- ccb->data = (void *)((unsigned int) ccb->dma_handle +
+ ccb->data = (unsigned int) ccb->dma_handle +
((unsigned long) &ccb->sglist -
- (unsigned long) ccb));
+ (unsigned long) ccb);
else
- ccb->data = ccb->sglist;
+ ccb->data = virt_to_32bit_virt(ccb->sglist);
scsi_for_each_sg(command, sg, count, i) {
ccb->sglist[i].segbytes = sg_dma_len(sg);
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 8d47e2c88d24..1a33a4b28d45 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -821,7 +821,7 @@ struct blogic_ccb {
unsigned char cdblen; /* Byte 2 */
unsigned char sense_datalen; /* Byte 3 */
u32 datalen; /* Bytes 4-7 */
- void *data; /* Bytes 8-11 */
+ u32 data; /* Bytes 8-11 */
unsigned char:8; /* Byte 12 */
unsigned char:8; /* Byte 13 */
enum blogic_adapter_status adapter_status; /* Byte 14 */
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a8ac48027632..7cb6e2b9e180 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -144,15 +144,6 @@ config BLK_DEV_SR
<file:Documentation/scsi/scsi.txt>.
The module will be called sr_mod.
-config BLK_DEV_SR_VENDOR
- bool "Enable vendor-specific extensions (for SCSI CDROM)"
- depends on BLK_DEV_SR
- help
- This enables the usage of vendor specific SCSI commands. This is
- required to support multisession CDs with old NEC/TOSHIBA cdrom
- drives (and HP Writers). If you have such a drive and get the first
- session only, try saying Y here; everybody else says N.
-
config CHR_DEV_SG
tristate "SCSI generic support"
depends on SCSI
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 6e356325d8d9..54717fb84a54 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -2481,13 +2481,13 @@ static int aac_read(struct scsi_cmnd * scsicmd)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
- return 1;
+ return 0;
}
dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
@@ -2573,13 +2573,13 @@ static int aac_write(struct scsi_cmnd * scsicmd)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
- return 1;
+ return 0;
}
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index b7588de4484e..4cb6ee6e1212 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -743,7 +743,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
hbacmd->request_id =
cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
- } else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
+ } else
return -EINVAL;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 1046947064a0..eecffc03084c 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -736,7 +736,11 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
(fib_callback) aac_hba_callback,
(void *) cmd);
-
+ if (status != -EINPROGRESS) {
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return ret;
+ }
/* Wait up to 15 secs for completion */
for (count = 0; count < 15; ++count) {
if (cmd->SCp.sent_command) {
@@ -915,11 +919,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
info = &aac->hba_map[bus][cid];
- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
- info->reset_state > 0)
+ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
+ !(info->reset_state > 0)))
return FAILED;
- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+ pr_err("%s: Host device reset request. SCSI hang ?\n",
AAC_DRIVERNAME);
fib = aac_fib_alloc(aac);
@@ -934,7 +938,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
status = aac_hba_send(command, fib,
(fib_callback) aac_tmf_callback,
(void *) info);
-
+ if (status != -EINPROGRESS) {
+ info->reset_state = 0;
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return ret;
+ }
/* Wait up to 15 seconds for completion */
for (count = 0; count < 15; ++count) {
if (info->reset_state == 0) {
@@ -973,11 +982,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
info = &aac->hba_map[bus][cid];
- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
- info->reset_state > 0)
+ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
+ !(info->reset_state > 0)))
return FAILED;
- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+ pr_err("%s: Host target reset request. SCSI hang ?\n",
AAC_DRIVERNAME);
fib = aac_fib_alloc(aac);
@@ -994,6 +1003,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
(fib_callback) aac_tmf_callback,
(void *) info);
+ if (status != -EINPROGRESS) {
+ info->reset_state = 0;
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return ret;
+ }
+
/* Wait up to 15 seconds for completion */
for (count = 0; count < 15; ++count) {
if (info->reset_state <= 0) {
@@ -1046,7 +1062,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
}
}
- pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);
+ pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME);
/*
* Check the health of the controller
@@ -1604,7 +1620,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct Scsi_Host *shost;
struct aac_dev *aac;
struct list_head *insert = &aac_devices;
- int error = -ENODEV;
+ int error;
int unique_id = 0;
u64 dmamask;
int mask_bits = 0;
@@ -1629,7 +1645,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
error = pci_enable_device(pdev);
if (error)
goto out;
- error = -ENODEV;
if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -1661,8 +1676,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
- if (!shost)
+ if (!shost) {
+ error = -ENOMEM;
goto out_disable_pdev;
+ }
shost->irq = pdev->irq;
shost->unique_id = unique_id;
@@ -1687,8 +1704,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
sizeof(struct fib),
GFP_KERNEL);
- if (!aac->fibs)
+ if (!aac->fibs) {
+ error = -ENOMEM;
goto out_free_host;
+ }
+
spin_lock_init(&aac->fib_lock);
mutex_init(&aac->ioctl_mutex);
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 421fe869a11e..ef9d907f2df5 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2914,8 +2914,10 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
- if (!ashost->base || !ashost->fast)
+ if (!ashost->base || !ashost->fast) {
+ ret = -ENOMEM;
goto out_put;
+ }
host->irq = ec->irq;
ashost->host = host;
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index edce5f3cfdba..93ba83e3148e 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -454,7 +454,7 @@ static int cumanascsi2_probe(struct expansion_card *ec,
if (info->info.scsi.dma != NO_DMA)
free_dma(info->info.scsi.dma);
- free_irq(ec->irq, host);
+ free_irq(ec->irq, info);
out_release:
fas216_release(host);
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index e93e047f4316..65bb34ce93b9 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -575,7 +575,7 @@ static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
if (info->info.scsi.dma != NO_DMA)
free_dma(info->info.scsi.dma);
- free_irq(ec->irq, host);
+ free_irq(ec->irq, info);
out_remove:
fas216_remove(host);
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 79aa88911b7f..b5e4a25ea1ef 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -382,7 +382,7 @@ static int powertecscsi_probe(struct expansion_card *ec,
if (info->info.scsi.dma != NO_DMA)
free_dma(info->info.scsi.dma);
- free_irq(ec->irq, host);
+ free_irq(ec->irq, info);
out_release:
fas216_release(host);
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
index d401a096dfc7..2eb2476852b1 100644
--- a/drivers/scsi/bnx2fc/Kconfig
+++ b/drivers/scsi/bnx2fc/Kconfig
@@ -4,6 +4,7 @@ config SCSI_BNX2X_FCOE
depends on (IPV6 || IPV6=n)
depends on LIBFC
depends on LIBFCOE
+ depends on MMU
select NETDEVICES
select ETHERNET
select NET_VENDOR_BROADCOM
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index ba30ff86d581..b27a3738d940 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -3,6 +3,7 @@ config SCSI_BNX2_ISCSI
depends on NET
depends on PCI
depends on (IPV6 || IPV6=n)
+ depends on MMU
select SCSI_ISCSI_ATTRS
select NETDEVICES
select ETHERNET
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index e51923886475..1b6f9351b43f 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
- ret = EINVAL;
+ ret = -EINVAL;
goto bye;
}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index f987c40c47a1..443813feaef4 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3749,6 +3749,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->afu_cookie = cfg->ops->create_afu(pdev);
if (unlikely(!cfg->afu_cookie)) {
dev_err(dev, "%s: create_afu failed\n", __func__);
+ rc = -ENOMEM;
goto out_remove;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index c95c782b93a5..efd2b4312528 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -579,10 +579,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
* even though it shouldn't according to T10.
* The retry without rtpg_ext_hdr_req set
* handles this.
+ * Note: some arrays return a sense key of ILLEGAL_REQUEST
+ * with ASC 00h if they don't support the extended header.
*/
if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
- sense_hdr.sense_key == ILLEGAL_REQUEST &&
- sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
+ sense_hdr.sense_key == ILLEGAL_REQUEST) {
pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
goto retry;
}
@@ -672,8 +673,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
rcu_read_lock();
list_for_each_entry_rcu(h,
&tmp_pg->dh_list, node) {
- /* h->sdev should always be valid */
- BUG_ON(!h->sdev);
+ if (!h->sdev)
+ continue;
h->sdev->access_state = desc[0];
}
rcu_read_unlock();
@@ -719,7 +720,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
pg->expiry = 0;
rcu_read_lock();
list_for_each_entry_rcu(h, &pg->dh_list, node) {
- BUG_ON(!h->sdev);
+ if (!h->sdev)
+ continue;
h->sdev->access_state =
(pg->state & SCSI_ACCESS_STATE_MASK);
if (pg->pref)
@@ -1160,7 +1162,6 @@ static void alua_bus_detach(struct scsi_device *sdev)
spin_lock(&h->pg_lock);
pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
rcu_assign_pointer(h->pg, NULL);
- h->sdev = NULL;
spin_unlock(&h->pg_lock);
if (pg) {
spin_lock_irq(&pg->lock);
@@ -1169,6 +1170,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
kref_put(&pg->kref, release_port_group);
}
sdev->handler_data = NULL;
+ synchronize_rcu();
kfree(h);
}
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 24cbd0a2cc69..658c0726581f 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -267,9 +267,9 @@ static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
WARN_ON(!fcf_dev);
new->fcf_dev = NULL;
fcoe_fcf_device_delete(fcf_dev);
- kfree(new);
mutex_unlock(&cdev->lock);
}
+ kfree(new);
}
/**
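The fcoe_ctlr hunk above moves kfree(new) out of the cdev->lock critical section: the object is already unlinked while the lock is held, so the free can happen after the unlock and the critical section stays short. A pthread sketch of the same shape, with invented structure names (compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fcf { struct fcf *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fcf *fcf_list;

static void delete_first(void)
{
	struct fcf *victim;

	pthread_mutex_lock(&list_lock);
	victim = fcf_list;		/* unlink under the lock ... */
	if (victim)
		fcf_list = victim->next;
	pthread_mutex_unlock(&list_lock);

	free(victim);			/* ... free once the lock is dropped */
}

int main(void)
{
	fcf_list = calloc(1, sizeof(*fcf_list));
	delete_first();
	printf("list empty: %s\n", fcf_list ? "no" : "yes");
	return 0;
}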
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index e52599f44170..bc5dbe3bae5c 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -746,6 +746,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < FNIC_IO_LOCKS; i++)
spin_lock_init(&fnic->io_req_lock[i]);
+ err = -ENOMEM;
fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
if (!fnic->io_req_pool)
goto err_out_free_resources;
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 73ffc16ec022..b521fc7650cb 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1034,7 +1034,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
atomic64_inc(&fnic_stats->io_stats.io_completions);
- io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
+ io_duration_time = jiffies_to_msecs(jiffies) -
+ jiffies_to_msecs(start_time);
if(io_duration_time <= 10)
atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 33191673249c..de4f41bce8e9 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -789,12 +789,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ struct hisi_sas_port *port;
unsigned long flags;
if (!sas_port)
return;
+ port = to_hisi_sas_port(sas_port);
spin_lock_irqsave(&hisi_hba->lock, flags);
port->port_attached = 1;
port->id = phy->port_id;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index f570b8c5d857..0fe21cbdf0ca 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -507,6 +507,12 @@ static ssize_t host_store_rescan(struct device *dev,
return count;
}
+static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
+{
+ device->offload_enabled = 0;
+ device->offload_to_be_enabled = 0;
+}
+
static ssize_t host_show_firmware_revision(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1743,8 +1749,7 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
__func__,
h->scsi_host->host_no, logical_drive->bus,
logical_drive->target, logical_drive->lun);
- logical_drive->offload_enabled = 0;
- logical_drive->offload_to_be_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(logical_drive);
logical_drive->queue_depth = 8;
}
}
@@ -2496,8 +2501,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
IOACCEL2_SERV_RESPONSE_FAILURE) {
if (c2->error_data.status ==
IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
- dev->offload_enabled = 0;
- dev->offload_to_be_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(dev);
}
return hpsa_retry_cmd(h, c);
@@ -3676,10 +3680,17 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
this_device->offload_config =
!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
if (this_device->offload_config) {
- this_device->offload_to_be_enabled =
+ bool offload_enabled =
!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
- if (hpsa_get_raid_map(h, scsi3addr, this_device))
- this_device->offload_to_be_enabled = 0;
+ /*
+ * Check to see if offload can be enabled.
+ */
+ if (offload_enabled) {
+ rc = hpsa_get_raid_map(h, scsi3addr, this_device);
+ if (rc) /* could not load raid_map */
+ goto out;
+ this_device->offload_to_be_enabled = 1;
+ }
}
out:
@@ -3998,8 +4009,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
} else {
this_device->raid_level = RAID_UNKNOWN;
this_device->offload_config = 0;
- this_device->offload_enabled = 0;
- this_device->offload_to_be_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(this_device);
this_device->hba_ioaccel_enabled = 0;
this_device->volume_offline = 0;
this_device->queue_depth = h->nr_cmds;
@@ -5213,8 +5223,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
/* Handles load balance across RAID 1 members.
* (2-drive R1 and R10 with even # of drives.)
* Appropriate for SSDs, not optimal for HDDs
+ * Ensure we have the correct raid_map.
*/
- BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
+ if (le16_to_cpu(map->layout_map_count) != 2) {
+ hpsa_turn_off_ioaccel_for_device(dev);
+ return IO_ACCEL_INELIGIBLE;
+ }
if (dev->offload_to_mirror)
map_index += le16_to_cpu(map->data_disks_per_row);
dev->offload_to_mirror = !dev->offload_to_mirror;
@@ -5222,8 +5236,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
case HPSA_RAID_ADM:
/* Handles N-way mirrors (R1-ADM)
* and R10 with # of drives divisible by 3.)
+ * Ensure we have the correct raid_map.
*/
- BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
+ if (le16_to_cpu(map->layout_map_count) != 3) {
+ hpsa_turn_off_ioaccel_for_device(dev);
+ return IO_ACCEL_INELIGIBLE;
+ }
offload_to_mirror = dev->offload_to_mirror;
raid_map_helper(map, offload_to_mirror,
@@ -5248,7 +5266,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
r5or6_blocks_per_row =
le16_to_cpu(map->strip_size) *
le16_to_cpu(map->data_disks_per_row);
- BUG_ON(r5or6_blocks_per_row == 0);
+ if (r5or6_blocks_per_row == 0) {
+ hpsa_turn_off_ioaccel_for_device(dev);
+ return IO_ACCEL_INELIGIBLE;
+ }
stripesize = r5or6_blocks_per_row *
le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
@@ -8218,7 +8239,7 @@ static int detect_controller_lockup(struct ctlr_info *h)
*
* Called from monitor controller worker (hpsa_event_monitor_worker)
*
- * A Volume (or Volumes that comprise an Array set may be undergoing a
+ * A Volume (or Volumes that comprise an Array set) may be undergoing a
* transformation, so we will be turning off ioaccel for all volumes that
* make up the Array.
*/
@@ -8241,6 +8262,9 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
* Run through current device list used during I/O requests.
*/
for (i = 0; i < h->ndevices; i++) {
+ int offload_to_be_enabled = 0;
+ int offload_config = 0;
+
device = h->dev[i];
if (!device)
@@ -8258,25 +8282,35 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
continue;
ioaccel_status = buf[IOACCEL_STATUS_BYTE];
- device->offload_config =
+
+ /*
+ * Check if offload is still configured on
+ */
+ offload_config =
!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
- if (device->offload_config)
- device->offload_to_be_enabled =
+ /*
+ * If offload is configured on, check to see if ioaccel
+ * needs to be enabled.
+ */
+ if (offload_config)
+ offload_to_be_enabled =
!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
/*
+ * If ioaccel is to be re-enabled, re-enable later during the
+ * scan operation so the driver can get a fresh raidmap
+ * before turning ioaccel back on.
+ */
+ if (offload_to_be_enabled)
+ continue;
+
+ /*
* Immediately turn off ioaccel for any volume the
* controller tells us to. Some of the reasons could be:
* transformation - change to the LVs of an Array.
* degraded volume - component failure
- *
- * If ioaccel is to be re-enabled, re-enable later during the
- * scan operation so the driver can get a fresh raidmap
- * before turning ioaccel back on.
- *
*/
- if (!device->offload_to_be_enabled)
- device->offload_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(device);
}
kfree(buf);
@@ -8747,7 +8781,7 @@ reinit_after_soft_reset:
/* hook into SCSI subsystem */
rc = hpsa_scsi_add_host(h);
if (rc)
- goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+ goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
/* Monitor the controller for firmware lockups */
h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
@@ -8762,6 +8796,8 @@ reinit_after_soft_reset:
HPSA_EVENT_MONITOR_INTERVAL);
return 0;
+clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
+ kfree(h->lastlogicals);
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
hpsa_free_performant_mode(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
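Several hpsa hunks above replace BUG_ON() on an inconsistent raid map with a check that switches ioaccel off for the device and returns IO_ACCEL_INELIGIBLE, so bad controller data degrades to the normal I/O path instead of crashing the machine. A plain C sketch contrasting assert-and-die with validate-and-fall-back; the constant and helpers are illustrative only:

#include <assert.h>
#include <stdio.h>

#define IO_ACCEL_INELIGIBLE 1

struct dev { int accel_enabled; };

static int map_with_assert(struct dev *d, int layout_count)
{
	(void)d;
	assert(layout_count == 2);	/* wrong firmware data kills the process */
	return 0;
}

static int map_with_fallback(struct dev *d, int layout_count)
{
	if (layout_count != 2) {	/* disable the fast path and report it instead */
		d->accel_enabled = 0;
		return IO_ACCEL_INELIGIBLE;
	}
	return 0;
}

int main(void)
{
	struct dev d = { .accel_enabled = 1 };

	if (map_with_fallback(&d, 3) == IO_ACCEL_INELIGIBLE)
		printf("fell back to the normal path, accel=%d\n", d.accel_enabled);
	(void)map_with_assert;		/* kept only for comparison, never called */
	return 0;
}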
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 71d53bb239e2..b811436a46d0 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -506,8 +506,17 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
vhost->action = action;
break;
+ case IBMVFC_HOST_ACTION_REENABLE:
+ case IBMVFC_HOST_ACTION_RESET:
+ vhost->action = action;
+ break;
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_LOGO:
+ case IBMVFC_HOST_ACTION_QUERY_TGTS:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+ case IBMVFC_HOST_ACTION_NONE:
+ default:
switch (vhost->action) {
case IBMVFC_HOST_ACTION_RESET:
case IBMVFC_HOST_ACTION_REENABLE:
@@ -517,15 +526,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
break;
}
break;
- case IBMVFC_HOST_ACTION_LOGO:
- case IBMVFC_HOST_ACTION_QUERY_TGTS:
- case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
- case IBMVFC_HOST_ACTION_NONE:
- case IBMVFC_HOST_ACTION_RESET:
- case IBMVFC_HOST_ACTION_REENABLE:
- default:
- vhost->action = action;
- break;
}
}
@@ -2890,8 +2890,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
unsigned long flags = 0;
spin_lock_irqsave(shost->host_lock, flags);
- if (sdev->type == TYPE_DISK)
+ if (sdev->type == TYPE_DISK) {
sdev->allow_restart = 1;
+ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+ }
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
@@ -4344,26 +4346,45 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_INIT_WAIT:
break;
case IBMVFC_HOST_ACTION_RESET:
- vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
rc = ibmvfc_reset_crq(vhost);
+
spin_lock_irqsave(vhost->host->host_lock, flags);
- if (rc == H_CLOSED)
+ if (!rc || rc == H_CLOSED)
vio_enable_interrupts(to_vio_dev(vhost->dev));
- if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
- (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
- ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
- dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+ if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
+ /*
+ * The only action we could have changed to would have
+ * been reenable, in which case, we skip the rest of
+ * this path and wait until we've done the re-enable
+ * before sending the crq init.
+ */
+ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+
+ if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+ (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+ }
}
break;
case IBMVFC_HOST_ACTION_REENABLE:
- vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
rc = ibmvfc_reenable_crq_queue(vhost);
+
spin_lock_irqsave(vhost->host->host_lock, flags);
- if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
- ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
- dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+ if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
+ /*
+ * The only action we could have changed to would have
+ * been reset, in which case, we skip the rest of this
+ * path and wait until we've done the reset before
+ * sending the crq init.
+ */
+ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+ }
}
break;
case IBMVFC_HOST_ACTION_LOGO:
@@ -4795,6 +4816,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (IS_ERR(vhost->work_thread)) {
dev_err(dev, "Couldn't create kernel thread: %ld\n",
PTR_ERR(vhost->work_thread));
+ rc = PTR_ERR(vhost->work_thread);
goto free_host_mem;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index e60822f07653..036508a4bd5a 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -429,6 +429,8 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
int rc = 0;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+ set_adapter_info(hostdata);
+
/* Re-enable the CRQ */
do {
if (rc)
@@ -2296,16 +2298,12 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
static int ibmvscsi_remove(struct vio_dev *vdev)
{
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
- unsigned long flags;
srp_remove_host(hostdata->host);
scsi_remove_host(hostdata->host);
purge_requests(hostdata, DID_ERROR);
-
- spin_lock_irqsave(hostdata->host->host_lock, flags);
release_event_pool(&hostdata->pool, hostdata);
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
max_events);
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index d453667612f8..15d64f96e623 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -360,7 +360,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
boot_kobj->kobj.kset = boot_kset->kset;
if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype,
NULL, name, index)) {
- kfree(boot_kobj);
+ kobject_put(&boot_kobj->kobj);
return NULL;
}
boot_kobj->data = data;
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 6eb5ff3e2e61..7dfe4237e5e8 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -170,7 +170,9 @@ static int esp_jazz_probe(struct platform_device *dev)
if (!esp->command_block)
goto fail_unmap_regs;
- host->irq = platform_get_irq(dev, 0);
+ host->irq = err = platform_get_irq(dev, 0);
+ if (err < 0)
+ goto fail_unmap_command_block;
err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
if (err < 0)
goto fail_unmap_command_block;
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 8839f509b19a..0b3f4538c1d4 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -593,8 +593,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
if (PTR_ERR(fp) == -FC_EX_CLOSED)
goto out;
- if (IS_ERR(fp))
- goto redisc;
+ if (IS_ERR(fp)) {
+ mutex_lock(&disc->disc_mutex);
+ fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
+ goto out;
+ }
cp = fc_frame_payload_get(fp, sizeof(*cp));
if (!cp)
@@ -621,7 +625,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
new_rdata->disc_id = disc->disc_id;
fc_rport_login(new_rdata);
}
- goto out;
+ goto free_fp;
}
rdata->disc_id = disc->disc_id;
mutex_unlock(&rdata->rp_mutex);
@@ -638,10 +642,10 @@ redisc:
fc_disc_restart(disc);
mutex_unlock(&disc->disc_mutex);
}
+free_fp:
+ fc_frame_free(fp);
out:
kref_put(&rdata->kref, fc_rport_destroy);
- if (!IS_ERR(fp))
- fc_frame_free(fp);
}
/**
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 6ba257cbc6d9..384458d1f73c 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1631,8 +1631,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+ FC_EXCH_DBG(ep, "ep is completed already, "
+ "hence skip calling the resp\n");
+ goto skip_resp;
+ }
}
/*
@@ -1651,6 +1656,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
if (!fc_invoke_resp(ep, sp, fp))
fc_frame_free(fp);
+skip_resp:
fc_exch_release(ep);
return;
rel:
@@ -1907,10 +1913,16 @@ static void fc_exch_reset(struct fc_exch *ep)
fc_exch_hold(ep);
- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+ FC_EXCH_DBG(ep, "ep is completed already, "
+ "hence skip calling the resp\n");
+ goto skip_resp;
+ }
fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+skip_resp:
fc_seq_set_resp(sp, NULL, ep->arg);
fc_exch_release(ep);
}
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index ff943f477d6f..f653109d56af 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1741,7 +1741,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
- "lport->mfs:%hu\n", mfs, lport->mfs);
+ "lport->mfs:%u\n", mfs, lport->mfs);
fc_lport_error(lport, fp);
goto out;
}
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 90a748551ede..2b3239765c24 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -145,8 +145,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
lockdep_assert_held(&lport->disc.disc_mutex);
rdata = fc_rport_lookup(lport, port_id);
- if (rdata)
+ if (rdata) {
+ kref_put(&rdata->kref, fc_rport_destroy);
return rdata;
+ }
if (lport->rport_priv_size > 0)
rport_priv_size = lport->rport_priv_size;
@@ -493,10 +495,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
fc_rport_state_enter(rdata, RPORT_ST_DELETE);
- kref_get(&rdata->kref);
- if (rdata->event == RPORT_EV_NONE &&
- !queue_work(rport_event_queue, &rdata->event_work))
- kref_put(&rdata->kref, fc_rport_destroy);
+ if (rdata->event == RPORT_EV_NONE) {
+ kref_get(&rdata->kref);
+ if (!queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
rdata->event = event;
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 7a05c7271766..81471c304991 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -571,8 +571,8 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
if (conn->task == task)
conn->task = NULL;
- if (conn->ping_task == task)
- conn->ping_task = NULL;
+ if (READ_ONCE(conn->ping_task) == task)
+ WRITE_ONCE(conn->ping_task, NULL);
/* release get from queueing */
__iscsi_put_task(task);
@@ -781,6 +781,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
task->conn->session->age);
}
+ if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK))
+ WRITE_ONCE(conn->ping_task, task);
+
if (!ihost->workq) {
if (iscsi_prep_mgmt_task(conn, task))
goto free_task;
@@ -988,8 +991,11 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
struct iscsi_nopout hdr;
struct iscsi_task *task;
- if (!rhdr && conn->ping_task)
- return -EINVAL;
+ if (!rhdr) {
+ if (READ_ONCE(conn->ping_task))
+ return -EINVAL;
+ WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK);
+ }
memset(&hdr, 0, sizeof(struct iscsi_nopout));
hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
@@ -1004,11 +1010,12 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
if (!task) {
+ if (!rhdr)
+ WRITE_ONCE(conn->ping_task, NULL);
iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
return -EIO;
} else if (!rhdr) {
/* only track our nops */
- conn->ping_task = task;
conn->last_ping = jiffies;
}
@@ -1021,7 +1028,7 @@ static int iscsi_nop_out_rsp(struct iscsi_task *task,
struct iscsi_conn *conn = task->conn;
int rc = 0;
- if (conn->ping_task != task) {
+ if (READ_ONCE(conn->ping_task) != task) {
/*
* If this is not in response to one of our
* nops then it must be from userspace.
@@ -1562,14 +1569,9 @@ check_mgmt:
}
rc = iscsi_prep_scsi_cmd_pdu(conn->task);
if (rc) {
- if (rc == -ENOMEM || rc == -EACCES) {
- spin_lock_bh(&conn->taskqueuelock);
- list_add_tail(&conn->task->running,
- &conn->cmdqueue);
- conn->task = NULL;
- spin_unlock_bh(&conn->taskqueuelock);
- goto done;
- } else
+ if (rc == -ENOMEM || rc == -EACCES)
+ fail_scsi_task(conn->task, DID_IMM_RETRY);
+ else
fail_scsi_task(conn->task, DID_ABORT);
spin_lock_bh(&conn->taskqueuelock);
continue;
@@ -1961,7 +1963,7 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
*/
static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
{
- if (conn->ping_task &&
+ if (READ_ONCE(conn->ping_task) &&
time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
(conn->ping_timeout * HZ), jiffies))
return 1;
@@ -2096,7 +2098,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
* Checking the transport already or nop from a cmd timeout still
* running
*/
- if (conn->ping_task) {
+ if (READ_ONCE(conn->ping_task)) {
task->have_checked_conn = true;
rc = BLK_EH_RESET_TIMER;
goto done;
@@ -3361,125 +3363,125 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
switch(param) {
case ISCSI_PARAM_FAST_ABORT:
- len = sprintf(buf, "%d\n", session->fast_abort);
+ len = sysfs_emit(buf, "%d\n", session->fast_abort);
break;
case ISCSI_PARAM_ABORT_TMO:
- len = sprintf(buf, "%d\n", session->abort_timeout);
+ len = sysfs_emit(buf, "%d\n", session->abort_timeout);
break;
case ISCSI_PARAM_LU_RESET_TMO:
- len = sprintf(buf, "%d\n", session->lu_reset_timeout);
+ len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout);
break;
case ISCSI_PARAM_TGT_RESET_TMO:
- len = sprintf(buf, "%d\n", session->tgt_reset_timeout);
+ len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout);
break;
case ISCSI_PARAM_INITIAL_R2T_EN:
- len = sprintf(buf, "%d\n", session->initial_r2t_en);
+ len = sysfs_emit(buf, "%d\n", session->initial_r2t_en);
break;
case ISCSI_PARAM_MAX_R2T:
- len = sprintf(buf, "%hu\n", session->max_r2t);
+ len = sysfs_emit(buf, "%hu\n", session->max_r2t);
break;
case ISCSI_PARAM_IMM_DATA_EN:
- len = sprintf(buf, "%d\n", session->imm_data_en);
+ len = sysfs_emit(buf, "%d\n", session->imm_data_en);
break;
case ISCSI_PARAM_FIRST_BURST:
- len = sprintf(buf, "%u\n", session->first_burst);
+ len = sysfs_emit(buf, "%u\n", session->first_burst);
break;
case ISCSI_PARAM_MAX_BURST:
- len = sprintf(buf, "%u\n", session->max_burst);
+ len = sysfs_emit(buf, "%u\n", session->max_burst);
break;
case ISCSI_PARAM_PDU_INORDER_EN:
- len = sprintf(buf, "%d\n", session->pdu_inorder_en);
+ len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en);
break;
case ISCSI_PARAM_DATASEQ_INORDER_EN:
- len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
+ len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en);
break;
case ISCSI_PARAM_DEF_TASKMGMT_TMO:
- len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo);
+ len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo);
break;
case ISCSI_PARAM_ERL:
- len = sprintf(buf, "%d\n", session->erl);
+ len = sysfs_emit(buf, "%d\n", session->erl);
break;
case ISCSI_PARAM_TARGET_NAME:
- len = sprintf(buf, "%s\n", session->targetname);
+ len = sysfs_emit(buf, "%s\n", session->targetname);
break;
case ISCSI_PARAM_TARGET_ALIAS:
- len = sprintf(buf, "%s\n", session->targetalias);
+ len = sysfs_emit(buf, "%s\n", session->targetalias);
break;
case ISCSI_PARAM_TPGT:
- len = sprintf(buf, "%d\n", session->tpgt);
+ len = sysfs_emit(buf, "%d\n", session->tpgt);
break;
case ISCSI_PARAM_USERNAME:
- len = sprintf(buf, "%s\n", session->username);
+ len = sysfs_emit(buf, "%s\n", session->username);
break;
case ISCSI_PARAM_USERNAME_IN:
- len = sprintf(buf, "%s\n", session->username_in);
+ len = sysfs_emit(buf, "%s\n", session->username_in);
break;
case ISCSI_PARAM_PASSWORD:
- len = sprintf(buf, "%s\n", session->password);
+ len = sysfs_emit(buf, "%s\n", session->password);
break;
case ISCSI_PARAM_PASSWORD_IN:
- len = sprintf(buf, "%s\n", session->password_in);
+ len = sysfs_emit(buf, "%s\n", session->password_in);
break;
case ISCSI_PARAM_IFACE_NAME:
- len = sprintf(buf, "%s\n", session->ifacename);
+ len = sysfs_emit(buf, "%s\n", session->ifacename);
break;
case ISCSI_PARAM_INITIATOR_NAME:
- len = sprintf(buf, "%s\n", session->initiatorname);
+ len = sysfs_emit(buf, "%s\n", session->initiatorname);
break;
case ISCSI_PARAM_BOOT_ROOT:
- len = sprintf(buf, "%s\n", session->boot_root);
+ len = sysfs_emit(buf, "%s\n", session->boot_root);
break;
case ISCSI_PARAM_BOOT_NIC:
- len = sprintf(buf, "%s\n", session->boot_nic);
+ len = sysfs_emit(buf, "%s\n", session->boot_nic);
break;
case ISCSI_PARAM_BOOT_TARGET:
- len = sprintf(buf, "%s\n", session->boot_target);
+ len = sysfs_emit(buf, "%s\n", session->boot_target);
break;
case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
- len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable);
+ len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable);
break;
case ISCSI_PARAM_DISCOVERY_SESS:
- len = sprintf(buf, "%u\n", session->discovery_sess);
+ len = sysfs_emit(buf, "%u\n", session->discovery_sess);
break;
case ISCSI_PARAM_PORTAL_TYPE:
- len = sprintf(buf, "%s\n", session->portal_type);
+ len = sysfs_emit(buf, "%s\n", session->portal_type);
break;
case ISCSI_PARAM_CHAP_AUTH_EN:
- len = sprintf(buf, "%u\n", session->chap_auth_en);
+ len = sysfs_emit(buf, "%u\n", session->chap_auth_en);
break;
case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
- len = sprintf(buf, "%u\n", session->discovery_logout_en);
+ len = sysfs_emit(buf, "%u\n", session->discovery_logout_en);
break;
case ISCSI_PARAM_BIDI_CHAP_EN:
- len = sprintf(buf, "%u\n", session->bidi_chap_en);
+ len = sysfs_emit(buf, "%u\n", session->bidi_chap_en);
break;
case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
- len = sprintf(buf, "%u\n", session->discovery_auth_optional);
+ len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional);
break;
case ISCSI_PARAM_DEF_TIME2WAIT:
- len = sprintf(buf, "%d\n", session->time2wait);
+ len = sysfs_emit(buf, "%d\n", session->time2wait);
break;
case ISCSI_PARAM_DEF_TIME2RETAIN:
- len = sprintf(buf, "%d\n", session->time2retain);
+ len = sysfs_emit(buf, "%d\n", session->time2retain);
break;
case ISCSI_PARAM_TSID:
- len = sprintf(buf, "%u\n", session->tsid);
+ len = sysfs_emit(buf, "%u\n", session->tsid);
break;
case ISCSI_PARAM_ISID:
- len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
+ len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n",
session->isid[0], session->isid[1],
session->isid[2], session->isid[3],
session->isid[4], session->isid[5]);
break;
case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
- len = sprintf(buf, "%u\n", session->discovery_parent_idx);
+ len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx);
break;
case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
if (session->discovery_parent_type)
- len = sprintf(buf, "%s\n",
+ len = sysfs_emit(buf, "%s\n",
session->discovery_parent_type);
else
- len = sprintf(buf, "\n");
+ len = sysfs_emit(buf, "\n");
break;
default:
return -ENOSYS;
@@ -3511,16 +3513,16 @@ int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
case ISCSI_PARAM_CONN_ADDRESS:
case ISCSI_HOST_PARAM_IPADDRESS:
if (sin)
- len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr);
+ len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr);
else
- len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
+ len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr);
break;
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_LOCAL_PORT:
if (sin)
- len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
+ len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port));
else
- len = sprintf(buf, "%hu\n",
+ len = sysfs_emit(buf, "%hu\n",
be16_to_cpu(sin6->sin6_port));
break;
default:
@@ -3539,88 +3541,88 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
switch(param) {
case ISCSI_PARAM_PING_TMO:
- len = sprintf(buf, "%u\n", conn->ping_timeout);
+ len = sysfs_emit(buf, "%u\n", conn->ping_timeout);
break;
case ISCSI_PARAM_RECV_TMO:
- len = sprintf(buf, "%u\n", conn->recv_timeout);
+ len = sysfs_emit(buf, "%u\n", conn->recv_timeout);
break;
case ISCSI_PARAM_MAX_RECV_DLENGTH:
- len = sprintf(buf, "%u\n", conn->max_recv_dlength);
+ len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength);
break;
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
- len = sprintf(buf, "%u\n", conn->max_xmit_dlength);
+ len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength);
break;
case ISCSI_PARAM_HDRDGST_EN:
- len = sprintf(buf, "%d\n", conn->hdrdgst_en);
+ len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en);
break;
case ISCSI_PARAM_DATADGST_EN:
- len = sprintf(buf, "%d\n", conn->datadgst_en);
+ len = sysfs_emit(buf, "%d\n", conn->datadgst_en);
break;
case ISCSI_PARAM_IFMARKER_EN:
- len = sprintf(buf, "%d\n", conn->ifmarker_en);
+ len = sysfs_emit(buf, "%d\n", conn->ifmarker_en);
break;
case ISCSI_PARAM_OFMARKER_EN:
- len = sprintf(buf, "%d\n", conn->ofmarker_en);
+ len = sysfs_emit(buf, "%d\n", conn->ofmarker_en);
break;
case ISCSI_PARAM_EXP_STATSN:
- len = sprintf(buf, "%u\n", conn->exp_statsn);
+ len = sysfs_emit(buf, "%u\n", conn->exp_statsn);
break;
case ISCSI_PARAM_PERSISTENT_PORT:
- len = sprintf(buf, "%d\n", conn->persistent_port);
+ len = sysfs_emit(buf, "%d\n", conn->persistent_port);
break;
case ISCSI_PARAM_PERSISTENT_ADDRESS:
- len = sprintf(buf, "%s\n", conn->persistent_address);
+ len = sysfs_emit(buf, "%s\n", conn->persistent_address);
break;
case ISCSI_PARAM_STATSN:
- len = sprintf(buf, "%u\n", conn->statsn);
+ len = sysfs_emit(buf, "%u\n", conn->statsn);
break;
case ISCSI_PARAM_MAX_SEGMENT_SIZE:
- len = sprintf(buf, "%u\n", conn->max_segment_size);
+ len = sysfs_emit(buf, "%u\n", conn->max_segment_size);
break;
case ISCSI_PARAM_KEEPALIVE_TMO:
- len = sprintf(buf, "%u\n", conn->keepalive_tmo);
+ len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo);
break;
case ISCSI_PARAM_LOCAL_PORT:
- len = sprintf(buf, "%u\n", conn->local_port);
+ len = sysfs_emit(buf, "%u\n", conn->local_port);
break;
case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
- len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat);
break;
case ISCSI_PARAM_TCP_NAGLE_DISABLE:
- len = sprintf(buf, "%u\n", conn->tcp_nagle_disable);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable);
break;
case ISCSI_PARAM_TCP_WSF_DISABLE:
- len = sprintf(buf, "%u\n", conn->tcp_wsf_disable);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable);
break;
case ISCSI_PARAM_TCP_TIMER_SCALE:
- len = sprintf(buf, "%u\n", conn->tcp_timer_scale);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale);
break;
case ISCSI_PARAM_TCP_TIMESTAMP_EN:
- len = sprintf(buf, "%u\n", conn->tcp_timestamp_en);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en);
break;
case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
- len = sprintf(buf, "%u\n", conn->fragment_disable);
+ len = sysfs_emit(buf, "%u\n", conn->fragment_disable);
break;
case ISCSI_PARAM_IPV4_TOS:
- len = sprintf(buf, "%u\n", conn->ipv4_tos);
+ len = sysfs_emit(buf, "%u\n", conn->ipv4_tos);
break;
case ISCSI_PARAM_IPV6_TC:
- len = sprintf(buf, "%u\n", conn->ipv6_traffic_class);
+ len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class);
break;
case ISCSI_PARAM_IPV6_FLOW_LABEL:
- len = sprintf(buf, "%u\n", conn->ipv6_flow_label);
+ len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label);
break;
case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
- len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6);
+ len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6);
break;
case ISCSI_PARAM_TCP_XMIT_WSF:
- len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf);
break;
case ISCSI_PARAM_TCP_RECV_WSF:
- len = sprintf(buf, "%u\n", conn->tcp_recv_wsf);
+ len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf);
break;
case ISCSI_PARAM_LOCAL_IPADDR:
- len = sprintf(buf, "%s\n", conn->local_ipaddr);
+ len = sysfs_emit(buf, "%s\n", conn->local_ipaddr);
break;
default:
return -ENOSYS;
@@ -3638,13 +3640,13 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
switch (param) {
case ISCSI_HOST_PARAM_NETDEV_NAME:
- len = sprintf(buf, "%s\n", ihost->netdev);
+ len = sysfs_emit(buf, "%s\n", ihost->netdev);
break;
case ISCSI_HOST_PARAM_HWADDRESS:
- len = sprintf(buf, "%s\n", ihost->hwaddress);
+ len = sysfs_emit(buf, "%s\n", ihost->hwaddress);
break;
case ISCSI_HOST_PARAM_INITIATOR_NAME:
- len = sprintf(buf, "%s\n", ihost->initiatorname);
+ len = sysfs_emit(buf, "%s\n", ihost->initiatorname);
break;
default:
return -ENOSYS;
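
The hunks above convert the remaining sprintf() users in the iSCSI transport getters to sysfs_emit(), which bounds output to PAGE_SIZE and warns on non-sysfs buffers. A minimal sketch of the same call in a generic show() handler follows; the device structure and attribute are invented for illustration, not taken from the driver above.

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hedged sketch: 'struct example_dev' and its 'timeout' field are
 * illustrative only.
 */
struct example_dev {
	unsigned int timeout;
};

static ssize_t timeout_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct example_dev *edev = dev_get_drvdata(dev);

	/* sysfs_emit() never writes past PAGE_SIZE and returns the length */
	return sysfs_emit(buf, "%u\n", edev->timeout);
}
static DEVICE_ATTR_RO(timeout);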
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 64a958a99f6a..e032af654733 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -215,15 +215,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
task->total_xfer_len = qc->nbytes;
task->num_scatter = qc->n_elem;
+ task->data_dir = qc->dma_dir;
+ } else if (qc->tf.protocol == ATA_PROT_NODATA) {
+ task->data_dir = DMA_NONE;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
xfer += sg_dma_len(sg);
task->total_xfer_len = xfer;
task->num_scatter = si;
+ task->data_dir = qc->dma_dir;
}
-
- task->data_dir = qc->dma_dir;
task->scatter = qc->sg;
task->ata_task.retry_count = 1;
task->task_state_flags = SAS_TASK_STATE_PENDING;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index fad23dd39114..1a0b2ce398f7 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -41,7 +41,7 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy
static void sas_resume_port(struct asd_sas_phy *phy)
{
- struct domain_device *dev;
+ struct domain_device *dev, *n;
struct asd_sas_port *port = phy->port;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
@@ -60,7 +60,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
* 1/ presume every device came back
* 2/ force the next revalidation to check all expander phys
*/
- list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
int i, rc;
rc = sas_notify_lldd_dev_found(dev);
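
sas_resume_port() switches to the _safe iterator, presumably because the loop body can unlink the current entry from port->dev_list when the LLDD rejects the device. A self-contained sketch, using generic types rather than libsas ones, of why the cached-next variant is required when the body may delete nodes:

#include <linux/list.h>
#include <linux/slab.h>

struct item {			/* illustrative type */
	struct list_head node;
	int broken;
};

/* Hedged sketch: list_for_each_entry() would dereference a freed node
 * after list_del(); the _safe variant caches the next pointer first.
 */
static void prune(struct list_head *head)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, node) {
		if (it->broken) {
			list_del(&it->node);
			kfree(it);
		}
	}
}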
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index fe084d47ed9e..3447d19d4147 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -332,7 +332,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
- rcu_read_lock();
scnprintf(tmp, sizeof(tmp),
"XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
phba->brd_no,
@@ -341,7 +340,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
phba->sli4_hba.scsi_xri_max,
lpfc_sli4_get_els_iocb_cnt(phba));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto buffer_done;
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -357,7 +356,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_nodename.u.wwn),
localport->port_id, statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto buffer_done;
+
+ spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
nrport = NULL;
@@ -384,39 +385,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
/* Tab in to show lport ownership. */
if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
if (phba->brd_no >= 10) {
if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
nrport->port_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
nrport->node_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "DID x%06x ",
nrport->port_id);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
/* An NVME rport can have multiple roles. */
if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
FC_PORT_ROLE_NVME_TARGET |
@@ -424,14 +425,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
nrport->port_role);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
scnprintf(tmp, sizeof(tmp), "%s\n", statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
- rcu_read_unlock();
+ spin_unlock_irq(shost->host_lock);
if (!lport)
goto buffer_done;
@@ -491,11 +492,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&lport->cmpl_fcp_err));
strlcat(buf, tmp, PAGE_SIZE);
- /* RCU is already unlocked. */
+ /* host_lock is already unlocked. */
goto buffer_done;
- rcu_unlock_buf_done:
- rcu_read_unlock();
+ unlock_buf_done:
+ spin_unlock_irq(shost->host_lock);
buffer_done:
len = strnlen(buf, PAGE_SIZE);
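
The lpfc_nvme_info_show() hunks replace RCU read-side protection with shost->host_lock and rename the bail-out label, so every overflow check inside the locked list walk unlocks before finishing. A generic, self-contained sketch of that single-unlock-label convention; the function and its parameters are illustrative:

#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hedged sketch: all early exits from the locked region funnel through
 * one unlock label, so the lock cannot leak on a full buffer.
 */
static int fill_report(spinlock_t *lock, char *buf, size_t len,
		       const char *const *names, int count)
{
	int i, ret = 0;

	spin_lock_irq(lock);
	for (i = 0; i < count; i++) {
		if (strlcat(buf, names[i], len) >= len) {
			ret = -E2BIG;		/* buffer exhausted */
			goto unlock;
		}
	}
unlock:
	spin_unlock_irq(lock);
	return ret;
}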
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bea24bc4410a..1a0b1cb9de78 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -56,9 +56,6 @@ void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
-void lpfc_supported_pages(struct lpfcMboxq *);
-void lpfc_pc_sli4_params(struct lpfcMboxq *);
-int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
uint16_t, uint16_t, bool);
int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 384f5cd7c3c8..99b4ff78f9dc 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1737,8 +1737,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
@@ -1754,8 +1754,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
/* This string MUST be consistent with other FC platforms
* supported by Broadcom.
@@ -1779,8 +1779,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->SerialNumber,
sizeof(ae->un.AttrString));
@@ -1801,8 +1801,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->ModelName,
sizeof(ae->un.AttrString));
@@ -1822,8 +1822,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->ModelDesc,
sizeof(ae->un.AttrString));
@@ -1845,8 +1845,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t i, j, incr, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
/* Convert JEDEC ID to ascii for hardware version */
incr = vp->rev.biuRev;
@@ -1875,8 +1875,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, lpfc_release_version,
sizeof(ae->un.AttrString));
@@ -1897,8 +1897,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
@@ -1922,8 +1922,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
len = strnlen(ae->un.AttrString,
@@ -1942,8 +1942,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s",
init_utsname()->sysname,
@@ -1965,7 +1965,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE);
size = FOURBYTES + sizeof(uint32_t);
@@ -1981,8 +1981,8 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
len = lpfc_vport_symbolic_node_name(vport,
ae->un.AttrString, 256);
@@ -2000,7 +2000,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* Nothing is defined for this currently */
ae->un.AttrInt = cpu_to_be32(0);
@@ -2017,7 +2017,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* Each driver instance corresponds to a single port */
ae->un.AttrInt = cpu_to_be32(1);
@@ -2034,8 +2034,8 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fabric_nodename,
sizeof(struct lpfc_name));
@@ -2053,8 +2053,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
len = strnlen(ae->un.AttrString,
@@ -2073,7 +2073,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* Driver doesn't have access to this information */
ae->un.AttrInt = cpu_to_be32(0);
@@ -2090,8 +2090,8 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, "EMULEX",
sizeof(ae->un.AttrString));
@@ -2112,8 +2112,8 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 32);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
@@ -2134,7 +2134,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -2186,7 +2186,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
switch (phba->fc_linkspeed) {
@@ -2253,7 +2253,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
hsp = (struct serv_parm *)&vport->fc_sparam;
ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) |
@@ -2273,8 +2273,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
snprintf(ae->un.AttrString, sizeof(ae->un.AttrString),
"/sys/class/scsi_host/host%d", shost->host_no);
@@ -2294,8 +2294,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
init_utsname()->nodename);
@@ -2315,8 +2315,8 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
@@ -2333,8 +2333,8 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName,
sizeof(struct lpfc_name));
@@ -2351,8 +2351,8 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256);
len += (len & 3) ? (4 - (len & 3)) : 4;
@@ -2370,7 +2370,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT);
else
@@ -2388,7 +2388,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
size = FOURBYTES + sizeof(uint32_t);
ad->AttrLen = cpu_to_be16(size);
@@ -2403,8 +2403,8 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fabric_portname,
sizeof(struct lpfc_name));
@@ -2421,8 +2421,8 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 32);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
@@ -2442,7 +2442,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* Link Up - operational */
ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE);
size = FOURBYTES + sizeof(uint32_t);
@@ -2458,7 +2458,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
vport->fdmi_num_disc = lpfc_find_map_node(vport);
ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc);
size = FOURBYTES + sizeof(uint32_t);
@@ -2474,7 +2474,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(vport->fc_myDID);
size = FOURBYTES + sizeof(uint32_t);
ad->AttrLen = cpu_to_be16(size);
@@ -2489,8 +2489,8 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, "Smart SAN Initiator",
sizeof(ae->un.AttrString));
@@ -2510,8 +2510,8 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
@@ -2531,8 +2531,8 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
sizeof(ae->un.AttrString));
@@ -2553,8 +2553,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->ModelName,
sizeof(ae->un.AttrString));
@@ -2573,7 +2573,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* SRIOV (type 3) is not supported */
if (vport->vpi)
@@ -2593,7 +2593,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(0);
size = FOURBYTES + sizeof(uint32_t);
ad->AttrLen = cpu_to_be16(size);
@@ -2608,7 +2608,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(1);
size = FOURBYTES + sizeof(uint32_t);
ad->AttrLen = cpu_to_be16(size);
@@ -2756,7 +2756,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Registered Port List */
/* One entry (port) per adapter */
rh->rpl.EntryCnt = cpu_to_be32(1);
- memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName,
+ memcpy(&rh->rpl.pe.PortName,
+ &phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
/* point to the HBA attribute block */
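
Throughout the lpfc_ct.c attribute helpers, the literal 256- and 32-byte memsets become memset(ae, 0, sizeof(*ae)) now that AttrValue is a typed member rather than a bare uint32_t. A tiny sketch of the idiom with an invented structure:

#include <linux/string.h>
#include <linux/types.h>

struct attr_entry {		/* illustrative stand-in, not the lpfc type */
	u8 value[256];
};

static void reset_entry(struct attr_entry *ae)
{
	/* sizeof(*ae) tracks the type; a literal 256 would silently clear
	 * too little or too much if the structure ever changes size.
	 */
	memset(ae, 0, sizeof(*ae));
}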
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index f1951c432766..2c70e311943a 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1843,7 +1843,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
memset(dstbuf, 0, 33);
size = (nbytes < 32) ? nbytes : 32;
if (copy_from_user(dstbuf, buf, size))
- return 0;
+ return -EFAULT;
if (dent == phba->debug_InjErrLBA) {
if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f'))
@@ -1851,7 +1851,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
}
if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
- return 0;
+ return -EINVAL;
if (dent == phba->debug_writeGuard)
phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
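
lpfc_debugfs_dif_err_write() now returns -EFAULT when the copy from user space fails and -EINVAL when the value does not parse, instead of claiming zero bytes were written. A hedged sketch of the same shape for a generic debugfs write handler; the backing variable is made up:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static u64 example_value;	/* illustrative backing store */

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t nbytes, loff_t *ppos)
{
	char kbuf[32] = { 0 };
	size_t size = min(nbytes, sizeof(kbuf) - 1);

	if (copy_from_user(kbuf, ubuf, size))
		return -EFAULT;		/* report the fault, not 0 */
	if (kstrtoull(kbuf, 0, &example_value))
		return -EINVAL;		/* malformed number */
	return nbytes;
}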
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 7398350b08b4..6a4b496081e4 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4112,7 +4112,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
out:
if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
+ if (mbox)
+ ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
+ ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
spin_unlock_irq(shost->host_lock);
/* If the node is not being used by another discovery thread,
@@ -7949,6 +7951,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_lock_irq(shost->host_lock);
if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
spin_unlock_irq(shost->host_lock);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
goto dropit;
}
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 009aa0eee040..48d4d576d588 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1333,25 +1333,8 @@ struct fc_rdp_res_frame {
/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
#define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */
-/*
- * Registered Port List Format
- */
-struct lpfc_fdmi_reg_port_list {
- uint32_t EntryCnt;
- uint32_t pe; /* Variable-length array */
-};
-
-
/* Definitions for HBA / Port attribute entries */
-struct lpfc_fdmi_attr_def { /* Defined in TLV format */
- /* Structure is in Big Endian format */
- uint32_t AttrType:16;
- uint32_t AttrLen:16;
- uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */
-};
-
-
/* Attribute Entry */
struct lpfc_fdmi_attr_entry {
union {
@@ -1362,7 +1345,13 @@ struct lpfc_fdmi_attr_entry {
} un;
};
-#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry)
+struct lpfc_fdmi_attr_def { /* Defined in TLV format */
+ /* Structure is in Big Endian format */
+ uint32_t AttrType:16;
+ uint32_t AttrLen:16;
+ /* Marks start of Value (ATTRIBUTE_ENTRY) */
+ struct lpfc_fdmi_attr_entry AttrValue;
+} __packed;
/*
* HBA Attribute Block
@@ -1387,12 +1376,19 @@ struct lpfc_fdmi_hba_ident {
};
/*
+ * Registered Port List Format
+ */
+struct lpfc_fdmi_reg_port_list {
+ uint32_t EntryCnt;
+ struct lpfc_fdmi_port_entry pe;
+} __packed;
+
+/*
* Register HBA(RHBA)
*/
struct lpfc_fdmi_reg_hba {
struct lpfc_fdmi_hba_ident hi;
- struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */
-/* struct lpfc_fdmi_attr_block ab; */
+ struct lpfc_fdmi_reg_port_list rpl;
};
/*
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 083f8c8706e5..a9bd12bfc15e 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -122,6 +122,7 @@ struct lpfc_sli_intf {
/* Define SLI4 Alignment requirements. */
#define LPFC_ALIGN_16_BYTE 16
#define LPFC_ALIGN_64_BYTE 64
+#define SLI4_PAGE_SIZE 4096
/* Define SLI4 specific definitions. */
#define LPFC_MQ_CQE_BYTE_OFFSET 256
@@ -2886,62 +2887,6 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3
};
-struct lpfc_mbx_supp_pages {
- uint32_t word1;
-#define qs_SHIFT 0
-#define qs_MASK 0x00000001
-#define qs_WORD word1
-#define wr_SHIFT 1
-#define wr_MASK 0x00000001
-#define wr_WORD word1
-#define pf_SHIFT 8
-#define pf_MASK 0x000000ff
-#define pf_WORD word1
-#define cpn_SHIFT 16
-#define cpn_MASK 0x000000ff
-#define cpn_WORD word1
- uint32_t word2;
-#define list_offset_SHIFT 0
-#define list_offset_MASK 0x000000ff
-#define list_offset_WORD word2
-#define next_offset_SHIFT 8
-#define next_offset_MASK 0x000000ff
-#define next_offset_WORD word2
-#define elem_cnt_SHIFT 16
-#define elem_cnt_MASK 0x000000ff
-#define elem_cnt_WORD word2
- uint32_t word3;
-#define pn_0_SHIFT 24
-#define pn_0_MASK 0x000000ff
-#define pn_0_WORD word3
-#define pn_1_SHIFT 16
-#define pn_1_MASK 0x000000ff
-#define pn_1_WORD word3
-#define pn_2_SHIFT 8
-#define pn_2_MASK 0x000000ff
-#define pn_2_WORD word3
-#define pn_3_SHIFT 0
-#define pn_3_MASK 0x000000ff
-#define pn_3_WORD word3
- uint32_t word4;
-#define pn_4_SHIFT 24
-#define pn_4_MASK 0x000000ff
-#define pn_4_WORD word4
-#define pn_5_SHIFT 16
-#define pn_5_MASK 0x000000ff
-#define pn_5_WORD word4
-#define pn_6_SHIFT 8
-#define pn_6_MASK 0x000000ff
-#define pn_6_WORD word4
-#define pn_7_SHIFT 0
-#define pn_7_MASK 0x000000ff
-#define pn_7_WORD word4
- uint32_t rsvd[27];
-#define LPFC_SUPP_PAGES 0
-#define LPFC_BLOCK_GUARD_PROFILES 1
-#define LPFC_SLI4_PARAMETERS 2
-};
-
struct lpfc_mbx_memory_dump_type3 {
uint32_t word1;
#define lpfc_mbx_memory_dump_type3_type_SHIFT 0
@@ -3158,121 +3103,6 @@ struct user_eeprom {
uint8_t reserved191[57];
};
-struct lpfc_mbx_pc_sli4_params {
- uint32_t word1;
-#define qs_SHIFT 0
-#define qs_MASK 0x00000001
-#define qs_WORD word1
-#define wr_SHIFT 1
-#define wr_MASK 0x00000001
-#define wr_WORD word1
-#define pf_SHIFT 8
-#define pf_MASK 0x000000ff
-#define pf_WORD word1
-#define cpn_SHIFT 16
-#define cpn_MASK 0x000000ff
-#define cpn_WORD word1
- uint32_t word2;
-#define if_type_SHIFT 0
-#define if_type_MASK 0x00000007
-#define if_type_WORD word2
-#define sli_rev_SHIFT 4
-#define sli_rev_MASK 0x0000000f
-#define sli_rev_WORD word2
-#define sli_family_SHIFT 8
-#define sli_family_MASK 0x000000ff
-#define sli_family_WORD word2
-#define featurelevel_1_SHIFT 16
-#define featurelevel_1_MASK 0x000000ff
-#define featurelevel_1_WORD word2
-#define featurelevel_2_SHIFT 24
-#define featurelevel_2_MASK 0x0000001f
-#define featurelevel_2_WORD word2
- uint32_t word3;
-#define fcoe_SHIFT 0
-#define fcoe_MASK 0x00000001
-#define fcoe_WORD word3
-#define fc_SHIFT 1
-#define fc_MASK 0x00000001
-#define fc_WORD word3
-#define nic_SHIFT 2
-#define nic_MASK 0x00000001
-#define nic_WORD word3
-#define iscsi_SHIFT 3
-#define iscsi_MASK 0x00000001
-#define iscsi_WORD word3
-#define rdma_SHIFT 4
-#define rdma_MASK 0x00000001
-#define rdma_WORD word3
- uint32_t sge_supp_len;
-#define SLI4_PAGE_SIZE 4096
- uint32_t word5;
-#define if_page_sz_SHIFT 0
-#define if_page_sz_MASK 0x0000ffff
-#define if_page_sz_WORD word5
-#define loopbk_scope_SHIFT 24
-#define loopbk_scope_MASK 0x0000000f
-#define loopbk_scope_WORD word5
-#define rq_db_window_SHIFT 28
-#define rq_db_window_MASK 0x0000000f
-#define rq_db_window_WORD word5
- uint32_t word6;
-#define eq_pages_SHIFT 0
-#define eq_pages_MASK 0x0000000f
-#define eq_pages_WORD word6
-#define eqe_size_SHIFT 8
-#define eqe_size_MASK 0x000000ff
-#define eqe_size_WORD word6
- uint32_t word7;
-#define cq_pages_SHIFT 0
-#define cq_pages_MASK 0x0000000f
-#define cq_pages_WORD word7
-#define cqe_size_SHIFT 8
-#define cqe_size_MASK 0x000000ff
-#define cqe_size_WORD word7
- uint32_t word8;
-#define mq_pages_SHIFT 0
-#define mq_pages_MASK 0x0000000f
-#define mq_pages_WORD word8
-#define mqe_size_SHIFT 8
-#define mqe_size_MASK 0x000000ff
-#define mqe_size_WORD word8
-#define mq_elem_cnt_SHIFT 16
-#define mq_elem_cnt_MASK 0x000000ff
-#define mq_elem_cnt_WORD word8
- uint32_t word9;
-#define wq_pages_SHIFT 0
-#define wq_pages_MASK 0x0000ffff
-#define wq_pages_WORD word9
-#define wqe_size_SHIFT 8
-#define wqe_size_MASK 0x000000ff
-#define wqe_size_WORD word9
- uint32_t word10;
-#define rq_pages_SHIFT 0
-#define rq_pages_MASK 0x0000ffff
-#define rq_pages_WORD word10
-#define rqe_size_SHIFT 8
-#define rqe_size_MASK 0x000000ff
-#define rqe_size_WORD word10
- uint32_t word11;
-#define hdr_pages_SHIFT 0
-#define hdr_pages_MASK 0x0000000f
-#define hdr_pages_WORD word11
-#define hdr_size_SHIFT 8
-#define hdr_size_MASK 0x0000000f
-#define hdr_size_WORD word11
-#define hdr_pp_align_SHIFT 16
-#define hdr_pp_align_MASK 0x0000ffff
-#define hdr_pp_align_WORD word11
- uint32_t word12;
-#define sgl_pages_SHIFT 0
-#define sgl_pages_MASK 0x0000000f
-#define sgl_pages_WORD word12
-#define sgl_pp_align_SHIFT 16
-#define sgl_pp_align_MASK 0x0000ffff
-#define sgl_pp_align_WORD word12
- uint32_t rsvd_13_63[51];
-};
#define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
&(~((SLI4_PAGE_SIZE)-1)))
@@ -3854,8 +3684,6 @@ struct lpfc_mqe {
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
struct lpfc_mbx_query_fw_config query_fw_cfg;
struct lpfc_mbx_set_beacon_config beacon_config;
- struct lpfc_mbx_supp_pages supp_pages;
- struct lpfc_mbx_pc_sli4_params sli4_params;
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
struct lpfc_mbx_set_link_diag_state link_diag_state;
struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 57510a831735..c6caacaa3e7a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -5854,8 +5854,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mboxq;
MAILBOX_t *mb;
int rc, i, max_buf_size;
- uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
- struct lpfc_mqe *mqe;
int longs;
int fof_vectors = 0;
int extra;
@@ -6150,32 +6148,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_nvme_mod_param_dep(phba);
- /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
- lpfc_supported_pages(mboxq);
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (!rc) {
- mqe = &mboxq->u.mqe;
- memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
- LPFC_MAX_SUPPORTED_PAGES);
- for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
- switch (pn_page[i]) {
- case LPFC_SLI4_PARAMETERS:
- phba->sli4_hba.pc_sli4_params.supported = 1;
- break;
- default:
- break;
- }
- }
- /* Read the port's SLI4 Parameters capabilities if supported. */
- if (phba->sli4_hba.pc_sli4_params.supported)
- rc = lpfc_pc_sli4_params_get(phba, mboxq);
- if (rc) {
- mempool_free(mboxq, phba->mbox_mem_pool);
- rc = -EIO;
- goto out_free_bsmbx;
- }
- }
-
/*
* Get sli4 parameters that override parameters from Port capabilities.
* If this call fails, it isn't critical unless the SLI4 parameters come
@@ -10517,78 +10489,6 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
phba->pport->work_port_events = 0;
}
- /**
- * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
- * @phba: Pointer to HBA context object.
- * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
- *
- * This function is called in the SLI4 code path to read the port's
- * sli4 capabilities.
- *
- * This function may be be called from any context that can block-wait
- * for the completion. The expectation is that this routine is called
- * typically from probe_one or from the online routine.
- **/
-int
-lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
-{
- int rc;
- struct lpfc_mqe *mqe;
- struct lpfc_pc_sli4_params *sli4_params;
- uint32_t mbox_tmo;
-
- rc = 0;
- mqe = &mboxq->u.mqe;
-
- /* Read the port's SLI4 Parameters port capabilities */
- lpfc_pc_sli4_params(mboxq);
- if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
- }
-
- if (unlikely(rc))
- return 1;
-
- sli4_params = &phba->sli4_hba.pc_sli4_params;
- sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
- sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
- sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
- sli4_params->featurelevel_1 = bf_get(featurelevel_1,
- &mqe->un.sli4_params);
- sli4_params->featurelevel_2 = bf_get(featurelevel_2,
- &mqe->un.sli4_params);
- sli4_params->proto_types = mqe->un.sli4_params.word3;
- sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
- sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
- sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
- sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
- sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
- sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
- sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
- sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
- sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
- sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
- sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
- sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
- sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
- sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
- sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
- sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
- sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
- sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
- sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
- sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
-
- /* Make sure that sge_supp_len can be handled by the driver */
- if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
- sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
-
- return rc;
-}
-
/**
* lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
* @phba: Pointer to HBA context object.
@@ -10647,7 +10547,8 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
else
phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
- sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+ sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
+ mbx_sli4_parameters);
sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e6bf5e8bc767..a4c382d2ce79 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2622,39 +2622,3 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}
-/**
- * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
- * mailbox command.
- * @mbox: pointer to lpfc mbox command to initialize.
- *
- * The PORT_CAPABILITIES supported pages mailbox command is issued to
- * retrieve the particular feature pages supported by the port.
- **/
-void
-lpfc_supported_pages(struct lpfcMboxq *mbox)
-{
- struct lpfc_mbx_supp_pages *supp_pages;
-
- memset(mbox, 0, sizeof(*mbox));
- supp_pages = &mbox->u.mqe.un.supp_pages;
- bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
- bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
-}
-
-/**
- * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
- * @mbox: pointer to lpfc mbox command to initialize.
- *
- * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
- * retrieve the particular SLI4 features supported by the port.
- **/
-void
-lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
-{
- struct lpfc_mbx_pc_sli4_params *sli4_params;
-
- memset(mbox, 0, sizeof(*mbox));
- sli4_params = &mbox->u.mqe.un.sli4_params;
- bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
- bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
-}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 9c22a2c93462..fa758f9b82e0 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -560,8 +560,6 @@ lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
* Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
* pool along a non-DMA-mapped container for it.
*
- * Notes: Not interrupt-safe. Must be called with no locks held.
- *
* Returns:
* pointer to HBQ on success
* NULL on failure
@@ -631,7 +629,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
struct rqb_dmabuf *dma_buf;
- dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
+ dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
if (!dma_buf)
return NULL;
@@ -754,7 +752,6 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
if (rc < 0) {
- (rqbp->rqb_free_buffer)(phba, rqb_entry);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6409 Cannot post to HRQ %d: %x %x %x "
"DRQ %x %x\n",
@@ -764,6 +761,7 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
rqb_entry->hrq->entry_count,
rqb_entry->drq->host_index,
rqb_entry->drq->hba_index);
+ (rqbp->rqb_free_buffer)(phba, rqb_entry);
} else {
list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
rqbp->buffer_count++;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 3dfed191252c..9442fb30e7cd 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -708,9 +708,14 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
((ndlp->nlp_type & NLP_FCP_TARGET) ||
- !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
+ (ndlp->nlp_type & NLP_NVME_TARGET) ||
+ (vport->fc_flag & FC_PT2PT))) ||
(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
- /* Only try to re-login if this is NOT a Fabric Node */
+ /* Only try to re-login if this is NOT a Fabric Node
+ * AND the remote NPORT is a FCP/NVME Target or we
+ * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
+ * case for LOGO as a response to ADISC behavior.
+ */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
spin_lock_irq(shost->host_lock);
@@ -1738,8 +1743,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
lpfc_issue_els_logo(vport, ndlp, 0);
- ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 6c355d87c709..e3013858937b 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -342,13 +342,15 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
ndlp->nrport = NULL;
ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
- }
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&vport->phba->hbalock);
- /* Remove original register reference. The host transport
- * won't reference this rport/remoteport any further.
- */
- lpfc_nlp_put(ndlp);
+ /* Remove original register reference. The host transport
+ * won't reference this rport/remoteport any further.
+ */
+ lpfc_nlp_put(ndlp);
+ } else {
+ spin_unlock_irq(&vport->phba->hbalock);
+ }
rport_err:
return;
@@ -1903,8 +1905,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Declare and initialization an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
- .module = THIS_MODULE,
-
/* initiator-based functions */
.localport_delete = lpfc_nvme_localport_delete,
.remoteport_delete = lpfc_nvme_remoteport_delete,
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 768eba8c111d..23ead17e60fe 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1712,7 +1712,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
}
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
nvmet_fc_unregister_targetport(phba->targetport);
- if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
+ if (!wait_for_completion_timeout(&tport_unreg_cmp,
msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
"6179 Unreg targetport %p timeout "
@@ -2912,7 +2912,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
/* Word 10 */
- bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
LPFC_WQE_LENLOC_WORD12);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a801917d3c19..f4633c9f8183 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2472,6 +2472,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
!pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->vport = vport;
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -6753,12 +6755,16 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct rqb_dmabuf *rqb_buffer;
LIST_HEAD(rqb_buf_list);
- spin_lock_irqsave(&phba->hbalock, flags);
rqbp = hrq->rqbp;
for (i = 0; i < count; i++) {
+ spin_lock_irqsave(&phba->hbalock, flags);
/* IF RQ is already full, don't bother */
- if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
+ if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
break;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
rqb_buffer = rqbp->rqb_alloc_buffer(phba);
if (!rqb_buffer)
break;
@@ -6767,6 +6773,8 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
rqb_buffer->idx = idx;
list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
}
+
+ spin_lock_irqsave(&phba->hbalock, flags);
while (!list_empty(&rqb_buf_list)) {
list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
hbuf.list);
@@ -17010,7 +17018,6 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
if (cmd_iocbq) {
ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
lpfc_nlp_put(ndlp);
- lpfc_nlp_not_used(ndlp);
lpfc_sli_release_iocbq(phba, cmd_iocbq);
}
@@ -17411,6 +17418,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
list_add_tail(&iocbq->list, &first_iocbq->list);
}
}
+ /* Free the sequence's header buffer */
+ if (!first_iocbq)
+ lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
+
return first_iocbq;
}
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 1ff0f7de9105..64545b300dfc 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -653,27 +653,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
vport->port_state < LPFC_VPORT_READY)
return -EAGAIN;
}
+
/*
- * This is a bit of a mess. We want to ensure the shost doesn't get
- * torn down until we're done with the embedded lpfc_vport structure.
- *
- * Beyond holding a reference for this function, we also need a
- * reference for outstanding I/O requests we schedule during delete
- * processing. But once we scsi_remove_host() we can no longer obtain
- * a reference through scsi_host_get().
- *
- * So we take two references here. We release one reference at the
- * bottom of the function -- after delinking the vport. And we
- * release the other at the completion of the unreg_vpi that get's
- * initiated after we've disposed of all other resources associated
- * with the port.
+ * Take early refcount for outstanding I/O requests we schedule during
+ * delete processing for unreg_vpi. Always keep this before
+ * scsi_remove_host(), as we can no longer obtain a reference through
+ * scsi_host_get() once scsi_remove_host() has set shost to SHOST_DEL.
*/
if (!scsi_host_get(shost))
return VPORT_INVAL;
- if (!scsi_host_get(shost)) {
- scsi_host_put(shost);
- return VPORT_INVAL;
- }
+
lpfc_free_sysfs_attr(vport);
lpfc_debugfs_terminate(vport);
@@ -820,8 +809,9 @@ skip_logo:
if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
lpfc_mbx_unreg_vpi(vport))
scsi_host_put(shost);
- } else
+ } else {
scsi_host_put(shost);
+ }
lpfc_free_vpi(phba, vport->vpi);
vport->work_port_events = 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 21f971447dd8..8877a21102f1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -7192,7 +7192,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
int error = 0, i;
void *sense = NULL;
dma_addr_t sense_handle;
- unsigned long *sense_ptr;
+ void *sense_ptr;
u32 opcode = 0;
memset(kbuff_arr, 0, sizeof(kbuff_arr));
@@ -7309,6 +7309,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
}
if (ioc->sense_len) {
+ /* make sure the pointer is part of the frame */
+ if (ioc->sense_off >
+ (sizeof(union megasas_frame) - sizeof(__le64))) {
+ error = -EINVAL;
+ goto out;
+ }
+
sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
&sense_handle, GFP_KERNEL);
if (!sense) {
@@ -7316,12 +7323,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
goto out;
}
- sense_ptr =
- (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
- if (instance->consistent_mask_64bit)
- *sense_ptr = cpu_to_le64(sense_handle);
- else
- *sense_ptr = cpu_to_le32(sense_handle);
+ /* always store 64 bits regardless of addressing */
+ sense_ptr = (void *)cmd->frame + ioc->sense_off;
+ put_unaligned_le64(sense_handle, sense_ptr);
}
/*
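
The megaraid_sas ioctl path now validates sense_off against the frame size and stores the DMA handle with put_unaligned_le64() instead of a cast whose width depended on the addressing mode. A minimal sketch of that helper writing at an arbitrary byte offset; the function and buffer layout are invented:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Hedged sketch: put_unaligned_le64() handles both the little-endian
 * conversion and the possibly unaligned destination inside the frame.
 */
static void store_sense_addr(void *frame, unsigned int off, dma_addr_t handle)
{
	put_unaligned_le64((u64)handle, frame + off);
}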
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 81bd824bb9d9..bdb12bf0d5c7 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3940,6 +3940,7 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
struct fusion_context *fusion;
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
u16 smid;
bool refire_cmd = 0;
u8 result;
@@ -3990,6 +3991,11 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
break;
}
+ scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
+ cmd_fusion->io_request;
+ if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT)
+ result = RETURN_CMD;
+
switch (result) {
case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
@@ -4187,7 +4193,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
if (!timeleft) {
dev_err(&instance->pdev->dev,
"task mgmt type 0x%x timed out\n", type);
- cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
mutex_unlock(&instance->reset_mutex);
rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
mutex_lock(&instance->reset_mutex);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 82e01dbe90af..7c0eaa9ea1ed 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1044,6 +1044,8 @@ static void handle_error(struct mesh_state *ms)
while ((in_8(&mr->bus_status1) & BS1_RST) != 0)
udelay(1);
printk("done\n");
+ if (ms->dma_started)
+ halt_dma(ms);
handle_reset(ms);
/* request_q is empty, no point in mesh_start() */
return;
@@ -1356,7 +1358,8 @@ static void halt_dma(struct mesh_state *ms)
ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd),
ms->tgts[ms->conn_tgt].data_goes_out);
}
- scsi_dma_unmap(cmd);
+ if (cmd)
+ scsi_dma_unmap(cmd);
ms->dma_started = 0;
}
@@ -1711,6 +1714,9 @@ static int mesh_host_reset(struct scsi_cmnd *cmd)
spin_lock_irqsave(ms->host->host_lock, flags);
+ if (ms->dma_started)
+ halt_dma(ms);
+
/* Reset the controller & dbdma channel */
out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */
out_8(&mr->exception, 0xff); /* clear all exception bits */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 2c556c7fcf0d..447ac667f4b2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -4284,7 +4284,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
kfree(ioc->hpr_lookup);
+ ioc->hpr_lookup = NULL;
kfree(ioc->internal_lookup);
+ ioc->internal_lookup = NULL;
if (ioc->chain_lookup) {
for (i = 0; i < ioc->scsiio_depth; i++) {
for (j = ioc->chains_per_prp_buffer;
@@ -5769,7 +5771,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
r = _base_handshake_req_reply_wait(ioc,
sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
- sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
+ sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
if (r != 0) {
pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
@@ -6627,14 +6629,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pend_os_device_add_sz++;
ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
GFP_KERNEL);
- if (!ioc->pend_os_device_add)
+ if (!ioc->pend_os_device_add) {
+ r = -ENOMEM;
goto out_free_resources;
+ }
ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
ioc->device_remove_in_progress =
kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
- if (!ioc->device_remove_in_progress)
+ if (!ioc->device_remove_in_progress) {
+ r = -ENOMEM;
goto out_free_resources;
+ }
ioc->fwfault_debug = mpt3sas_fwfault_debug;
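
mpt3sas_base_attach() previously jumped to out_free_resources on these kzalloc() failures without updating r, which could propagate a stale success code; the hunks now set -ENOMEM first. A small sketch of the pattern with an invented context structure:

#include <linux/slab.h>
#include <linux/errno.h>

struct ctx {			/* illustrative, not the mpt3sas adapter */
	void *pending;
	size_t pending_sz;
};

static int attach_buffers(struct ctx *c)
{
	int r = 0;

	c->pending = kzalloc(c->pending_sz, GFP_KERNEL);
	if (!c->pending) {
		r = -ENOMEM;		/* don't fall through with r == 0 */
		goto out_free_resources;
	}
	return 0;

out_free_resources:
	kfree(c->pending);		/* kfree(NULL) is a no-op */
	return r;
}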
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 07345016fd9c..8cfb4f12c68c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -654,7 +654,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
struct _pcie_device *pcie_device = NULL;
u32 ioc_state;
u16 smid;
- u8 timeout;
+ unsigned long timeout;
u8 issue_reset;
u32 sz, sz_arg;
void *psge;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index d3c944d99703..5a5e5c3da657 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -9841,8 +9841,8 @@ static void scsih_remove(struct pci_dev *pdev)
ioc->remove_host = 1;
- mpt3sas_wait_for_commands_to_complete(ioc);
- _scsih_flush_running_cmds(ioc);
+ if (!pci_device_is_present(pdev))
+ _scsih_flush_running_cmds(ioc);
_scsih_fw_event_cleanup_queue(ioc);
@@ -9919,8 +9919,8 @@ scsih_shutdown(struct pci_dev *pdev)
ioc->remove_host = 1;
- mpt3sas_wait_for_commands_to_complete(ioc);
- _scsih_flush_running_cmds(ioc);
+ if (!pci_device_is_present(pdev))
+ _scsih_flush_running_cmds(ioc);
_scsih_fw_event_cleanup_queue(ioc);
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index b3cd9a6b1d30..b3df114a1200 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2439,6 +2439,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
if (IS_ERR(mhba->dm_thread)) {
dev_err(&mhba->pdev->dev,
"failed to create device scan thread\n");
+ ret = PTR_ERR(mhba->dm_thread);
mutex_unlock(&mhba->sas_discovery_mutex);
goto fail_create_thread;
}
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 7a697ca68501..1d59d7447a1c 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -1059,7 +1059,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
pm8001_init_sas_add(pm8001_ha);
/* phy setting support for motherboard controller */
- if (pm8001_configure_phy_settings(pm8001_ha))
+ rc = pm8001_configure_phy_settings(pm8001_ha);
+ if (rc)
goto err_out_shost;
pm8001_post_sas_ha_init(shost, chip);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 5be4212312cb..5becdde3ea32 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -794,7 +794,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
if (res)
- return res;
+ goto ex_err;
ccb = &pm8001_ha->ccb_info[ccb_tag];
ccb->device = pm8001_dev;
ccb->ccb_tag = ccb_tag;
@@ -1184,8 +1184,8 @@ int pm8001_abort_task(struct sas_task *task)
pm8001_ha = pm8001_find_ha_by_dev(dev);
device_id = pm8001_dev->device_id;
phy_id = pm8001_dev->attached_phy;
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
+ ret = pm8001_find_tag(task, &tag);
+ if (ret == 0) {
pm8001_printk("no tag for task:%p\n", task);
return TMF_RESP_FUNC_FAILED;
}
@@ -1223,26 +1223,50 @@ int pm8001_abort_task(struct sas_task *task)
/* 2. Send Phy Control Hard Reset */
reinit_completion(&completion);
+ phy->port_reset_status = PORT_RESET_TMO;
phy->reset_success = false;
phy->enable_completion = &completion;
phy->reset_completion = &completion_reset;
ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
PHY_HARD_RESET);
- if (ret)
- goto out;
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Waiting for local phy ctl\n"));
- wait_for_completion(&completion);
- if (!phy->reset_success)
+ if (ret) {
+ phy->enable_completion = NULL;
+ phy->reset_completion = NULL;
goto out;
+ }
- /* 3. Wait for Port Reset complete / Port reset TMO */
+ /* In the case of the reset timeout/fail we still
+ * abort the command at the firmware. The assumption
+ * here is that the drive is off doing something so
+ * that it's not processing requests, and we want to
+ * avoid getting a completion for this and either
+ * leaking the task in libsas or losing the race and
+ * getting a double free.
+ */
PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Waiting for local phy ctl\n"));
+ ret = wait_for_completion_timeout(&completion,
+ PM8001_TASK_TIMEOUT * HZ);
+ if (!ret || !phy->reset_success) {
+ phy->enable_completion = NULL;
+ phy->reset_completion = NULL;
+ } else {
+ /* 3. Wait for Port Reset complete or
+ * Port reset TMO
+ */
+ PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("Waiting for Port reset\n"));
- wait_for_completion(&completion_reset);
- if (phy->port_reset_status) {
- pm8001_dev_gone_notify(dev);
- goto out;
+ ret = wait_for_completion_timeout(
+ &completion_reset,
+ PM8001_TASK_TIMEOUT * HZ);
+ if (!ret)
+ phy->reset_completion = NULL;
+ WARN_ON(phy->port_reset_status ==
+ PORT_RESET_TMO);
+ if (phy->port_reset_status == PORT_RESET_TMO) {
+ pm8001_dev_gone_notify(dev);
+ goto out;
+ }
}
/*
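The pm8001_abort_task() hunk above swaps the unbounded wait_for_completion() calls for wait_for_completion_timeout() and detaches the enable/reset completion pointers whenever the wait gives up, so a late firmware event cannot signal a completion that no longer has a waiter. A minimal user-space sketch of the same timed-wait idea, using POSIX threads rather than the kernel completion API; the names, the one-second worker delay and the two-second timeout are illustrative only (build with cc -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct completion {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    bool            done;
};

static void complete(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = true;
    pthread_cond_broadcast(&c->cond);
    pthread_mutex_unlock(&c->lock);
}

/* Wait up to 'secs' seconds; returns true only if complete() ran. */
static bool wait_timeout(struct completion *c, int secs)
{
    struct timespec deadline;
    bool ok;

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += secs;

    pthread_mutex_lock(&c->lock);
    while (!c->done) {
        if (pthread_cond_timedwait(&c->cond, &c->lock, &deadline))
            break;  /* timed out: the caller must detach itself, as the hunk does */
    }
    ok = c->done;
    pthread_mutex_unlock(&c->lock);
    return ok;
}

static void *worker(void *arg)
{
    sleep(1);                     /* pretend the firmware answers after a second */
    complete(arg);
    return NULL;
}

int main(void)
{
    struct completion c = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
        .done = false,
    };
    pthread_t t;

    pthread_create(&t, NULL, worker, &c);
    printf("reset %s\n", wait_timeout(&c, 2) ? "completed" : "timed out");
    pthread_join(&t, NULL);
    return 0;
}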
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 2c78d8fb9122..fc06be4fd10c 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -335,6 +335,7 @@ struct qedf_ctx {
#define QEDF_GRCDUMP_CAPTURE 4
#define QEDF_IN_RECOVERY 5
#define QEDF_DBG_STOP_IO 6
+#define QEDF_PROBING 8
unsigned long flags; /* Miscellaneous state flags */
int fipvlan_retries;
u8 num_queues;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index cd61905ca2f5..b253523217b8 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2961,7 +2961,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
{
int rc = -EINVAL;
struct fc_lport *lport;
- struct qedf_ctx *qedf;
+ struct qedf_ctx *qedf = NULL;
struct Scsi_Host *host;
bool is_vf = false;
struct qed_ll2_params params;
@@ -2989,6 +2989,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
/* Initialize qedf_ctx */
qedf = lport_priv(lport);
+ set_bit(QEDF_PROBING, &qedf->flags);
qedf->lport = lport;
qedf->ctlr.lp = lport;
qedf->pdev = pdev;
@@ -3011,9 +3012,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
} else {
/* Init pointers during recovery */
qedf = pci_get_drvdata(pdev);
+ set_bit(QEDF_PROBING, &qedf->flags);
lport = qedf->lport;
}
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
+
host = lport->host;
/* Allocate mempool for qedf_io_work structs */
@@ -3312,6 +3316,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
else
fc_fabric_login(lport);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+ clear_bit(QEDF_PROBING, &qedf->flags);
+
/* All good */
return 0;
@@ -3337,6 +3345,11 @@ err2:
err1:
scsi_host_put(lport->host);
err0:
+ if (qedf) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+ clear_bit(QEDF_PROBING, &qedf->flags);
+ }
return rc;
}
@@ -3484,11 +3497,25 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
{
struct qedf_ctx *qedf = dev;
struct qed_mfw_tlv_fcoe *fcoe = data;
- struct fc_lport *lport = qedf->lport;
- struct Scsi_Host *host = lport->host;
- struct fc_host_attrs *fc_host = shost_to_fc_host(host);
+ struct fc_lport *lport;
+ struct Scsi_Host *host;
+ struct fc_host_attrs *fc_host;
struct fc_host_statistics *hst;
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is null.\n");
+ return;
+ }
+
+ if (test_bit(QEDF_PROBING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
+ return;
+ }
+
+ lport = qedf->lport;
+ host = lport->host;
+ fc_host = shost_to_fc_host(host);
+
/* Force a refresh of the fc_host stats including offload stats */
hst = qedf_fc_get_host_stats(host);
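The qedf hunks above set a QEDF_PROBING bit for the whole of __qedf_probe() and make qedf_get_protocol_tlv_data() bail out while it is set, so a management callback cannot dereference a half-initialised context. A small C11 sketch of that guard pattern; probe() and report_stats() are invented stand-ins for the driver entry points, and the int field stands in for the lport/host pointers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
    atomic_bool probing;
    int stats;                  /* stands in for lport/host state */
};

static void report_stats(struct ctx *c)
{
    /* callback from another subsystem: refuse to touch the context
     * until probe() has finished wiring it up */
    if (atomic_load(&c->probing)) {
        puts("still probing, skipping stats");
        return;
    }
    printf("stats = %d\n", c->stats);
}

static void probe(struct ctx *c)
{
    atomic_store(&c->probing, true);
    c->stats = 42;              /* slow initialisation happens here */
    atomic_store(&c->probing, false);
}

int main(void)
{
    struct ctx c;

    atomic_init(&c.probing, true);      /* as if probe were still in flight */
    c.stats = 0;
    report_stats(&c);                   /* refused */
    probe(&c);
    report_stats(&c);                   /* prints 42 */
    return 0;
}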
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 25d763ae5d5a..357a0acc5ed2 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -62,6 +62,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
"Freeing tid=0x%x for cid=0x%x\n",
cmd->task_id, qedi_conn->iscsi_conn_id);
+ spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
@@ -72,6 +73,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
cmd->task_id, qedi_conn->iscsi_conn_id,
&cmd->io_cmd);
}
+ spin_unlock(&qedi_conn->list_lock);
cmd->state = RESPONSE_RECEIVED;
qedi_clear_task_idx(qedi, cmd->task_id);
@@ -125,6 +127,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
"Freeing tid=0x%x for cid=0x%x\n",
cmd->task_id, qedi_conn->iscsi_conn_id);
+ spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
@@ -135,6 +138,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
cmd->task_id, qedi_conn->iscsi_conn_id,
&cmd->io_cmd);
}
+ spin_unlock(&qedi_conn->list_lock);
cmd->state = RESPONSE_RECEIVED;
qedi_clear_task_idx(qedi, cmd->task_id);
@@ -227,11 +231,13 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
+ spin_lock(&qedi_conn->list_lock);
if (likely(qedi_cmd->io_cmd_in_list)) {
qedi_cmd->io_cmd_in_list = false;
list_del_init(&qedi_cmd->io_cmd);
qedi_conn->active_cmd_count--;
}
+ spin_unlock(&qedi_conn->list_lock);
if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
@@ -293,11 +299,13 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+ spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
}
+ spin_unlock(&qedi_conn->list_lock);
memset(task_ctx, '\0', sizeof(*task_ctx));
@@ -829,8 +837,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
qedi_clear_task_idx(qedi_conn->qedi, rtid);
spin_lock(&qedi_conn->list_lock);
- list_del_init(&dbg_cmd->io_cmd);
- qedi_conn->active_cmd_count--;
+ if (likely(dbg_cmd->io_cmd_in_list)) {
+ dbg_cmd->io_cmd_in_list = false;
+ list_del_init(&dbg_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
spin_unlock(&qedi_conn->list_lock);
qedi_cmd->state = CLEANUP_RECV;
wake_up_interruptible(&qedi_conn->wait_queue);
@@ -1249,6 +1260,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
qedi_conn->cmd_cleanup_req++;
qedi_iscsi_cleanup_task(ctask, true);
+ cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
QEDI_WARN(&qedi->dbg_ctx,
@@ -1462,8 +1474,11 @@ ldel_exit:
spin_unlock_bh(&qedi_conn->tmf_work_lock);
spin_lock(&qedi_conn->list_lock);
- list_del_init(&cmd->io_cmd);
- qedi_conn->active_cmd_count--;
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
spin_unlock(&qedi_conn->list_lock);
clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
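Several qedi_fw.c hunks above wrap the list_del_init()/active_cmd_count-- pairs in list_lock and make them conditional on io_cmd_in_list, so two completion paths racing on the same command cannot unlink it, and decrement the counter, twice. A user-space sketch of that idempotent-removal pattern with a pthread mutex; the toy list and the structure names are made up for the example:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cmd {
    bool in_list;
    struct cmd *prev, *next;            /* toy doubly linked list */
};

struct conn {
    pthread_mutex_t list_lock;
    struct cmd *head;
    int active_cmd_count;
};

/* Safe to call from any completion path, any number of times. */
static void remove_cmd(struct conn *c, struct cmd *cmd)
{
    pthread_mutex_lock(&c->list_lock);
    if (cmd->in_list) {
        cmd->in_list = false;
        if (cmd->prev)
            cmd->prev->next = cmd->next;
        else
            c->head = cmd->next;
        if (cmd->next)
            cmd->next->prev = cmd->prev;
        c->active_cmd_count--;
    }
    pthread_mutex_unlock(&c->list_lock);
}

int main(void)
{
    struct conn conn = { .list_lock = PTHREAD_MUTEX_INITIALIZER };
    struct cmd cmd = { .in_list = true };

    conn.head = &cmd;
    conn.active_cmd_count = 1;

    remove_cmd(&conn, &cmd);            /* normal completion */
    remove_cmd(&conn, &cmd);            /* late cleanup path: now a no-op */
    printf("active_cmd_count = %d\n", conn.active_cmd_count);  /* prints 0, not -1 */
    return 0;
}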
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 1b7049dce169..4e8c5fcbded6 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -976,11 +976,13 @@ static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
{
struct qedi_cmd *cmd, *cmd_tmp;
+ spin_lock(&qedi_conn->list_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
io_cmd) {
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
}
+ spin_unlock(&qedi_conn->list_lock);
}
static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
@@ -1000,7 +1002,8 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover;
- flush_work(&qedi_ep->offload_work);
+ if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
+ flush_work(&qedi_ep->offload_work);
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
@@ -1064,6 +1067,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
break;
}
+ if (!abrt_conn)
+ wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
+
qedi_ep->state = EP_STATE_DISCONN_START;
ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
if (ret) {
@@ -1217,6 +1223,10 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
}
iscsi_cid = (u32)path_data->handle;
+ if (iscsi_cid >= qedi->max_active_conns) {
+ ret = -EINVAL;
+ goto set_path_exit;
+ }
qedi_ep = qedi->ep_tbl[iscsi_cid];
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 763c7628356b..fe26144d390a 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1559,6 +1559,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
if (!qedi->global_queues[i]) {
QEDI_ERR(&qedi->dbg_ctx,
"Unable to allocation global queue %d.\n", i);
+ status = -ENOMEM;
goto mem_alloc_failure;
}
@@ -2129,7 +2130,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
chap_name);
break;
case ISCSI_BOOT_TGT_CHAP_SECRET:
- rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
chap_secret);
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
@@ -2137,7 +2138,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
mchap_name);
break;
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
- rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
mchap_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
@@ -2580,7 +2581,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
QEDI_ERR(&qedi->dbg_ctx,
"Unable to start offload thread!\n");
rc = -ENODEV;
- goto free_cid_que;
+ goto free_tmf_thread;
}
/* F/w needs 1st task context memory entry for performance */
@@ -2600,6 +2601,8 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
return 0;
+free_tmf_thread:
+ destroy_workqueue(qedi->tmf_thread);
free_cid_que:
qedi_release_cid_que(qedi);
free_uio:
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b008d583dd6e..d46a10d24ed4 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1934,6 +1934,8 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
if (IS_FWI2_CAPABLE(ha)) {
+ int rval;
+
stats = dma_alloc_coherent(&ha->pdev->dev,
sizeof(*stats), &stats_dma, GFP_KERNEL);
if (!stats) {
@@ -1943,7 +1945,11 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
}
/* reset firmware statistics */
- qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
+ rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0x70de,
+ "Resetting ISP statistics failed: rval = %d\n",
+ rval);
dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
stats, stats_dma);
@@ -2162,11 +2168,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
msleep(1000);
- qla_nvme_delete(vha);
qla24xx_disable_vp(vha);
qla2x00_wait_for_sess_deletion(vha);
+ qla_nvme_delete(vha);
vha->flags.delete_progress = 1;
qlt_remove_target(ha, vha);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 47f062e96e62..eae166572964 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -19,10 +19,11 @@ qla2x00_bsg_job_done(void *ptr, int res)
struct bsg_job *bsg_job = sp->u.bsg_job;
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ sp->free(sp);
+
bsg_reply->result = res;
bsg_job_done(bsg_job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
- sp->free(sp);
}
void
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a9dc9c4a6382..47835d26a973 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -262,8 +262,8 @@ struct name_list_extended {
struct get_name_list_extended *l;
dma_addr_t ldma;
struct list_head fcports;
- spinlock_t fcports_lock;
u32 size;
+ u8 sent;
};
/*
* Timeout timer counts in seconds
@@ -2351,7 +2351,7 @@ typedef struct fc_port {
unsigned int login_succ:1;
unsigned int query:1;
unsigned int id_changed:1;
- unsigned int rscn_rcvd:1;
+ unsigned int scan_needed:1;
struct work_struct nvme_del_work;
struct completion nvme_del_done;
@@ -2375,11 +2375,13 @@ typedef struct fc_port {
unsigned long expires;
struct list_head del_list_entry;
struct work_struct free_work;
-
+ struct work_struct reg_work;
+ uint64_t jiffies_at_registration;
struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
uint16_t tgt_id;
uint16_t old_tgt_id;
+ uint16_t sec_since_registration;
uint8_t fcp_prio;
@@ -2412,6 +2414,7 @@ typedef struct fc_port {
struct qla_tgt_sess *tgt_session;
struct ct_sns_desc ct_desc;
enum discovery_state disc_state;
+ enum discovery_state next_disc_state;
enum login_state fw_login_state;
unsigned long dm_login_expire;
unsigned long plogi_nack_done_deadline;
@@ -3222,7 +3225,6 @@ enum qla_work_type {
QLA_EVT_GPDB,
QLA_EVT_PRLI,
QLA_EVT_GPSC,
- QLA_EVT_UPD_FCPORT,
QLA_EVT_GNL,
QLA_EVT_NACK,
QLA_EVT_RELOGIN,
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 178974896b5c..b8e4abe804d5 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -54,7 +54,7 @@ extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
extern void qla2x00_quiesce_io(scsi_qla_host_t *);
extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
-
+void qla_register_fcport_fn(struct work_struct *);
extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
@@ -109,6 +109,7 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
int qla24xx_detect_sfp(scsi_qla_host_t *vha);
int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+
void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
@@ -208,7 +209,7 @@ extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
extern void qla2x00_sp_compl(void *, int);
extern void qla2xxx_qpair_sp_free_dma(void *);
extern void qla2xxx_qpair_sp_compl(void *, int);
-extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
+extern void qla24xx_sched_upd_fcport(fc_port_t *);
void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index f621cb55ccfb..c3195d4c25e5 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3973,7 +3973,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
continue;
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
fcport->scan_state = QLA_FCPORT_FOUND;
found = true;
/*
@@ -4009,20 +4009,19 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
*/
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
continue;
}
if (fcport->scan_state != QLA_FCPORT_FOUND) {
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
if ((qla_dual_mode_enabled(vha) ||
qla_ini_mode_enabled(vha)) &&
atomic_read(&fcport->state) == FCS_ONLINE) {
- qla2x00_mark_device_lost(vha, fcport,
- ql2xplogiabsentdevice, 0);
+ if (fcport->loop_id != FC_NO_LOOP_ID) {
+ if (fcport->flags & FCF_FCP2_DEVICE)
+ fcport->logout_on_delete = 0;
- if (fcport->loop_id != FC_NO_LOOP_ID &&
- (fcport->flags & FCF_FCP2_DEVICE) == 0) {
ql_dbg(ql_dbg_disc, vha, 0x20f0,
"%s %d %8phC post del sess\n",
__func__, __LINE__,
@@ -4033,7 +4032,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
}
}
} else {
- if (fcport->rscn_rcvd ||
+ if (fcport->scan_needed ||
fcport->disc_state != DSC_LOGIN_COMPLETE) {
if (fcport->login_retry == 0) {
fcport->login_retry =
@@ -4043,7 +4042,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
fcport->port_name, fcport->loop_id,
fcport->login_retry);
}
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
qla24xx_fcport_handle_login(vha, fcport);
}
}
@@ -4058,7 +4057,7 @@ out:
if (recheck) {
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->rscn_rcvd) {
+ if (fcport->scan_needed) {
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
break;
@@ -4261,12 +4260,13 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
sp->rc = res;
rc = qla2x00_post_nvme_gpnft_done_work(vha, sp, QLA_EVT_GPNFT);
- if (!rc) {
+ if (rc) {
qla24xx_sp_unmap(vha, sp);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
return;
}
+ return;
}
if (cmd == GPN_FT_CMD) {
@@ -4316,6 +4316,8 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
WARN_ON(1);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
goto done_free_sp;
}
@@ -4349,8 +4351,12 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
sp->done = qla2x00_async_gpnft_gnnft_sp_done;
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
+ if (rval != QLA_SUCCESS) {
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
goto done_free_sp;
+ }
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s hdl=%x FC4Type %x.\n", sp->name,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f45759b353be..2ebf4e4e0234 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -800,6 +800,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
if (res == QLA_FUNCTION_TIMEOUT)
return;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.sp = sp;
ea.rc = res;
@@ -827,25 +828,24 @@ qla24xx_async_gnl_sp_done(void *s, int res)
(loop_id & 0x7fff));
}
- spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
INIT_LIST_HEAD(&h);
fcport = tf = NULL;
if (!list_empty(&vha->gnl.fcports))
list_splice_init(&vha->gnl.fcports, &h);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
list_del_init(&fcport->gnl_entry);
- spin_lock(&vha->hw->tgt.sess_lock);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
- spin_unlock(&vha->hw->tgt.sess_lock);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
}
- spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* create new fcport if fw has knowledge of new sessions */
for (i = 0; i < n; i++) {
port_id_t id;
@@ -878,6 +878,8 @@ qla24xx_async_gnl_sp_done(void *s, int res)
}
}
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ vha->gnl.sent = 0;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp->free(sp);
@@ -897,27 +899,24 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
- spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
- if (!list_empty(&fcport->gnl_entry)) {
- spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
- rval = QLA_SUCCESS;
- goto done;
- }
-
- spin_lock(&vha->hw->tgt.sess_lock);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GNL;
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
- spin_unlock(&vha->hw->tgt.sess_lock);
list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
- spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
+ if (vha->gnl.sent) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ return QLA_SUCCESS;
+ }
+ vha->gnl.sent = 1;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gnlist";
sp->gen1 = fcport->rscn_gen;
@@ -1204,11 +1203,9 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
vha->fcport_count++;
ea->fcport->login_succ = 1;
- ql_dbg(ql_dbg_disc, vha, 0x20d6,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, ea->fcport->port_name,
- vha->fcport_count);
- qla24xx_post_upd_fcport_work(vha, ea->fcport);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ qla24xx_sched_upd_fcport(ea->fcport);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
} else if (ea->fcport->login_succ) {
/*
* We have an existing session. A late RSCN delivery
@@ -1326,6 +1323,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
u16 data[2];
u64 wwn;
+ u16 sec;
ql_dbg(ql_dbg_disc, vha, 0x20d8,
"%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
@@ -1457,6 +1455,22 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
qla24xx_post_prli_work(vha, fcport);
break;
+ case DSC_UPD_FCPORT:
+ sec = jiffies_to_msecs(jiffies -
+ fcport->jiffies_at_registration)/1000;
+ if (fcport->sec_since_registration < sec && sec &&
+ !(sec % 60)) {
+ fcport->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ "%s %8phC - Slow Rport registration(%d Sec)\n",
+ __func__, fcport->port_name, sec);
+ }
+
+ if (fcport->next_disc_state != DSC_DELETE_PEND)
+ fcport->next_disc_state = DSC_ADISC;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+
default:
break;
}
@@ -1572,8 +1586,10 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case RSCN_PORT_ADDR:
fcport = qla2x00_find_fcport_by_nportid
(vha, &ea->id, 1);
- if (fcport)
- fcport->rscn_rcvd = 1;
+ if (fcport) {
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
spin_lock_irqsave(&vha->work_lock, flags);
if (vha->scan.scan_flags == 0) {
@@ -4741,6 +4757,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
return NULL;
}
INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+ INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
@@ -5221,13 +5238,15 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
- fcport->vha = vha;
-
if (IS_SW_RESV_ADDR(fcport->d_id))
return;
+ ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
+ __func__, fcport->port_name);
+
+ fcport->disc_state = DSC_UPD_FCPORT;
+ fcport->login_retry = vha->hw->login_retry_count;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
- fcport->disc_state = DSC_LOGIN_COMPLETE;
fcport->deleted = 0;
fcport->logout_on_delete = 1;
fcport->login_retry = vha->hw->login_retry_count;
@@ -5289,6 +5308,36 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
}
}
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+}
+
+void qla_register_fcport_fn(struct work_struct *work)
+{
+ fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
+ u32 rscn_gen = fcport->rscn_gen;
+ u16 data[2];
+
+ if (IS_SW_RESV_ADDR(fcport->d_id))
+ return;
+
+ qla2x00_update_fcport(fcport->vha, fcport);
+
+ if (rscn_gen != fcport->rscn_gen) {
+ /* RSCN(s) came in during registration */
+ switch (fcport->next_disc_state) {
+ case DSC_DELETE_PEND:
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ case DSC_ADISC:
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(fcport->vha, fcport,
+ data);
+ break;
+ default:
+ break;
+ }
+ }
}
/*
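qla_register_fcport_fn() above runs the slow fcport registration from a work item and, once it finishes, compares the rscn_gen snapshot taken beforehand against the current value: if RSCNs arrived while registration was in flight, the port is pushed on to deletion or ADISC instead of being left stale. The generation-counter recheck is the interesting part; a compact sketch with invented names, where a plain function call stands in for the workqueue and the RSCN is simulated inside the slow step:

#include <stdio.h>

enum next_state { NEXT_NONE, NEXT_DELETE, NEXT_ADISC };

struct port {
    unsigned int rscn_gen;      /* bumped whenever an RSCN arrives */
    enum next_state next;
};

static void slow_registration(struct port *p)
{
    /* simulate an RSCN landing while registration is in flight */
    p->rscn_gen++;
    p->next = NEXT_ADISC;
}

static void register_port(struct port *p)
{
    unsigned int gen = p->rscn_gen;     /* snapshot before the long step */

    slow_registration(p);

    if (gen != p->rscn_gen) {           /* something changed under us */
        switch (p->next) {
        case NEXT_DELETE:
            puts("re-schedule session deletion");
            break;
        case NEXT_ADISC:
            puts("post ADISC to revalidate the port");
            break;
        default:
            break;
        }
    }
}

int main(void)
{
    struct port p = { .rscn_gen = 0, .next = NEXT_NONE };

    register_port(&p);          /* prints the ADISC branch */
    return 0;
}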
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index bef9faea5eee..07c5d7397d42 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -329,14 +329,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (time_after(jiffies, wait_time))
break;
- /*
- * Check if it's UNLOADING, cause we cannot poll in
- * this case, or else a NULL pointer dereference
- * is triggered.
- */
- if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
- return QLA_FUNCTION_TIMEOUT;
-
/* Check for pending interrupts. */
qla2x00_poll(ha->rsp_q_map[0]);
@@ -3077,7 +3069,7 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
"Entered %s.\n", __func__);
- if (vha->flags.qpairs_available && sp->qpair)
+ if (sp->qpair)
req = sp->qpair->req;
if (ql2xasynctmfenable)
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index db367e428095..7821c1695e82 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -477,6 +477,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
struct nvme_private *priv = fd->private;
struct qla_nvme_rport *qla_rport = rport->private;
+ if (!priv) {
+ /* nvme association has been torn down */
+ return rval;
+ }
+
fcport = qla_rport->fcport;
vha = fcport->vha;
@@ -560,7 +565,6 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
}
static struct nvme_fc_port_template qla_nvme_fc_transport = {
- .module = THIS_MODULE,
.localport_delete = qla_nvme_localport_delete,
.remoteport_delete = qla_nvme_remoteport_delete,
.create_queue = qla_nvme_alloc_queue,
@@ -672,7 +676,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
struct nvme_fc_port_template *tmpl;
struct qla_hw_data *ha;
struct nvme_fc_port_info pinfo;
- int ret = EINVAL;
+ int ret = -EINVAL;
if (!IS_ENABLED(CONFIG_NVME_FC))
return ret;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 3007eecfa509..7451355f20e0 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1107,7 +1107,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
return ret;
}
- if (qla82xx_flash_set_write_enable(ha))
+ ret = qla82xx_flash_set_write_enable(ha);
+ if (ret < 0)
goto done_write;
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index fff20a370767..7cbdd32a238d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1028,8 +1028,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
- if (rval == QLA_INTERFACE_ERROR)
- goto qc24_free_sp_fail_command;
goto qc24_host_busy_free_sp;
}
@@ -1044,11 +1042,6 @@ qc24_host_busy:
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
-qc24_free_sp_fail_command:
- sp->free(sp);
- CMD_SP(cmd) = NULL;
- qla2xxx_rel_qpair_sp(sp->qpair, sp);
-
qc24_fail_command:
cmd->scsi_done(cmd);
@@ -1997,6 +1990,11 @@ skip_pio:
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = QLA_BASE_VECTORS;
+
+ /* Check if FW supports MQ or not */
+ if (!(ha->fw_attributes & BIT_6))
+ goto mqiobase_exit;
+
if (!ql2xmqsupport || !ql2xnvmeenable ||
(!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
@@ -2714,7 +2712,7 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
struct scsi_qla_host, iocb_work);
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- int i = 20;
+ int i = 2;
unsigned long flags;
if (test_bit(UNLOADING, &base_vha->dpc_flags))
@@ -3654,6 +3652,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
}
qla2x00_wait_for_hba_ready(base_vha);
+ /*
+ * if UNLOADING flag is already set, then continue unload,
+ * where it was set first.
+ */
+ if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
+
if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
if (ha->flags.fw_started)
qla2x00_abort_isp_cleanup(base_vha);
@@ -3671,15 +3676,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_wait_for_sess_deletion(base_vha);
- /*
- * if UNLOAD flag is already set, then continue unload,
- * where it was set first.
- */
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- return;
-
- set_bit(UNLOADING, &base_vha->dpc_flags);
-
qla_nvme_delete(base_vha);
dma_free_coherent(&ha->pdev->dev,
@@ -4603,7 +4599,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
- spin_lock_init(&vha->gnl.fcports_lock);
init_waitqueue_head(&vha->fcport_waitQ);
init_waitqueue_head(&vha->vref_waitq);
@@ -4647,6 +4642,9 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
struct qla_work_evt *e;
uint8_t bail;
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return NULL;
+
QLA_VHA_MARK_BUSY(vha, bail);
if (bail)
return NULL;
@@ -4786,16 +4784,25 @@ qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
return qla2x00_post_work(vha, e);
}
-int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+void qla24xx_sched_upd_fcport(fc_port_t *fcport)
{
- struct qla_work_evt *e;
+ unsigned long flags;
- e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT);
- if (!e)
- return QLA_FUNCTION_FAILED;
+ if (IS_SW_RESV_ADDR(fcport->d_id))
+ return;
- e->u.fcport.fcport = fcport;
- return qla2x00_post_work(vha, e);
+ spin_lock_irqsave(&fcport->vha->work_lock, flags);
+ if (fcport->disc_state == DSC_UPD_FCPORT) {
+ spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
+ return;
+ }
+ fcport->jiffies_at_registration = jiffies;
+ fcport->sec_since_registration = 0;
+ fcport->next_disc_state = DSC_DELETED;
+ fcport->disc_state = DSC_UPD_FCPORT;
+ spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
+
+ queue_work(system_unbound_wq, &fcport->reg_work);
}
static
@@ -5051,9 +5058,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_GPSC:
qla24xx_async_gpsc(vha, e->u.fcport.fcport);
break;
- case QLA_EVT_UPD_FCPORT:
- qla2x00_update_fcport(vha, e->u.fcport.fcport);
- break;
case QLA_EVT_GNL:
qla24xx_async_gnl(vha, e->u.fcport.fcport);
break;
@@ -5845,13 +5849,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
struct pci_dev *pdev = ha->pdev;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- /*
- * if UNLOAD flag is already set, then continue unload,
- * where it was set first.
- */
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- return;
-
ql_log(ql_log_warn, base_vha, 0x015b,
"Disabling adapter.\n");
@@ -5862,9 +5859,14 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
return;
}
- qla2x00_wait_for_sess_deletion(base_vha);
+ /*
+ * if UNLOADING flag is already set, then continue unload,
+ * where it was set first.
+ */
+ if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
- set_bit(UNLOADING, &base_vha->dpc_flags);
+ qla2x00_wait_for_sess_deletion(base_vha);
qla2x00_delete_all_vps(ha, base_vha);
@@ -6086,6 +6088,7 @@ qla2x00_do_dpc(void *data)
if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
+ base_vha->flags.online = 1;
ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
"ISP abort scheduled.\n");
if (ha->isp_ops->abort_isp(base_vha)) {
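Both qla2x00_remove_one() and qla2x00_disable_board_on_pci_error() above now claim the UNLOADING flag with test_and_set_bit() before doing any teardown, so whichever path gets there first owns the unload and the other simply returns. The same claim-once idiom in portable C11, with teardown() as an invented stand-in for the driver-specific cleanup:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag unloading = ATOMIC_FLAG_INIT;

static void teardown(const char *who)
{
    /* atomic_flag_test_and_set() returns the previous value:
     * true means someone else already started the unload. */
    if (atomic_flag_test_and_set(&unloading)) {
        printf("%s: unload already in progress, backing off\n", who);
        return;
    }
    printf("%s: performing the one and only teardown\n", who);
}

int main(void)
{
    teardown("remove_one");             /* wins the race */
    teardown("pci_error_handler");      /* sees the flag, returns early */
    return 0;
}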
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 95206e227730..eb6112eb475e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -600,14 +600,9 @@ void qla2x00_async_nack_sp_done(void *s, int res)
sp->fcport->login_succ = 1;
vha->fcport_count++;
-
- ql_dbg(ql_dbg_disc, vha, 0x20f3,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__,
- sp->fcport->port_name,
- vha->fcport_count);
- sp->fcport->disc_state = DSC_UPD_FCPORT;
- qla24xx_post_upd_fcport_work(vha, sp->fcport);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ qla24xx_sched_upd_fcport(sp->fcport);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
} else {
sp->fcport->login_retry = 0;
sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
@@ -1227,19 +1222,39 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
unsigned long flags;
+ u16 sec;
- if (sess->disc_state == DSC_DELETE_PEND)
+ switch (sess->disc_state) {
+ case DSC_DELETE_PEND:
return;
-
- if (sess->disc_state == DSC_DELETED) {
- if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
- wake_up_all(&tgt->waitQ);
- if (sess->vha->fcport_count == 0)
- wake_up_all(&sess->vha->fcport_waitQ);
-
+ case DSC_DELETED:
if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
- !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
+ !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
+ if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
+ wake_up_all(&tgt->waitQ);
+
+ if (sess->vha->fcport_count == 0)
+ wake_up_all(&sess->vha->fcport_waitQ);
return;
+ }
+ break;
+ case DSC_UPD_FCPORT:
+ /*
+ * This port is not done reporting to upper layer.
+ * let it finish
+ */
+ sess->next_disc_state = DSC_DELETE_PEND;
+ sec = jiffies_to_msecs(jiffies -
+ sess->jiffies_at_registration)/1000;
+ if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
+ sess->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+ "%s %8phC : Slow Rport registration(%d Sec)\n",
+ __func__, sess->port_name, sec);
+ }
+ return;
+ default:
+ break;
}
if (sess->deleted == QLA_SESS_DELETED)
@@ -4749,6 +4764,32 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
goto out;
}
+ if (sess->disc_state == DSC_UPD_FCPORT) {
+ u16 sec;
+
+ /*
+ * Remote port registration is still going on from
+ * previous login. Allow it to finish before we
+ * accept the new login.
+ */
+ sess->next_disc_state = DSC_DELETE_PEND;
+ sec = jiffies_to_msecs(jiffies -
+ sess->jiffies_at_registration) / 1000;
+ if (sess->sec_since_registration < sec && sec &&
+ !(sec % 5)) {
+ sess->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC - Slow Rport registration (%d Sec)\n",
+ __func__, sess->port_name, sec);
+ }
+
+ if (!conflict_sess)
+ kmem_cache_free(qla_tgt_plogi_cachep, pla);
+
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+
qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
sess->d_id = port_id;
sess->login_gen++;
@@ -4908,6 +4949,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
if (sess != NULL) {
bool delete = false;
+ int sec;
spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
switch (sess->fw_login_state) {
case DSC_LS_PLOGI_PEND:
@@ -4920,9 +4962,24 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
switch (sess->disc_state) {
+ case DSC_UPD_FCPORT:
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
+ flags);
+
+ sec = jiffies_to_msecs(jiffies -
+ sess->jiffies_at_registration)/1000;
+ if (sess->sec_since_registration < sec && sec &&
+ !(sec % 5)) {
+ sess->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+ "%s %8phC : Slow Rport registration(%d Sec)\n",
+ __func__, sess->port_name, sec);
+ }
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ return 0;
+
case DSC_LOGIN_PEND:
case DSC_GPDB:
- case DSC_UPD_FCPORT:
case DSC_LOGIN_COMPLETE:
case DSC_ADISC:
delete = false;
@@ -5959,10 +6016,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
case MODE_DUAL:
if (newfcport) {
if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
- ql_dbg(ql_dbg_disc, vha, 0x20fe,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name, vha->fcport_count);
- qla24xx_post_upd_fcport_work(vha, fcport);
+ qla24xx_sched_upd_fcport(fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ff,
"%s %d %8phC post gpsc fcp_cnt %d\n",
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 199d3ba1916d..67a74720c02c 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -124,7 +124,6 @@
(min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
#endif
-#endif
#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \
? le16_to_cpu((iocb)->u.isp2x.target.extended) \
@@ -257,6 +256,7 @@ struct ctio_to_2xxx {
#ifndef CTIO_RET_TYPE
#define CTIO_RET_TYPE 0x17 /* CTIO return entry */
#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
+#endif
struct fcp_hdr {
uint8_t r_ctl;
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 0ccd06f11f12..de3136294097 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -940,7 +940,8 @@ qla27xx_template_checksum(void *p, ulong size)
static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
- return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
+ return qla27xx_template_checksum(tmp,
+ le32_to_cpu(tmp->template_size)) == 0;
}
static inline int
@@ -956,7 +957,7 @@ qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
ulong len;
if (qla27xx_fwdt_template_valid(tmp)) {
- len = tmp->template_size;
+ len = le32_to_cpu(tmp->template_size);
tmp = memcpy(vha->hw->fw_dump, tmp, len);
ql27xx_edit_template(vha, tmp);
qla27xx_walk_template(vha, tmp, tmp, &len);
@@ -972,7 +973,7 @@ qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
ulong len = 0;
if (qla27xx_fwdt_template_valid(tmp)) {
- len = tmp->template_size;
+ len = le32_to_cpu(tmp->template_size);
qla27xx_walk_template(vha, tmp, NULL, &len);
}
@@ -984,7 +985,7 @@ qla27xx_fwdt_template_size(void *p)
{
struct qla27xx_fwdt_template *tmp = p;
- return tmp->template_size;
+ return le32_to_cpu(tmp->template_size);
}
ulong
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index 141c1c5e73f4..2d3e1a8349b3 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -13,7 +13,7 @@
struct __packed qla27xx_fwdt_template {
uint32_t template_type;
uint32_t entry_offset;
- uint32_t template_size;
+ __le32 template_size;
uint32_t reserved_1;
uint32_t entry_count;
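The qla_tmpl changes mark template_size as __le32 and wrap every read in le32_to_cpu(), because the firmware dump template stores the field little-endian regardless of host byte order. A host-independent way to read such a field in plain C, decoding byte by byte instead of trusting the CPU's native order; get_le32() here is an illustrative helper, not a kernel API:

#include <stdint.h>
#include <stdio.h>

/* Decode a 32-bit little-endian field from a byte buffer.
 * Behaves identically on little- and big-endian hosts. */
static uint32_t get_le32(const uint8_t *p)
{
    return (uint32_t)p[0] |
           ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) |
           ((uint32_t)p[3] << 24);
}

int main(void)
{
    /* template_size = 0x00001234 as it appears in the template buffer */
    const uint8_t raw[4] = { 0x34, 0x12, 0x00, 0x00 };

    printf("template_size = %u\n", get_le32(raw));      /* 4660 */
    return 0;
}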
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 654e1af7f542..b51dba35bcf7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -960,6 +960,7 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
return count;
@@ -1122,6 +1123,7 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item,
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
return count;
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index f59b8982b288..4ba9f46fcf74 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1221,7 +1221,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
exit_host_stats:
if (ql_iscsi_stats)
- dma_free_coherent(&ha->pdev->dev, host_stats_size,
+ dma_free_coherent(&ha->pdev->dev, stats_size,
ql_iscsi_stats, iscsi_stats_dma);
ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index a1dbae806fde..d2b045eb7274 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5384,6 +5384,12 @@ static int __init scsi_debug_init(void)
pr_err("submit_queues must be 1 or more\n");
return -EINVAL;
}
+
+ if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
+ pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
+ return -EINVAL;
+ }
+
sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
GFP_KERNEL);
if (sdebug_q_arr == NULL)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c501fb5190a3..fe5ae2b221c1 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -3446,6 +3446,78 @@ void sdev_enable_disk_events(struct scsi_device *sdev)
}
EXPORT_SYMBOL(sdev_enable_disk_events);
+static unsigned char designator_prio(const unsigned char *d)
+{
+ if (d[1] & 0x30)
+ /* not associated with LUN */
+ return 0;
+
+ if (d[3] == 0)
+ /* invalid length */
+ return 0;
+
+ /*
+ * Order of preference for lun descriptor:
+ * - SCSI name string
+ * - NAA IEEE Registered Extended
+ * - EUI-64 based 16-byte
+ * - EUI-64 based 12-byte
+ * - NAA IEEE Registered
+ * - NAA IEEE Extended
+ * - EUI-64 based 8-byte
+ * - SCSI name string (truncated)
+ * - T10 Vendor ID
+ * as longer descriptors reduce the likelihood
+ * of identification clashes.
+ */
+
+ switch (d[1] & 0xf) {
+ case 8:
+ /* SCSI name string, variable-length UTF-8 */
+ return 9;
+ case 3:
+ switch (d[4] >> 4) {
+ case 6:
+ /* NAA registered extended */
+ return 8;
+ case 5:
+ /* NAA registered */
+ return 5;
+ case 4:
+ /* NAA extended */
+ return 4;
+ case 3:
+ /* NAA locally assigned */
+ return 1;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (d[3]) {
+ case 16:
+ /* EUI64-based, 16 byte */
+ return 7;
+ case 12:
+ /* EUI64-based, 12 byte */
+ return 6;
+ case 8:
+ /* EUI64-based, 8 byte */
+ return 3;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ /* T10 vendor ID */
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
/**
* scsi_vpd_lun_id - return a unique device identification
* @sdev: SCSI device
@@ -3462,7 +3534,7 @@ EXPORT_SYMBOL(sdev_enable_disk_events);
*/
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
- u8 cur_id_type = 0xff;
+ u8 cur_id_prio = 0;
u8 cur_id_size = 0;
const unsigned char *d, *cur_id_str;
const struct scsi_vpd *vpd_pg83;
@@ -3475,20 +3547,6 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
return -ENXIO;
}
- /*
- * Look for the correct descriptor.
- * Order of preference for lun descriptor:
- * - SCSI name string
- * - NAA IEEE Registered Extended
- * - EUI-64 based 16-byte
- * - EUI-64 based 12-byte
- * - NAA IEEE Registered
- * - NAA IEEE Extended
- * - T10 Vendor ID
- * as longer descriptors reduce the likelyhood
- * of identification clashes.
- */
-
/* The id string must be at least 20 bytes + terminating NULL byte */
if (id_len < 21) {
rcu_read_unlock();
@@ -3498,8 +3556,9 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
memset(id, 0, id_len);
d = vpd_pg83->data + 4;
while (d < vpd_pg83->data + vpd_pg83->len) {
- /* Skip designators not referring to the LUN */
- if ((d[1] & 0x30) != 0x00)
+ u8 prio = designator_prio(d);
+
+ if (prio == 0 || cur_id_prio > prio)
goto next_desig;
switch (d[1] & 0xf) {
@@ -3507,28 +3566,19 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
/* T10 Vendor ID */
if (cur_id_size > d[3])
break;
- /* Prefer anything */
- if (cur_id_type > 0x01 && cur_id_type != 0xff)
- break;
+ cur_id_prio = prio;
cur_id_size = d[3];
if (cur_id_size + 4 > id_len)
cur_id_size = id_len - 4;
cur_id_str = d + 4;
- cur_id_type = d[1] & 0xf;
id_size = snprintf(id, id_len, "t10.%*pE",
cur_id_size, cur_id_str);
break;
case 0x2:
/* EUI-64 */
- if (cur_id_size > d[3])
- break;
- /* Prefer NAA IEEE Registered Extended */
- if (cur_id_type == 0x3 &&
- cur_id_size == d[3])
- break;
+ cur_id_prio = prio;
cur_id_size = d[3];
cur_id_str = d + 4;
- cur_id_type = d[1] & 0xf;
switch (cur_id_size) {
case 8:
id_size = snprintf(id, id_len,
@@ -3546,17 +3596,14 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
cur_id_str);
break;
default:
- cur_id_size = 0;
break;
}
break;
case 0x3:
/* NAA */
- if (cur_id_size > d[3])
- break;
+ cur_id_prio = prio;
cur_id_size = d[3];
cur_id_str = d + 4;
- cur_id_type = d[1] & 0xf;
switch (cur_id_size) {
case 8:
id_size = snprintf(id, id_len,
@@ -3569,26 +3616,25 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
cur_id_str);
break;
default:
- cur_id_size = 0;
break;
}
break;
case 0x8:
/* SCSI name string */
- if (cur_id_size + 4 > d[3])
+ if (cur_id_size > d[3])
break;
/* Prefer others for truncated descriptor */
- if (cur_id_size && d[3] > id_len)
- break;
+ if (d[3] > id_len) {
+ prio = 2;
+ if (cur_id_prio > prio)
+ break;
+ }
+ cur_id_prio = prio;
cur_id_size = id_size = d[3];
cur_id_str = d + 4;
- cur_id_type = d[1] & 0xf;
if (cur_id_size >= id_len)
cur_id_size = id_len - 1;
memcpy(id, cur_id_str, cur_id_size);
- /* Decrease priority for truncated descriptor */
- if (cur_id_size != id_size)
- cur_id_size = 6;
break;
default:
break;
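designator_prio() above collapses the old per-type comparison rules into one numeric scale, and scsi_vpd_lun_id() now simply keeps the highest-priority designator it has seen. The selection loop reduces to the classic keep-the-best-so-far pattern; a stripped-down sketch with a fake descriptor table (the priorities roughly mirror the ordering in the hunk, but the entries are invented):

#include <stddef.h>
#include <stdio.h>

struct desig {
    const char *name;
    int prio;                   /* 0 means "not usable for identification" */
};

static const struct desig table[] = {
    { "T10 vendor ID",           1 },
    { "NAA registered extended", 8 },
    { "EUI-64 (16 byte)",        7 },
    { "vendor specific",         0 },
    { "SCSI name string",        9 },
};

int main(void)
{
    const struct desig *best = NULL;
    size_t i;

    for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        if (table[i].prio == 0)
            continue;                           /* skip unusable designators */
        if (!best || table[i].prio > best->prio)
            best = &table[i];
    }
    if (best)
        printf("using %s (prio %d)\n", best->name, best->prio);
    return 0;
}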
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 9a7e3a3bd5ce..009a5b2aa3d0 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1722,15 +1722,16 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
*/
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
- struct async_scan_data *data;
+ struct async_scan_data *data = NULL;
unsigned long flags;
if (strncmp(scsi_scan_type, "sync", 4) == 0)
return NULL;
+ mutex_lock(&shost->scan_mutex);
if (shost->async_scan) {
shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
- return NULL;
+ goto err;
}
data = kmalloc(sizeof(*data), GFP_KERNEL);
@@ -1741,7 +1742,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
goto err;
init_completion(&data->prev_finished);
- mutex_lock(&shost->scan_mutex);
spin_lock_irqsave(shost->host_lock, flags);
shost->async_scan = 1;
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -1756,6 +1756,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
return data;
err:
+ mutex_unlock(&shost->scan_mutex);
kfree(data);
return NULL;
}
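scsi_prep_async_scan() now takes scan_mutex before it tests shost->async_scan, so the "called twice" check and the flag update happen inside the same critical section; checking first and locking later left a window where two scans could both pass the test. A tiny pthread illustration of why the check has to sit under the lock (names invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool async_scan;

/* Returns true if this caller won the right to run the scan. */
static bool prep_async_scan(void)
{
    bool ok = false;

    pthread_mutex_lock(&scan_mutex);
    if (!async_scan) {          /* test and set under one lock */
        async_scan = true;
        ok = true;
    }
    pthread_mutex_unlock(&scan_mutex);
    return ok;
}

int main(void)
{
    printf("first caller:  %s\n", prep_async_scan() ? "scans" : "skips");
    printf("second caller: %s\n", prep_async_scan() ? "scans" : "skips");
    return 0;
}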
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index c0fb9e789080..e340b05278b6 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -119,7 +119,11 @@ show_transport_handle(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
- return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ return sysfs_emit(buf, "%llu\n",
+ (unsigned long long)iscsi_handle(priv->iscsi_transport));
}
static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
@@ -129,7 +133,7 @@ show_transport_##name(struct device *dev, \
struct device_attribute *attr,char *buf) \
{ \
struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
- return sprintf(buf, format"\n", priv->iscsi_transport->name); \
+ return sysfs_emit(buf, format"\n", priv->iscsi_transport->name);\
} \
static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
@@ -170,7 +174,7 @@ static ssize_t
show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- return sprintf(buf, "%llu\n", (unsigned long long) ep->id);
+ return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
}
static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
@@ -2010,7 +2014,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
if (session->target_id == ISCSI_MAX_TARGET) {
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
- return;
+ goto unbind_session_exit;
}
target_id = session->target_id;
@@ -2022,6 +2026,8 @@ static void __iscsi_unbind_session(struct work_struct *work)
ida_simple_remove(&iscsi_sess_ida, target_id);
scsi_remove_target(&session->dev);
+
+unbind_session_exit:
iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
}
@@ -2761,6 +2767,9 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
struct iscsi_cls_session *session;
int err = 0, value = 0;
+ if (ev->u.set_param.len > PAGE_SIZE)
+ return -EINVAL;
+
session = iscsi_session_lookup(ev->u.set_param.sid);
conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid);
if (!conn || !session)
@@ -2908,6 +2917,9 @@ iscsi_set_host_param(struct iscsi_transport *transport,
if (!transport->set_host_param)
return -ENOSYS;
+ if (ev->u.set_host_param.len > PAGE_SIZE)
+ return -EINVAL;
+
shost = scsi_host_lookup(ev->u.set_host_param.host_no);
if (!shost) {
printk(KERN_ERR "set_host_param could not find host no %u\n",
@@ -3170,7 +3182,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.set_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_set_fnode;
}
idx = ev->u.set_flashnode.flashnode_idx;
@@ -3495,6 +3507,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
int err = 0;
u32 portid;
+ u32 pdu_len;
struct iscsi_uevent *ev = nlmsg_data(nlh);
struct iscsi_transport *transport = NULL;
struct iscsi_internal *priv;
@@ -3502,6 +3515,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
struct iscsi_cls_conn *conn;
struct iscsi_endpoint *ep = NULL;
+ if (!netlink_capable(skb, CAP_SYS_ADMIN))
+ return -EPERM;
+
if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
*group = ISCSI_NL_GRP_UIP;
else
@@ -3609,6 +3625,14 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
err = -EINVAL;
break;
case ISCSI_UEVENT_SEND_PDU:
+ pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
+
+ if ((ev->u.send_pdu.hdr_size > pdu_len) ||
+ (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
+ err = -EINVAL;
+ break;
+ }
+
conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
if (conn)
ev->r.retcode = transport->send_pdu(conn,
@@ -4015,7 +4039,7 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
- return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+ return sysfs_emit(buf, "%s\n", iscsi_session_state_name(session->state));
}
static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
NULL);
@@ -4024,7 +4048,7 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
- return sprintf(buf, "%d\n", session->creator);
+ return sysfs_emit(buf, "%d\n", session->creator);
}
static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
NULL);
@@ -4033,7 +4057,7 @@ show_priv_session_target_id(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
- return sprintf(buf, "%d\n", session->target_id);
+ return sysfs_emit(buf, "%d\n", session->target_id);
}
static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO,
show_priv_session_target_id, NULL);
@@ -4046,8 +4070,8 @@ show_priv_session_##field(struct device *dev, \
struct iscsi_cls_session *session = \
iscsi_dev_to_session(dev->parent); \
if (session->field == -1) \
- return sprintf(buf, "off\n"); \
- return sprintf(buf, format"\n", session->field); \
+ return sysfs_emit(buf, "off\n"); \
+ return sysfs_emit(buf, format"\n", session->field); \
}
#define iscsi_priv_session_attr_store(field) \
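The ISCSI_UEVENT_SEND_PDU hunk earlier in this file rejects requests whose hdr_size or data_size would run past the netlink payload; because the short-circuiting || checks hdr_size first, the pdu_len - hdr_size subtraction on the right can never wrap. A standalone sketch of that validation order on unsigned lengths; validate_pdu() is an invented helper, not part of the transport API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Reject the request unless header and data both fit inside the payload. */
static bool validate_pdu(uint32_t pdu_len, uint32_t hdr_size, uint32_t data_size)
{
    if (hdr_size > pdu_len)
        return false;
    /* safe: pdu_len - hdr_size cannot underflow after the check above */
    if (data_size > pdu_len - hdr_size)
        return false;
    return true;
}

int main(void)
{
    printf("%d\n", validate_pdu(100, 48, 52));  /* 1: exactly fits */
    printf("%d\n", validate_pdu(100, 48, 53));  /* 0: data overruns the payload */
    printf("%d\n", validate_pdu(100, 200, 0));  /* 0: header alone is too big */
    return 0;
}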
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 40b85b752b79..efb9c3d90213 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -130,12 +130,16 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
sshdr = &sshdr_tmp;
for(i = 0; i < DV_RETRIES; i++) {
+ /*
+ * The purpose of the RQF_PM flag below is to bypass the
+ * SDEV_QUIESCE state.
+ */
result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
sshdr, DV_TIMEOUT, /* retries */ 1,
REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
- 0, NULL);
+ RQF_PM, NULL);
if (driver_byte(result) != DRIVER_SENSE ||
sshdr->sense_key != UNIT_ATTENTION)
break;
@@ -352,7 +356,7 @@ store_spi_transport_##field(struct device *dev, \
struct spi_transport_attrs *tp \
= (struct spi_transport_attrs *)&starget->starget_data; \
\
- if (i->f->set_##field) \
+ if (!i->f->set_##field) \
return -EINVAL; \
val = simple_strtoul(buf, NULL, 0); \
if (val > tp->max_##field) \
@@ -1018,23 +1022,26 @@ spi_dv_device(struct scsi_device *sdev)
*/
lock_system_sleep();
+ if (scsi_autopm_get_device(sdev))
+ goto unlock_system_sleep;
+
if (unlikely(spi_dv_in_progress(starget)))
- goto unlock;
+ goto put_autopm;
if (unlikely(scsi_device_get(sdev)))
- goto unlock;
+ goto put_autopm;
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
if (unlikely(!buffer))
- goto out_put;
+ goto put_sdev;
/* We need to verify that the actual device will quiesce; the
* later target quiesce is just a nice to have */
if (unlikely(scsi_device_quiesce(sdev)))
- goto out_free;
+ goto free_buffer;
scsi_target_quiesce(starget);
@@ -1054,12 +1061,16 @@ spi_dv_device(struct scsi_device *sdev)
spi_initial_dv(starget) = 1;
- out_free:
+free_buffer:
kfree(buffer);
- out_put:
+
+put_sdev:
spi_dv_in_progress(starget) = 0;
scsi_device_put(sdev);
-unlock:
+put_autopm:
+ scsi_autopm_put_device(sdev);
+
+unlock_system_sleep:
unlock_system_sleep();
}
EXPORT_SYMBOL(spi_dv_device);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 4e46fdb2d7c9..9ac89462a8e0 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -555,7 +555,14 @@ int srp_reconnect_rport(struct srp_rport *rport)
res = mutex_lock_interruptible(&rport->mutex);
if (res)
goto out;
- scsi_target_block(&shost->shost_gendev);
+ if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
+ /*
+ * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
+ * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
+ * later is ok though, scsi_internal_device_unblock_nowait()
+ * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
+ */
+ scsi_target_block(&shost->shost_gendev);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index f2f14d8d5943..342352d8e7bf 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3348,7 +3348,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
}
blk_pm_runtime_init(sdp->request_queue, dev);
- device_add_disk(dev, gd);
+ device_add_disk(dev, gd, NULL);
if (sdkp->capacity)
sd_dif_config_host(sdkp);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8a254bb46a9b..6bb45ae19d58 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -694,8 +694,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
- if (__copy_from_user(cmnd, buf, cmd_size))
+ if (__copy_from_user(cmnd, buf, cmd_size)) {
+ sg_remove_request(sfp, srp);
return -EFAULT;
+ }
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
* but is is possible that the app intended SG_DXFER_TO_DEV, because there
@@ -808,8 +810,10 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
- if (hp->dxfer_len >= SZ_256M)
+ if (hp->dxfer_len >= SZ_256M) {
+ sg_remove_request(sfp, srp);
return -EINVAL;
+ }
k = sg_start_req(srp, cmnd);
if (k) {
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index b209a35e482e..01dfb97b0778 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -50,9 +50,9 @@ static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy)
struct sas_phy *phy = pqi_sas_phy->phy;
sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy);
- sas_phy_free(phy);
if (pqi_sas_phy->added_to_port)
list_del(&pqi_sas_phy->phy_list_entry);
+ sas_phy_delete(phy);
kfree(pqi_sas_phy);
}
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 3102a75984d3..aed91afb79b6 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -71,6 +71,7 @@ static int snirm710_probe(struct platform_device *dev)
struct NCR_700_Host_Parameters *hostdata;
struct Scsi_Host *host;
struct resource *res;
+ int rc;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
@@ -96,7 +97,9 @@ static int snirm710_probe(struct platform_device *dev)
goto out_kfree;
host->this_id = 7;
host->base = base;
- host->irq = platform_get_irq(dev, 0);
+ host->irq = rc = platform_get_irq(dev, 0);
+ if (rc < 0)
+ goto out_put_host;
if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) {
printk(KERN_ERR "snirm710: request_irq failed!\n");
goto out_put_host;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index d0389b20574d..45c8bf39ad23 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -748,7 +748,7 @@ static int sr_probe(struct device *dev)
cd->cdi.disk = disk;
if (register_cdrom(&cd->cdi))
- goto fail_put;
+ goto fail_minor;
/*
* Initialize block layer runtime PM stuffs before the
@@ -758,7 +758,7 @@ static int sr_probe(struct device *dev)
dev_set_drvdata(dev, cd);
disk->flags |= GENHD_FL_REMOVABLE;
- device_add_disk(&sdev->sdev_gendev, disk);
+ device_add_disk(&sdev->sdev_gendev, disk, NULL);
sdev_printk(KERN_DEBUG, sdev,
"Attached scsi CD-ROM %s\n", cd->cdi.name);
@@ -766,6 +766,10 @@ static int sr_probe(struct device *dev)
return 0;
+fail_minor:
+ spin_lock(&sr_index_lock);
+ clear_bit(minor, sr_index_bits);
+ spin_unlock(&sr_index_lock);
fail_put:
put_disk(disk);
fail_free:
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index e3b0ce25162b..b9db2ec6d036 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -66,9 +66,6 @@
void sr_vendor_init(Scsi_CD *cd)
{
-#ifndef CONFIG_BLK_DEV_SR_VENDOR
- cd->vendor = VENDOR_SCSI3;
-#else
const char *vendor = cd->device->vendor;
const char *model = cd->device->model;
@@ -100,7 +97,6 @@ void sr_vendor_init(Scsi_CD *cd)
cd->vendor = VENDOR_TOSHIBA;
}
-#endif
}
@@ -114,10 +110,8 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength)
struct ccs_modesel_head *modesel;
int rc, density = 0;
-#ifdef CONFIG_BLK_DEV_SR_VENDOR
if (cd->vendor == VENDOR_TOSHIBA)
density = (blocklength > 2048) ? 0x81 : 0x83;
-#endif
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
if (!buffer)
@@ -205,7 +199,6 @@ int sr_cd_check(struct cdrom_device_info *cdi)
}
break;
-#ifdef CONFIG_BLK_DEV_SR_VENDOR
case VENDOR_NEC:{
unsigned long min, sec, frame;
cgc.cmd[0] = 0xde;
@@ -298,7 +291,6 @@ int sr_cd_check(struct cdrom_device_info *cdi)
sector = buffer[11] + (buffer[10] << 8) +
(buffer[9] << 16) + (buffer[8] << 24);
break;
-#endif /* CONFIG_BLK_DEV_SR_VENDOR */
default:
/* should not happen */
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 307df2fa39a3..5078db7743cd 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1265,8 +1265,8 @@ static int st_open(struct inode *inode, struct file *filp)
spin_lock(&st_use_lock);
if (STp->in_use) {
spin_unlock(&st_use_lock);
- scsi_tape_put(STp);
DEBC_printk(STp, "Device already in use.\n");
+ scsi_tape_put(STp);
return (-EBUSY);
}
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 0b1421cdf8a0..f9aa95e48eee 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -233,7 +233,9 @@ static int esp_sun3x_probe(struct platform_device *dev)
if (!esp->command_block)
goto fail_unmap_regs_dma;
- host->irq = platform_get_irq(dev, 0);
+ host->irq = err = platform_get_irq(dev, 0);
+ if (err < 0)
+ goto fail_unmap_command_block;
err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
"SUN3X ESP", esp);
if (err < 0)
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index c2cee73a8560..2bb8bdc2539b 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -543,21 +543,24 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
- host->rst = devm_reset_control_get(dev, "rst");
+ host->rst = devm_reset_control_get(dev, "rst");
if (IS_ERR(host->rst)) {
dev_err(dev, "%s: failed to get reset control\n", __func__);
- return PTR_ERR(host->rst);
+ err = PTR_ERR(host->rst);
+ goto error;
}
ufs_hisi_set_pm_lvl(hba);
err = ufs_hisi_get_resource(host);
- if (err) {
- ufshcd_set_variant(hba, NULL);
- return err;
- }
+ if (err)
+ goto error;
return 0;
+
+error:
+ ufshcd_set_variant(hba, NULL);
+ return err;
}
static int ufs_hi3660_init(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 75ee5906b966..798a74535ea7 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1581,9 +1581,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
*/
}
mask <<= offset;
-
- pm_runtime_get_sync(host->hba->dev);
- ufshcd_hold(host->hba, false);
ufshcd_rmwl(host->hba, TEST_BUS_SEL,
(u32)host->testbus.select_major << 19,
REG_UFS_CFG1);
@@ -1596,8 +1593,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
* committed before returning.
*/
mb();
- ufshcd_release(host->hba);
- pm_runtime_put_sync(host->hba->dev);
return 0;
}
@@ -1635,11 +1630,11 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
/* sleep a bit intermittently as we are dumping too much data */
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
- usleep_range(1000, 1100);
+ udelay(1000);
ufs_qcom_testbus_read(hba);
- usleep_range(1000, 1100);
+ udelay(1000);
ufs_qcom_print_unipro_testbus(hba);
- usleep_range(1000, 1100);
+ udelay(1000);
}
/**
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 5d2dfdb41a6f..758d3a67047d 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -21,6 +21,7 @@
#define UFS_ANY_VENDOR 0xFFFF
#define UFS_ANY_MODEL "ANY_MODEL"
+#define UFS_VENDOR_MICRON 0x12C
#define UFS_VENDOR_TOSHIBA 0x198
#define UFS_VENDOR_SAMSUNG 0x1CE
#define UFS_VENDOR_SKHYNIX 0x1AD
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index ffe6f82182ba..68f4f67c5ff8 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -96,6 +96,30 @@ static int ufshcd_pci_resume(struct device *dev)
{
return ufshcd_system_resume(dev_get_drvdata(dev));
}
+
+/**
+ * ufshcd_pci_poweroff - suspend-to-disk poweroff function
+ * @dev: pointer to PCI device handle
+ *
+ * Returns 0 if successful
+ * Returns non-zero otherwise
+ */
+static int ufshcd_pci_poweroff(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int spm_lvl = hba->spm_lvl;
+ int ret;
+
+ /*
+ * For poweroff we need to set the UFS device to PowerDown mode.
+ * Force spm_lvl to ensure that.
+ */
+ hba->spm_lvl = 5;
+ ret = ufshcd_system_suspend(hba);
+ hba->spm_lvl = spm_lvl;
+ return ret;
+}
+
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
@@ -190,8 +214,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend,
- ufshcd_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_pci_suspend,
+ .resume = ufshcd_pci_resume,
+ .freeze = ufshcd_pci_suspend,
+ .thaw = ufshcd_pci_resume,
+ .poweroff = ufshcd_pci_poweroff,
+ .restore = ufshcd_pci_resume,
+#endif
SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
ufshcd_pci_runtime_resume,
ufshcd_pci_runtime_idle)
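The pm_ops hunk above spells out the hibernation callbacks instead of using SET_SYSTEM_SLEEP_PM_OPS(), so that .poweroff can take a deeper path (forcing the UFS device to PowerDown) while freeze/thaw keep the lighter suspend. A stripped-down sketch of that shape; the foo_* callbacks are placeholders:

#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)  { return 0; }
static int __maybe_unused foo_resume(struct device *dev)   { return 0; }

/* Deeper variant used only for the final power-off of hibernation. */
static int __maybe_unused foo_poweroff(struct device *dev)
{
	return foo_suspend(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend  = foo_suspend,	/* suspend-to-RAM */
	.resume   = foo_resume,
	.freeze   = foo_suspend,	/* creating the hibernation image */
	.thaw     = foo_resume,
	.poweroff = foo_poweroff,	/* powering down after the image is written */
	.restore  = foo_resume,		/* resuming from the hibernation image */
#endif
};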
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b3dee24917a8..b18430efb00f 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -218,6 +218,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
static struct ufs_dev_fix ufs_fixups[] = {
/* UFS cards deviations table */
+ UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
@@ -351,27 +353,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
u8 opcode = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = lrbp->cmd;
int transfer_len = -1;
if (!trace_ufshcd_command_enabled()) {
/* trace UPIU W/O tracing command */
- if (lrbp->cmd)
+ if (cmd)
ufshcd_add_cmd_upiu_trace(hba, tag, str);
return;
}
- if (lrbp->cmd) { /* data phase exists */
+ if (cmd) { /* data phase exists */
/* trace UPIU also */
ufshcd_add_cmd_upiu_trace(hba, tag, str);
- opcode = (u8)(*lrbp->cmd->cmnd);
+ opcode = cmd->cmnd[0];
if ((opcode == READ_10) || (opcode == WRITE_10)) {
/*
* Currently we only fully trace read(10) and write(10)
* commands
*/
- if (lrbp->cmd->request && lrbp->cmd->request->bio)
- lba =
- lrbp->cmd->request->bio->bi_iter.bi_sector;
+ if (cmd->request && cmd->request->bio)
+ lba = cmd->request->bio->bi_iter.bi_sector;
transfer_len = be32_to_cpu(
lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
}
@@ -1279,8 +1281,15 @@ static int ufshcd_devfreq_target(struct device *dev,
}
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ pm_runtime_get_noresume(hba->dev);
+ if (!pm_runtime_active(hba->dev)) {
+ pm_runtime_put_noidle(hba->dev);
+ ret = -EAGAIN;
+ goto out;
+ }
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, scale_up);
+ pm_runtime_put(hba->dev);
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
@@ -1538,6 +1547,7 @@ unblock_reqs:
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
int rc = 0;
+ bool flush_result;
unsigned long flags;
if (!ufshcd_is_clkgating_allowed(hba))
@@ -1563,8 +1573,15 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
+ if (async) {
+ rc = -EAGAIN;
+ hba->clk_gating.active_reqs--;
+ break;
+ }
spin_unlock_irqrestore(hba->host->host_lock, flags);
- flush_work(&hba->clk_gating.ungate_work);
+ flush_result = flush_work(&hba->clk_gating.ungate_work);
+ if (hba->clk_gating.is_suspended && !flush_result)
+ goto out;
spin_lock_irqsave(hba->host->host_lock, flags);
goto start;
}
@@ -1582,12 +1599,12 @@ start:
* work and to enable clocks.
*/
case CLKS_OFF:
- ufshcd_scsi_block_requests(hba);
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- queue_work(hba->clk_gating.clk_gating_workq,
- &hba->clk_gating.ungate_work);
+ if (queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.ungate_work))
+ ufshcd_scsi_block_requests(hba);
/*
* fall through to check if we should wait for this
* work to be done or not.
@@ -1900,12 +1917,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
hba->lrb[task_tag].issue_time_stamp = ktime_get();
hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
+ ufshcd_add_command_trace(hba, task_tag, "send");
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* Make sure that doorbell is committed immediately */
wmb();
- ufshcd_add_command_trace(hba, task_tag, "send");
}
/**
@@ -2500,6 +2517,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
err = ufshcd_map_sg(hba, lrbp);
if (err) {
+ ufshcd_release(hba);
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
@@ -3565,7 +3583,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
- "dme-reset: error code %d\n", ret);
+ "dme-enable: error code %d\n", ret);
return ret;
}
@@ -5116,7 +5134,6 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
err = ufshcd_enable_auto_bkops(hba);
else
err = ufshcd_disable_auto_bkops(hba);
- hba->urgent_bkops_lvl = curr_status;
out:
return err;
}
@@ -5595,7 +5612,7 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
- u32 intr_status, enabled_intr_status;
+ u32 intr_status, enabled_intr_status = 0;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;
@@ -5609,7 +5626,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
* read, make sure we handle them by checking the interrupt status
* again in a loop until we process all of the reqs before returning.
*/
- do {
+ while (intr_status && retries--) {
enabled_intr_status =
intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
if (intr_status)
@@ -5620,7 +5637,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
}
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- } while (intr_status && --retries);
+ }
spin_unlock(hba->host->host_lock);
return retval;
@@ -5755,19 +5772,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
- unsigned int tag;
u32 pos;
int err;
- u8 resp = 0xF;
- struct ufshcd_lrb *lrbp;
+ u8 resp = 0xF, lun;
unsigned long flags;
host = cmd->device->host;
hba = shost_priv(host);
- tag = cmd->request->tag;
- lrbp = &hba->lrb[tag];
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+ lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+ err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
if (!err)
err = resp;
@@ -5776,7 +5790,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* clear the commands that were pending for corresponding LUN */
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
- if (hba->lrb[pos].lun == lrbp->lun) {
+ if (hba->lrb[pos].lun == lun) {
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
@@ -5920,7 +5934,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
/* command completed already */
dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
__func__, tag);
- goto out;
+ goto cleanup;
} else {
dev_err(hba->dev,
"%s: no response from device. tag = %d, err %d\n",
@@ -5954,6 +5968,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto out;
}
+cleanup:
scsi_dma_unmap(cmd);
spin_lock_irqsave(host->host_lock, flags);
@@ -7915,11 +7930,7 @@ int ufshcd_shutdown(struct ufs_hba *hba)
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
goto out;
- if (pm_runtime_suspended(hba->dev)) {
- ret = ufshcd_runtime_resume(hba);
- if (ret)
- goto out;
- }
+ pm_runtime_get_sync(hba->dev);
ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
out:
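The ufshcd_shutdown() hunk replaces the conditional runtime resume with an unconditional pm_runtime_get_sync(), which both wakes a runtime-suspended controller and keeps it active while it is being powered down. The same shape in isolation, with a placeholder power-down helper:

#include <linux/pm_runtime.h>

static void foo_power_down(struct device *dev)
{
	/* device-specific shutdown sequence */
}

static void foo_shutdown(struct device *dev)
{
	/*
	 * Resume the device if it was runtime-suspended and pin it active;
	 * shutdown paths typically ignore the return value.
	 */
	pm_runtime_get_sync(dev);

	foo_power_down(dev);
}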
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 95b00d28ad6e..3e63e4ce45b0 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -236,6 +236,7 @@ EXPORT_SYMBOL_GPL(slim_register_controller);
/* slim_remove_device: Remove the effect of slim_add_device() */
static void slim_remove_device(struct slim_device *sbdev)
{
+ of_node_put(sbdev->dev.of_node);
device_unregister(&sbdev->dev);
}
@@ -254,8 +255,6 @@ int slim_unregister_controller(struct slim_controller *ctrl)
{
/* Remove all clients */
device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
- /* Enter Clock Pause */
- slim_ctrl_clk_pause(ctrl, false, 0);
ida_simple_remove(&ctrl_ida, ctrl->id);
return 0;
@@ -296,8 +295,8 @@ void slim_report_absent(struct slim_device *sbdev)
mutex_lock(&ctrl->lock);
sbdev->is_laddr_valid = false;
mutex_unlock(&ctrl->lock);
-
- ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
+ if (!ctrl->get_laddr)
+ ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
}
EXPORT_SYMBOL_GPL(slim_report_absent);
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 9221ba7b7863..44021620d101 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1200,6 +1200,9 @@ static int qcom_slim_ngd_runtime_resume(struct device *dev)
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
int ret = 0;
+ if (!ctrl->qmi.handle)
+ return 0;
+
if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
ret = qcom_slim_ngd_power_up(ctrl);
if (ret) {
@@ -1272,9 +1275,13 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
{
struct qcom_slim_ngd_qmi *qmi =
container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
+ struct qcom_slim_ngd_ctrl *ctrl =
+ container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
qmi->svc_info.sq_node = 0;
qmi->svc_info.sq_port = 0;
+
+ qcom_slim_ngd_enable(ctrl, false);
}
static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
@@ -1350,7 +1357,6 @@ static int of_qcom_slim_ngd_register(struct device *parent,
ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
ngd->pdev->dev.of_node = node;
ctrl->ngd = ngd;
- platform_set_drvdata(ngd->pdev, ctrl);
platform_device_add(ngd->pdev);
ngd->base = ctrl->base + ngd->id * data->offset +
@@ -1365,12 +1371,13 @@ static int of_qcom_slim_ngd_register(struct device *parent,
static int qcom_slim_ngd_probe(struct platform_device *pdev)
{
- struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
+ struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev->parent);
int ret;
ctrl->ctrl.dev = dev;
+ platform_set_drvdata(pdev, ctrl);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND);
pm_runtime_set_suspended(dev);
@@ -1489,6 +1496,9 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
int ret = 0;
+ if (!ctrl->qmi.handle)
+ return 0;
+
ret = qcom_slim_qmi_power_request(ctrl, false);
if (ret && ret != -EBUSY)
dev_info(ctrl->dev, "slim resource not idle:%d\n", ret);
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 4dd03b099c89..76117405d841 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -254,8 +254,21 @@ struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
return soc_dev;
}
+static const struct of_device_id at91_soc_allowed_list[] __initconst = {
+ { .compatible = "atmel,at91rm9200", },
+ { .compatible = "atmel,at91sam9", },
+ { .compatible = "atmel,sama5", },
+ { .compatible = "atmel,samv7", },
+ { }
+};
+
static int __init atmel_soc_device_init(void)
{
+ struct device_node *np = of_find_node_by_path("/");
+
+ if (!of_match_node(at91_soc_allowed_list, np))
+ return 0;
+
at91_soc_init(socs);
return 0;
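The atmel/soc.c change is the usual guard for SoC identification code built into multi-platform kernels: match the root node's compatible against an allow-list and return quietly on foreign hardware. A minimal sketch with placeholder names; note the sketch also drops the root-node reference with of_node_put(), which the hunk above does not show:

#include <linux/init.h>
#include <linux/of.h>

static const struct of_device_id foo_allowed_roots[] __initconst = {
	{ .compatible = "vendor,supported-soc" },
	{ /* sentinel */ }
};

static int __init foo_soc_init(void)
{
	struct device_node *np = of_find_node_by_path("/");
	bool supported = of_match_node(foo_allowed_roots, np) != NULL;

	of_node_put(np);		/* balance of_find_node_by_path() */
	if (!supported)
		return 0;		/* not our SoC: do nothing, succeed */

	/* ... register the soc_device here ... */
	return 0;
}
device_initcall(foo_soc_init);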
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index b60b77bfaffa..ea6f8904c01b 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -53,7 +53,6 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
struct dpio_priv *priv;
int error;
struct fsl_mc_device_irq *irq;
- cpumask_t mask;
priv = dev_get_drvdata(&dpio_dev->dev);
@@ -72,9 +71,7 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
}
/* set the affinity hint */
- cpumask_clear(&mask);
- cpumask_set_cpu(cpu, &mask);
- if (irq_set_affinity_hint(irq->msi_desc->irq, &mask))
+ if (irq_set_affinity_hint(irq->msi_desc->irq, cpumask_of(cpu)))
dev_err(&dpio_dev->dev,
"irq_set_affinity failed irq %d cpu %d\n",
irq->msi_desc->irq, cpu);
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index a4ac6073c555..d7bf456fd10e 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -184,7 +184,7 @@ struct qm_eqcr_entry {
__be32 tag;
struct qm_fd fd;
u8 __reserved3[32];
-} __packed;
+} __packed __aligned(8);
#define QM_EQCR_VERB_VBIT 0x80
#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
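The qman.c one-liner addresses an attribute interaction: __packed drops a structure's natural alignment to 1, which can break hardware rings that expect each entry to start on an 8-byte boundary, so the alignment is re-asserted explicitly. A toy illustration with a made-up layout:

#include <linux/types.h>

/*
 * Hypothetical 32-byte descriptor: packed so no padding is inserted
 * between fields, yet still guaranteed to start on an 8-byte boundary
 * (and to keep sizeof() a multiple of 8) thanks to __aligned(8).
 */
struct foo_hw_desc {
	u8	verb;
	u8	flags;
	__be16	seq;
	__be32	addr_hi;
	__be32	addr_lo;
	u8	rsvd[20];
} __packed __aligned(8);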
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index d160fc2a7b7a..56c019ec7f14 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -93,8 +93,8 @@ static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd)
static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
{
struct imx_pm_domain *pd = to_imx_pm_domain(genpd);
- int i, ret, sw, sw2iso;
- u32 val;
+ int i, ret;
+ u32 val, req;
if (pd->supply) {
ret = regulator_enable(pd->supply);
@@ -113,17 +113,18 @@ static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS,
0x1, 0x1);
- /* Read ISO and ISO2SW power up delays */
- regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val);
- sw = val & 0x3f;
- sw2iso = (val >> 8) & 0x3f;
-
/* Request GPC to power up domain */
- val = BIT(pd->cntr_pdn_bit + 1);
- regmap_update_bits(pd->regmap, GPC_CNTR, val, val);
+ req = BIT(pd->cntr_pdn_bit + 1);
+ regmap_update_bits(pd->regmap, GPC_CNTR, req, req);
- /* Wait ISO + ISO2SW IPG clock cycles */
- udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz));
+ /* Wait for the PGC to handle the request */
+ ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req),
+ 1, 50);
+ if (ret)
+ pr_err("powerup request on domain %s timed out\n", genpd->name);
+
+ /* Wait for reset to propagate through peripherals */
+ usleep_range(5, 10);
/* Disable reset clocks for all devices in the domain */
for (i = 0; i < pd->num_clks; i++)
@@ -345,6 +346,7 @@ static const struct regmap_config imx_gpc_regmap_config = {
.rd_table = &access_table,
.wr_table = &access_table,
.max_register = 0x2ac,
+ .fast_io = true,
};
static struct generic_pm_domain *imx_gpc_onecell_domains[] = {
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 5b24bb4bfbf6..ef54f1638d20 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -454,6 +454,7 @@ static void mtk_register_power_domains(struct platform_device *pdev,
for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
struct generic_pm_domain *genpd = &scpd->genpd;
+ bool on;
/*
* Initially turn on all domains to make the domains usable
@@ -461,9 +462,9 @@ static void mtk_register_power_domains(struct platform_device *pdev,
* software. The unused domains will be switched off during
* late_init time.
*/
- genpd->power_on(genpd);
+ on = !WARN_ON(genpd->power_on(genpd) < 0);
- pm_genpd_init(genpd, NULL, false);
+ pm_genpd_init(genpd, NULL, !on);
}
/*
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index 1c488024c698..47dffe7736ff 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -168,6 +168,14 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
break;
}
+ if (phdr->p_filesz > phdr->p_memsz) {
+ dev_err(dev,
+ "refusing to load segment %d with p_filesz > p_memsz\n",
+ i);
+ ret = -EINVAL;
+ break;
+ }
+
ptr = mem_region + offset;
if (phdr->p_filesz) {
@@ -179,6 +187,15 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
break;
}
+ if (seg_fw->size != phdr->p_filesz) {
+ dev_err(dev,
+ "failed to load segment %d from truncated file %s\n",
+ i, fw_name);
+ release_firmware(seg_fw);
+ ret = -EINVAL;
+ break;
+ }
+
release_firmware(seg_fw);
}
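The two mdt_loader checks close the same hole from both ends: the ELF program header must not claim more file data than the memory window it maps (p_filesz > p_memsz), and the segment blob actually loaded from disk must be exactly as long as the header promised. Condensed into one validation helper, with the copy itself elided:

#include <linux/elf.h>
#include <linux/firmware.h>
#include <linux/device.h>

/* Sketch: validate one program header before copying it into mem_region. */
static int foo_check_segment(struct device *dev, const struct elf32_phdr *phdr,
			     const struct firmware *seg_fw, int i)
{
	if (phdr->p_filesz > phdr->p_memsz) {
		dev_err(dev, "segment %d: p_filesz > p_memsz\n", i);
		return -EINVAL;
	}
	if (seg_fw->size != phdr->p_filesz) {
		dev_err(dev, "segment %d: truncated firmware file\n", i);
		return -EINVAL;
	}
	return 0;
}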
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index ee89ffb6dde8..7369b061929b 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -275,6 +275,7 @@ static void geni_se_select_fifo_mode(struct geni_se *se)
static void geni_se_select_dma_mode(struct geni_se *se)
{
+ u32 proto = geni_se_read_proto(se);
u32 val;
writel_relaxed(0, se->base + SE_GSI_EVENT_EN);
@@ -284,6 +285,18 @@ static void geni_se_select_dma_mode(struct geni_se *se)
writel_relaxed(0xffffffff, se->base + SE_DMA_RX_IRQ_CLR);
writel_relaxed(0xffffffff, se->base + SE_IRQ_EN);
+ val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
+ if (proto != GENI_SE_UART) {
+ val &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
+ val &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+ }
+ writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
+
+ val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
+ if (proto != GENI_SE_UART)
+ val &= ~S_CMD_DONE_EN;
+ writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
+
val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
val |= GENI_DMA_MODE_EN;
writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
@@ -633,7 +646,7 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_TX_PTR_L);
writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_TX_PTR_H);
writel_relaxed(GENI_SE_DMA_EOT_BUF, se->base + SE_DMA_TX_ATTR);
- writel_relaxed(len, se->base + SE_DMA_TX_LEN);
+ writel(len, se->base + SE_DMA_TX_LEN);
return 0;
}
EXPORT_SYMBOL(geni_se_tx_dma_prep);
@@ -667,7 +680,7 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_RX_PTR_H);
/* RX does not have EOT buffer type bit. So just reset RX_ATTR */
writel_relaxed(0, se->base + SE_DMA_RX_ATTR);
- writel_relaxed(len, se->base + SE_DMA_RX_LEN);
+ writel(len, se->base + SE_DMA_RX_LEN);
return 0;
}
EXPORT_SYMBOL(geni_se_rx_dma_prep);
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index 75bd9a83aef0..519d19f57eee 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -148,7 +148,7 @@ int rpmh_rsc_invalidate(struct rsc_drv *drv)
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
const struct tcs_request *msg)
{
- int type, ret;
+ int type;
struct tcs_group *tcs;
switch (msg->state) {
@@ -169,19 +169,10 @@ static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
* If we are making an active request on a RSC that does not have a
* dedicated TCS for active state use, then re-purpose a wake TCS to
* send active votes.
- * NOTE: The driver must be aware that this RSC does not have a
- * dedicated AMC, and therefore would invalidate the sleep and wake
- * TCSes before making an active state request.
*/
tcs = get_tcs_of_type(drv, type);
- if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) {
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
tcs = get_tcs_of_type(drv, WAKE_TCS);
- if (tcs->num_tcs) {
- ret = rpmh_rsc_invalidate(drv);
- if (ret)
- return ERR_PTR(ret);
- }
- }
return tcs;
}
@@ -201,6 +192,42 @@ static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
return NULL;
}
+static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
+{
+ u32 enable;
+
+ /*
+ * HW req: Clear the DRV_CONTROL and enable TCS again
+ * While clearing ensure that the AMC mode trigger is cleared
+ * and then the mode enable is cleared.
+ */
+ enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
+ enable &= ~TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable &= ~TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+
+ if (trigger) {
+ /* Enable the AMC mode on the TCS and then trigger the TCS */
+ enable = TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable |= TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ }
+}
+
+static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
+{
+ u32 data;
+
+ data = read_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, 0);
+ if (enable)
+ data |= BIT(tcs_id);
+ else
+ data &= ~BIT(tcs_id);
+ write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, data);
+}
+
/**
* tcs_tx_done: TX Done interrupt handler
*/
@@ -237,6 +264,14 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
}
trace_rpmh_tx_done(drv, i, req, err);
+
+ /*
+ * If wake tcs was re-purposed for sending active
+ * votes, clear AMC trigger & enable modes and
+ * disable interrupt for this TCS
+ */
+ if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ __tcs_set_trigger(drv, i, false);
skip:
/* Reclaim the TCS */
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
@@ -244,6 +279,13 @@ skip:
write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
spin_lock(&drv->lock);
clear_bit(i, drv->tcs_in_use);
+ /*
+ * Disable interrupt for WAKE TCS to avoid being
+ * spammed with interrupts coming when the solver
+ * sends its wake votes.
+ */
+ if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ enable_tcs_irq(drv, i, false);
spin_unlock(&drv->lock);
if (req)
rpmh_tx_done(req, err);
@@ -285,28 +327,6 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}
-static void __tcs_trigger(struct rsc_drv *drv, int tcs_id)
-{
- u32 enable;
-
- /*
- * HW req: Clear the DRV_CONTROL and enable TCS again
- * While clearing ensure that the AMC mode trigger is cleared
- * and then the mode enable is cleared.
- */
- enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
- enable &= ~TCS_AMC_MODE_TRIGGER;
- write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
- enable &= ~TCS_AMC_MODE_ENABLE;
- write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
-
- /* Enable the AMC mode on the TCS and then trigger the TCS */
- enable = TCS_AMC_MODE_ENABLE;
- write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
- enable |= TCS_AMC_MODE_TRIGGER;
- write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
-}
-
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
const struct tcs_request *msg)
{
@@ -377,10 +397,20 @@ static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
tcs->req[tcs_id - tcs->offset] = msg;
set_bit(tcs_id, drv->tcs_in_use);
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
+ /*
+ * Clear previously programmed WAKE commands in selected
+ * repurposed TCS to avoid triggering them. tcs->slots will be
+ * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
+ */
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+ enable_tcs_irq(drv, tcs_id, true);
+ }
spin_unlock(&drv->lock);
__tcs_buffer_write(drv, tcs_id, 0, msg);
- __tcs_trigger(drv, tcs_id);
+ __tcs_set_trigger(drv, tcs_id, true);
done_write:
spin_unlock_irqrestore(&tcs->lock, flags);
@@ -685,6 +715,7 @@ static struct platform_driver rpmh_driver = {
.driver = {
.name = "rpmh",
.of_match_table = rpmh_drv_match,
+ .suppress_bind_attrs = true,
},
};
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index ab8f731a3426..8e19a8bdfd2c 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -119,6 +119,7 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
{
struct cache_req *req;
unsigned long flags;
+ u32 old_sleep_val, old_wake_val;
spin_lock_irqsave(&ctrlr->cache_lock, flags);
req = __find_req(ctrlr, cmd->addr);
@@ -133,26 +134,27 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
req->addr = cmd->addr;
req->sleep_val = req->wake_val = UINT_MAX;
- INIT_LIST_HEAD(&req->list);
list_add_tail(&req->list, &ctrlr->cache);
existing:
+ old_sleep_val = req->sleep_val;
+ old_wake_val = req->wake_val;
+
switch (state) {
case RPMH_ACTIVE_ONLY_STATE:
- if (req->sleep_val != UINT_MAX)
- req->wake_val = cmd->data;
- break;
case RPMH_WAKE_ONLY_STATE:
req->wake_val = cmd->data;
break;
case RPMH_SLEEP_STATE:
req->sleep_val = cmd->data;
break;
- default:
- break;
}
- ctrlr->dirty = true;
+ ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
+ req->wake_val != old_wake_val) &&
+ req->sleep_val != UINT_MAX &&
+ req->wake_val != UINT_MAX;
+
unlock:
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
@@ -288,6 +290,7 @@ static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
spin_lock_irqsave(&ctrlr->cache_lock, flags);
list_add_tail(&req->list, &ctrlr->batch_cache);
+ ctrlr->dirty = true;
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
@@ -315,18 +318,6 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
return ret;
}
-static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
-{
- struct batch_cache_req *req, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&ctrlr->cache_lock, flags);
- list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
- kfree(req);
- INIT_LIST_HEAD(&ctrlr->batch_cache);
- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
-}
-
/**
* rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
* batch to finish.
@@ -466,6 +457,13 @@ int rpmh_flush(const struct device *dev)
return 0;
}
+ /* Invalidate the TCSes first to avoid stale data */
+ do {
+ ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
+ } while (ret == -EAGAIN);
+ if (ret)
+ return ret;
+
/* First flush the cached batch requests */
ret = flush_batch(ctrlr);
if (ret)
@@ -497,25 +495,25 @@ int rpmh_flush(const struct device *dev)
EXPORT_SYMBOL(rpmh_flush);
/**
- * rpmh_invalidate: Invalidate all sleep and active sets
- * sets.
+ * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
*
* @dev: The device making the request
*
- * Invalidate the sleep and active values in the TCS blocks.
+ * Invalidate the sleep and wake values in batch_cache.
*/
int rpmh_invalidate(const struct device *dev)
{
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
- int ret;
+ struct batch_cache_req *req, *tmp;
+ unsigned long flags;
- invalidate_batch(ctrlr);
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+ kfree(req);
+ INIT_LIST_HEAD(&ctrlr->batch_cache);
ctrlr->dirty = true;
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
- do {
- ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
- } while (ret == -EAGAIN);
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL(rpmh_invalidate);
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index c22503cd1edf..7908e7f2850f 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -326,15 +326,16 @@ static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
struct smp2p_entry *entry = data;
+ unsigned long flags;
u32 orig;
u32 val;
- spin_lock(&entry->lock);
+ spin_lock_irqsave(&entry->lock, flags);
val = orig = readl(entry->value);
val &= ~mask;
val |= value;
writel(val, entry->value);
- spin_unlock(&entry->lock);
+ spin_unlock_irqrestore(&entry->lock, flags);
if (val != orig)
qcom_smp2p_kick(entry->smp2p);
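The smp2p change switches to the irqsave lock variants, the standard cure when a lock may also be taken from (or while racing with) interrupt context: holding the plain spin_lock() with interrupts enabled leaves a window where an interrupt on the same CPU tries to take the lock again and deadlocks. The general shape, with placeholder names:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(foo_lock);
static u32 foo_value;

/* Safe to call from process context and from hard-IRQ context. */
static void foo_update_bits(u32 mask, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(&foo_lock, flags);	/* also disables local IRQs */
	foo_value = (foo_value & ~mask) | value;
	spin_unlock_irqrestore(&foo_lock, flags);
}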
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
index 5373f4c16b54..4403b89561fd 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra210.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -105,7 +105,7 @@ static int get_process_id(int value, const u32 *speedos, unsigned int num)
unsigned int i;
for (i = 0; i < num; i++)
- if (value < speedos[num])
+ if (value < speedos[i])
return i;
return -EINVAL;
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 224d7ddeeb76..eb2e87229c1d 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -759,8 +759,9 @@ static int knav_dma_probe(struct platform_device *pdev)
pm_runtime_enable(kdev->dev);
ret = pm_runtime_get_sync(kdev->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(kdev->dev);
dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
- return ret;
+ goto err_pm_disable;
}
/* Initialise all packet dmas */
@@ -774,7 +775,8 @@ static int knav_dma_probe(struct platform_device *pdev)
if (list_empty(&kdev->list)) {
dev_err(dev, "no valid dma instance\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put_sync;
}
debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
@@ -782,6 +784,13 @@ static int knav_dma_probe(struct platform_device *pdev)
device_ready = true;
return ret;
+
+err_put_sync:
+ pm_runtime_put_sync(kdev->dev);
+err_pm_disable:
+ pm_runtime_disable(kdev->dev);
+
+ return ret;
}
static int knav_dma_remove(struct platform_device *pdev)
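knav_dma here, and knav_qmss_queue, spi-img-spfi and spi-mxs further down, all gain the same two-part fix: a failed pm_runtime_get_sync() still leaves the usage counter raised, so it must be dropped with pm_runtime_put_noidle(), and whatever probe enabled earlier has to be unwound instead of returning directly. A compact probe fragment with a placeholder setup step:

#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

static int foo_setup_hw(struct platform_device *pdev)
{
	/* placeholder for the driver's real initialisation */
	return 0;
}

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		/* get_sync() bumps the usage count even when it fails */
		pm_runtime_put_noidle(&pdev->dev);
		goto err_pm_disable;
	}

	ret = foo_setup_hw(pdev);
	if (ret)
		goto err_put_sync;

	return 0;

err_put_sync:
	pm_runtime_put_sync(&pdev->dev);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	return ret;
}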
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index ef36acc0e708..9f5ce52e6c16 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1799,6 +1799,7 @@ static int knav_queue_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
dev_err(dev, "Failed to enable QMSS\n");
return ret;
}
@@ -1866,9 +1867,10 @@ static int knav_queue_probe(struct platform_device *pdev)
if (ret)
goto err;
- regions = of_get_child_by_name(node, "descriptor-regions");
+ regions = of_get_child_by_name(node, "descriptor-regions");
if (!regions) {
dev_err(dev, "descriptor-regions not specified\n");
+ ret = -ENODEV;
goto err;
}
ret = knav_queue_setup_regions(kdev, regions);
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index df172bf3925f..0089b606b70d 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -514,7 +514,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
struct sdw_slave *slave, *_s;
struct sdw_slave_id id;
struct sdw_msg msg;
- bool found = false;
+ bool found;
int count = 0, ret;
u64 addr;
@@ -545,6 +545,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
sdw_extract_slave_id(bus, addr, &id);
+ found = false;
/* Now compare with entries */
list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
if (sdw_compare_devid(slave, id) == 0) {
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 907a548645b7..42bc701e2304 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -1182,8 +1182,16 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
}
ret = sdw_config_stream(&slave->dev, stream, stream_config, true);
- if (ret)
+ if (ret) {
+ /*
+ * sdw_release_master_stream will release s_rt in slave_rt_list in
+ * stream_error case, but s_rt is only added to slave_rt_list
+ * when sdw_config_stream is successful, so free s_rt explicitly
+ * when sdw_config_stream fails.
+ */
+ kfree(s_rt);
goto stream_error;
+ }
list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 671d078349cc..0a7fd56c1ed9 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -817,4 +817,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
endif # SPI_SLAVE
+config SPI_DYNAMIC
+ def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
+
endif # SPI
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 5a9d7e252a77..2254e36c7f46 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1605,7 +1605,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
if (ret == 0) {
as->use_dma = true;
} else if (ret == -EPROBE_DEFER) {
- return ret;
+ goto out_unmap_regs;
}
} else if (as->caps.has_pdc_support) {
as->use_pdc = true;
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 285a6f463013..4ee92f7ca20b 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -681,7 +681,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots)
if (buf)
buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
dev_dbg(&qspi->pdev->dev, "RD %02x\n",
- buf ? buf[tp.byte] : 0xff);
+ buf ? buf[tp.byte] : 0x0);
} else {
u16 *buf = tp.trans->rx_buf;
@@ -689,7 +689,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots)
buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
slot);
dev_dbg(&qspi->pdev->dev, "RD %04x\n",
- buf ? buf[tp.byte] : 0xffff);
+ buf ? buf[tp.byte / 2] : 0x0);
}
update_qspi_trans_byte_count(qspi, &tp,
@@ -744,13 +744,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
while (!tstatus && slot < MSPI_NUM_CDRAM) {
if (tp.trans->bits_per_word <= 8) {
const u8 *buf = tp.trans->tx_buf;
- u8 val = buf ? buf[tp.byte] : 0xff;
+ u8 val = buf ? buf[tp.byte] : 0x00;
write_txram_slot_u8(qspi, slot, val);
dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
} else {
const u16 *buf = tp.trans->tx_buf;
- u16 val = buf ? buf[tp.byte / 2] : 0xffff;
+ u16 val = buf ? buf[tp.byte / 2] : 0x0000;
write_txram_slot_u16(qspi, slot, val);
dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
@@ -1223,7 +1223,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
if (!of_match_node(bcm_qspi_of_match, dev->of_node))
return -ENODEV;
- master = spi_alloc_master(dev, sizeof(struct bcm_qspi));
+ master = devm_spi_alloc_master(dev, sizeof(struct bcm_qspi));
if (!master) {
dev_err(dev, "error allocating spi_master\n");
return -ENOMEM;
@@ -1257,21 +1257,17 @@ int bcm_qspi_probe(struct platform_device *pdev,
if (res) {
qspi->base[MSPI] = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->base[MSPI])) {
- ret = PTR_ERR(qspi->base[MSPI]);
- goto qspi_resource_err;
- }
+ if (IS_ERR(qspi->base[MSPI]))
+ return PTR_ERR(qspi->base[MSPI]);
} else {
- goto qspi_resource_err;
+ return 0;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
if (res) {
qspi->base[BSPI] = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->base[BSPI])) {
- ret = PTR_ERR(qspi->base[BSPI]);
- goto qspi_resource_err;
- }
+ if (IS_ERR(qspi->base[BSPI]))
+ return PTR_ERR(qspi->base[BSPI]);
qspi->bspi_mode = true;
} else {
qspi->bspi_mode = false;
@@ -1282,18 +1278,14 @@ int bcm_qspi_probe(struct platform_device *pdev,
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
if (res) {
qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->base[CHIP_SELECT])) {
- ret = PTR_ERR(qspi->base[CHIP_SELECT]);
- goto qspi_resource_err;
- }
+ if (IS_ERR(qspi->base[CHIP_SELECT]))
+ return PTR_ERR(qspi->base[CHIP_SELECT]);
}
qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
GFP_KERNEL);
- if (!qspi->dev_ids) {
- ret = -ENOMEM;
- goto qspi_resource_err;
- }
+ if (!qspi->dev_ids)
+ return -ENOMEM;
for (val = 0; val < num_irqs; val++) {
irq = -1;
@@ -1369,7 +1361,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->xfer_mode.addrlen = -1;
qspi->xfer_mode.hp = -1;
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = spi_register_master(master);
if (ret < 0) {
dev_err(dev, "can't register master\n");
goto qspi_reg_err;
@@ -1382,8 +1374,6 @@ qspi_reg_err:
clk_disable_unprepare(qspi->clk);
qspi_probe_err:
kfree(qspi->dev_ids);
-qspi_resource_err:
- spi_master_put(master);
return ret;
}
/* probe function to be called by SoC specific platform driver probe */
@@ -1393,10 +1383,10 @@ int bcm_qspi_remove(struct platform_device *pdev)
{
struct bcm_qspi *qspi = platform_get_drvdata(pdev);
+ spi_unregister_master(qspi->master);
bcm_qspi_hw_uninit(qspi);
clk_disable_unprepare(qspi->clk);
kfree(qspi->dev_ids);
- spi_unregister_master(qspi->master);
return 0;
}
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index eab27d41ba83..6824beae18e4 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -737,7 +737,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
struct resource *res;
int err;
- master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
if (!master) {
dev_err(&pdev->dev, "spi_alloc_master() failed\n");
return -ENOMEM;
@@ -759,23 +759,20 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
bs->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(bs->regs)) {
- err = PTR_ERR(bs->regs);
- goto out_master_put;
- }
+ if (IS_ERR(bs->regs))
+ return PTR_ERR(bs->regs);
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
dev_err(&pdev->dev, "could not get clk: %d\n", err);
- goto out_master_put;
+ return err;
}
bs->irq = platform_get_irq(pdev, 0);
if (bs->irq <= 0) {
dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
- err = bs->irq ? bs->irq : -ENODEV;
- goto out_master_put;
+ return bs->irq ? bs->irq : -ENODEV;
}
clk_prepare_enable(bs->clk);
@@ -790,21 +787,20 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
dev_name(&pdev->dev), master);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
- goto out_clk_disable;
+ goto out_dma_release;
}
- err = devm_spi_register_master(&pdev->dev, master);
+ err = spi_register_master(master);
if (err) {
dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
- goto out_clk_disable;
+ goto out_dma_release;
}
return 0;
-out_clk_disable:
+out_dma_release:
+ bcm2835_dma_release(master);
clk_disable_unprepare(bs->clk);
-out_master_put:
- spi_master_put(master);
return err;
}
@@ -813,6 +809,8 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ spi_unregister_master(master);
+
/* Clear FIFOs, and disable the HW block */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
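The bcm-qspi and bcm2835 conversions pair two related changes: devm_spi_alloc_master() removes the spi_master_put() calls from every early error path, and switching from devm_spi_register_master() to plain spi_register_master() lets remove() unregister the controller first, before the hardware, DMA and clocks it still needs are torn down. A skeletal probe/remove pair showing the ordering:

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct foo_spi {
	int id;		/* driver state would live here */
};

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	/* Released automatically on probe failure and after remove(). */
	master = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	/* ... clocks, IRQs, DMA setup ... */

	ret = spi_register_master(master);
	if (ret)
		return ret;	/* a real driver also unwinds the hardware here */

	return 0;
}

static int foo_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);

	spi_unregister_master(master);	/* stop new transfers first ... */

	/* ... then it is safe to quiesce the hardware and drop clocks. */
	return 0;
}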
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index c63ed402cf86..8ea7e31b8c2f 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -407,7 +407,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
unsigned long clk_hz;
int err;
- master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
if (!master) {
dev_err(&pdev->dev, "spi_alloc_master() failed\n");
return -ENOMEM;
@@ -439,30 +439,27 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
/* the main area */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
bs->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(bs->regs)) {
- err = PTR_ERR(bs->regs);
- goto out_master_put;
- }
+ if (IS_ERR(bs->regs))
+ return PTR_ERR(bs->regs);
bs->clk = devm_clk_get(&pdev->dev, NULL);
if ((!bs->clk) || (IS_ERR(bs->clk))) {
err = PTR_ERR(bs->clk);
dev_err(&pdev->dev, "could not get clk: %d\n", err);
- goto out_master_put;
+ return err;
}
bs->irq = platform_get_irq(pdev, 0);
if (bs->irq <= 0) {
dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
- err = bs->irq ? bs->irq : -ENODEV;
- goto out_master_put;
+ return bs->irq ? bs->irq : -ENODEV;
}
/* this also enables the HW block */
err = clk_prepare_enable(bs->clk);
if (err) {
dev_err(&pdev->dev, "could not prepare clock: %d\n", err);
- goto out_master_put;
+ return err;
}
/* just checking if the clock returns a sane value */
@@ -485,7 +482,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
goto out_clk_disable;
}
- err = devm_spi_register_master(&pdev->dev, master);
+ err = spi_register_master(master);
if (err) {
dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
goto out_clk_disable;
@@ -495,8 +492,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
out_clk_disable:
clk_disable_unprepare(bs->clk);
-out_master_put:
- spi_master_put(master);
return err;
}
@@ -505,6 +500,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ spi_unregister_master(master);
+
bcm2835aux_spi_reset_hw(bs);
/* disable the HW block by releasing the clock */
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 1669c554ea34..2ad7b3f3666b 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -487,8 +487,10 @@ static int bcm63xx_hsspi_resume(struct device *dev)
if (bs->pll_clk) {
ret = clk_prepare_enable(bs->pll_clk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(bs->clk);
return ret;
+ }
}
spi_master_resume(master);
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 94cc0a152449..f5055ceb7529 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -119,6 +119,7 @@ struct cdns_spi {
void __iomem *regs;
struct clk *ref_clk;
struct clk *pclk;
+ unsigned int clk_rate;
u32 speed_hz;
const u8 *txbuf;
u8 *rxbuf;
@@ -258,7 +259,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi,
u32 ctrl_reg, baud_rate_val;
unsigned long frequency;
- frequency = clk_get_rate(xspi->ref_clk);
+ frequency = xspi->clk_rate;
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
@@ -628,8 +629,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
master->auto_runtime_pm = true;
master->mode_bits = SPI_CPOL | SPI_CPHA;
+ xspi->clk_rate = clk_get_rate(xspi->ref_clk);
/* Set to default valid value */
- master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
+ master->max_speed_hz = xspi->clk_rate / 4;
xspi->speed_hz = master->max_speed_hz;
master->bits_per_word_mask = SPI_BPW_MASK(8);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index a02099c90c5c..b56038945f41 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -1086,13 +1086,13 @@ static int davinci_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&dspi->bitbang);
clk_disable_unprepare(dspi->clk);
- spi_master_put(master);
if (dspi->dma_rx) {
dma_release_channel(dspi->dma_rx);
dma_release_channel(dspi->dma_tx);
}
+ spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
index b62a99caacc0..a41adea48618 100644
--- a/drivers/spi/spi-dln2.c
+++ b/drivers/spi/spi-dln2.c
@@ -783,7 +783,7 @@ exit_free_master:
static int dln2_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct spi_master *master = platform_get_drvdata(pdev);
struct dln2_spi *dln2 = spi_master_get_devdata(master);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 3db905f5f345..10f328558d55 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -155,6 +155,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
if (!xfer->tx_buf)
return NULL;
+ memset(&txconf, 0, sizeof(txconf));
txconf.direction = DMA_MEM_TO_DEV;
txconf.dst_addr = dws->dma_addr;
txconf.dst_maxburst = 16;
@@ -201,6 +202,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
if (!xfer->rx_buf)
return NULL;
+ memset(&rxconf, 0, sizeof(rxconf));
rxconf.direction = DMA_DEV_TO_MEM;
rxconf.src_addr = dws->dma_addr;
rxconf.src_maxburst = 16;
@@ -226,19 +228,23 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
- u16 dma_ctrl = 0;
+ u16 imr = 0, dma_ctrl = 0;
dw_writel(dws, DW_SPI_DMARDLR, 0xf);
dw_writel(dws, DW_SPI_DMATDLR, 0x10);
- if (xfer->tx_buf)
+ if (xfer->tx_buf) {
dma_ctrl |= SPI_DMA_TDMAE;
- if (xfer->rx_buf)
+ imr |= SPI_INT_TXOI;
+ }
+ if (xfer->rx_buf) {
dma_ctrl |= SPI_DMA_RDMAE;
+ imr |= SPI_INT_RXUI | SPI_INT_RXOI;
+ }
dw_writel(dws, DW_SPI_DMACR, dma_ctrl);
/* Set the interrupt mask */
- spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI);
+ spi_umask_intr(dws, imr);
dws->transfer_handler = dma_transfer;
@@ -268,7 +274,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
dma_async_issue_pending(dws->txchan);
}
- return 0;
+ return 1;
}
static void mid_spi_dma_stop(struct dw_spi *dws)
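The two memset() additions in spi-dw-mid matter because struct dma_slave_config has grown fields over time; passing a stack copy with uninitialized members to dmaengine_slave_config() feeds the DMA driver stack garbage for everything the caller did not set. The defensive pattern, as a hedged sketch:

#include <linux/dmaengine.h>
#include <linux/string.h>

/* chan and dev_addr are assumed to come from the caller's earlier setup. */
static int foo_setup_tx(struct dma_chan *chan, dma_addr_t dev_addr)
{
	struct dma_slave_config cfg;

	memset(&cfg, 0, sizeof(cfg));	/* unknown or future fields become 0 */
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = dev_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	cfg.dst_maxburst = 16;

	return dmaengine_slave_config(chan, &cfg);
}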
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 5a47e28e38c1..b1c137261d0f 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -304,6 +304,9 @@ static int dw_spi_transfer_one(struct spi_controller *master,
dws->len = transfer->len;
spin_unlock_irqrestore(&dws->buf_lock, flags);
+ /* Ensure dw->rx and dw->rx_end are visible */
+ smp_mb();
+
spi_enable_chip(dws, 0);
/* Handle per transfer options for bpw and speed */
@@ -380,11 +383,8 @@ static int dw_spi_transfer_one(struct spi_controller *master,
spi_enable_chip(dws, 1);
- if (dws->dma_mapped) {
- ret = dws->dma_ops->dma_transfer(dws, transfer);
- if (ret < 0)
- return ret;
- }
+ if (dws->dma_mapped)
+ return dws->dma_ops->dma_transfer(dws, transfer);
if (chip->poll_mode)
return poll_transfer(dws);
@@ -530,10 +530,11 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
dws->dma_inited = 0;
} else {
master->can_dma = dws->dma_ops->can_dma;
+ master->flags |= SPI_CONTROLLER_MUST_TX;
}
}
- ret = devm_spi_register_controller(dev, master);
+ ret = spi_register_controller(master);
if (ret) {
dev_err(&master->dev, "problem registering spi master\n");
goto err_dma_exit;
@@ -557,6 +558,8 @@ void dw_spi_remove_host(struct dw_spi *dws)
{
dw_spi_debugfs_remove(dws);
+ spi_unregister_controller(dws->master);
+
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 3082e72e4f6c..1b003dba86f9 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2013 Freescale Semiconductor, Inc.
+// Copyright 2020 NXP
//
// Freescale DSPI driver
// This file contains a driver for the Freescale DSPI
@@ -43,6 +44,9 @@
#define SPI_MCR_CLR_TXF (1 << 11)
#define SPI_MCR_CLR_RXF (1 << 10)
#define SPI_MCR_XSPI (1 << 3)
+#define SPI_MCR_DIS_TXF (1 << 13)
+#define SPI_MCR_DIS_RXF (1 << 12)
+#define SPI_MCR_HALT (1 << 0)
#define SPI_TCR 0x08
#define SPI_TCR_GET_TCNT(x) (((x) & 0xffff0000) >> 16)
@@ -67,7 +71,7 @@
#define SPI_SR 0x2c
#define SPI_SR_EOQF 0x10000000
#define SPI_SR_TCFQF 0x80000000
-#define SPI_SR_CLEAR 0xdaad0000
+#define SPI_SR_CLEAR 0x9aaf0000
#define SPI_RSER_TFFFE BIT(25)
#define SPI_RSER_TFFFD BIT(24)
@@ -874,9 +878,11 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
trans_mode);
}
}
+
+ return IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
static const struct of_device_id fsl_dspi_dt_ids[] = {
@@ -893,6 +899,8 @@ static int dspi_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ if (dspi->irq)
+ disable_irq(dspi->irq);
spi_master_suspend(master);
clk_disable_unprepare(dspi->clk);
@@ -913,6 +921,8 @@ static int dspi_resume(struct device *dev)
if (ret)
return ret;
spi_master_resume(master);
+ if (dspi->irq)
+ enable_irq(dspi->irq);
return 0;
}
@@ -1090,8 +1100,8 @@ static int dspi_probe(struct platform_device *pdev)
goto out_clk_put;
}
- ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
- pdev->name, dspi);
+ ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
+ IRQF_SHARED, pdev->name, dspi);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
goto out_clk_put;
@@ -1101,7 +1111,7 @@ static int dspi_probe(struct platform_device *pdev)
ret = dspi_request_dma(dspi, res->start);
if (ret < 0) {
dev_err(&pdev->dev, "can't get dma channels\n");
- goto out_clk_put;
+ goto out_free_irq;
}
}
@@ -1114,11 +1124,14 @@ static int dspi_probe(struct platform_device *pdev)
ret = spi_register_master(master);
if (ret != 0) {
dev_err(&pdev->dev, "Problem registering DSPI master\n");
- goto out_clk_put;
+ goto out_free_irq;
}
return ret;
+out_free_irq:
+ if (dspi->irq)
+ free_irq(dspi->irq, dspi);
out_clk_put:
clk_disable_unprepare(dspi->clk);
out_master_put:
@@ -1133,13 +1146,29 @@ static int dspi_remove(struct platform_device *pdev)
struct fsl_dspi *dspi = spi_master_get_devdata(master);
/* Disconnect from the SPI framework */
+ spi_unregister_controller(dspi->master);
+
+ /* Disable RX and TX */
+ regmap_update_bits(dspi->regmap, SPI_MCR,
+ SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
+ SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);
+
+ /* Stop Running */
+ regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
+
dspi_release_dma(dspi);
+ if (dspi->irq)
+ free_irq(dspi->irq, dspi);
clk_disable_unprepare(dspi->clk);
- spi_unregister_master(dspi->master);
return 0;
}
+static void dspi_shutdown(struct platform_device *pdev)
+{
+ dspi_remove(pdev);
+}
+
static struct platform_driver fsl_dspi_driver = {
.driver.name = DRIVER_NAME,
.driver.of_match_table = fsl_dspi_dt_ids,
@@ -1147,6 +1176,7 @@ static struct platform_driver fsl_dspi_driver = {
.driver.pm = &dspi_pm,
.probe = dspi_probe,
.remove = dspi_remove,
+ .shutdown = dspi_shutdown,
};
module_platform_driver(fsl_dspi_driver);
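The fsl-dspi interrupt rework gives the handler the shape every IRQF_SHARED handler needs: read the status, and if none of the bits this device owns are set, return IRQ_NONE so the core can offer the line to the other sharers (and spot a runaway interrupt). A bare-bones version with a hypothetical register layout, assuming write-1-to-clear status bits:

#include <linux/interrupt.h>
#include <linux/io.h>

#define FOO_SR		0x2c		/* hypothetical status register */
#define FOO_SR_EVENTS	0x9aaf0000	/* bits this driver cares about */

struct foo_priv {
	void __iomem *regs;
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	u32 status = readl(priv->regs + FOO_SR);

	if (!(status & FOO_SR_EVENTS))
		return IRQ_NONE;	/* not ours: let the other sharers look */

	writel(status & FOO_SR_EVENTS, priv->regs + FOO_SR);	/* ack (w1c) */
	/* ... handle the events ... */
	return IRQ_HANDLED;
}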
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 1e8ff6256079..b8dd75b8518b 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -559,13 +559,14 @@ static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
struct fsl_espi *espi = context_data;
- u32 events;
+ u32 events, mask;
spin_lock(&espi->lock);
/* Get interrupt events(tx/rx) */
events = fsl_espi_read_reg(espi, ESPI_SPIE);
- if (!events) {
+ mask = fsl_espi_read_reg(espi, ESPI_SPIM);
+ if (!(events & mask)) {
spin_unlock(&espi->lock);
return IRQ_NONE;
}
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 77838d8fd9bb..341d2953d7fc 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -382,7 +382,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
return -ENODEV;
#endif
- master = spi_alloc_master(&pdev->dev, sizeof(*spi_gpio));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*spi_gpio));
if (!master)
return -ENOMEM;
@@ -438,11 +438,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
}
spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
- status = spi_bitbang_start(&spi_gpio->bitbang);
- if (status)
- spi_master_put(master);
-
- return status;
+ return spi_bitbang_start(&spi_gpio->bitbang);
}
static int spi_gpio_remove(struct platform_device *pdev)
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index e4b31d6e6e33..25a545c985d4 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -774,8 +774,10 @@ static int img_spfi_resume(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret)
+ if (ret) {
+ pm_runtime_put_noidle(dev);
return ret;
+ }
spfi_reset(spfi);
pm_runtime_put(dev);
diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
index d5976615d924..dc740b5f720b 100644
--- a/drivers/spi/spi-lantiq-ssc.c
+++ b/drivers/spi/spi-lantiq-ssc.c
@@ -187,6 +187,7 @@ struct lantiq_ssc_spi {
unsigned int tx_fifo_size;
unsigned int rx_fifo_size;
unsigned int base_cs;
+ unsigned int fdx_tx_level;
};
static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
@@ -484,6 +485,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi)
u32 data;
unsigned int tx_free = tx_fifo_free(spi);
+ spi->fdx_tx_level = 0;
while (spi->tx_todo && tx_free) {
switch (spi->bits_per_word) {
case 2 ... 8:
@@ -512,6 +514,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi)
lantiq_ssc_writel(spi, data, LTQ_SPI_TB);
tx_free--;
+ spi->fdx_tx_level++;
}
}
@@ -523,6 +526,13 @@ static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
u32 data;
unsigned int rx_fill = rx_fifo_level(spi);
+ /*
+ * Wait until all expected data has been shifted in.
+ * Otherwise, an RX overrun may occur.
+ */
+ while (rx_fill != spi->fdx_tx_level)
+ rx_fill = rx_fifo_level(spi);
+
while (rx_fill) {
data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index bed7403bb6b3..b9a7117b6dce 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -99,7 +99,7 @@ static struct spi_test spi_tests[] = {
{
.description = "tx/rx-transfer - crossing PAGE_SIZE",
.fill_option = FILL_COUNT_8,
- .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_len = { ITERATE_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_rx_align = ITERATE_ALIGN,
.transfer_count = 1,
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 0c2867deb36f..da28c52c9da1 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -41,7 +41,6 @@
#define SPI_CFG0_SCK_LOW_OFFSET 8
#define SPI_CFG0_CS_HOLD_OFFSET 16
#define SPI_CFG0_CS_SETUP_OFFSET 24
-#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET 16
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16
@@ -53,6 +52,8 @@
#define SPI_CFG1_CS_IDLE_MASK 0xff
#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
+#define SPI_CFG2_SCK_HIGH_OFFSET 0
+#define SPI_CFG2_SCK_LOW_OFFSET 16
#define SPI_CMD_ACT BIT(0)
#define SPI_CMD_RESUME BIT(1)
@@ -259,7 +260,7 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
static void mtk_spi_prepare_transfer(struct spi_master *master,
struct spi_transfer *xfer)
{
- u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0;
+ u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
spi_clk_hz = clk_get_rate(mdata->spi_clk);
@@ -272,18 +273,18 @@ static void mtk_spi_prepare_transfer(struct spi_master *master,
cs_time = sck_time * 2;
if (mdata->dev_comp->enhance_timing) {
+ reg_val = (((sck_time - 1) & 0xffff)
+ << SPI_CFG2_SCK_HIGH_OFFSET);
reg_val |= (((sck_time - 1) & 0xffff)
- << SPI_CFG0_SCK_HIGH_OFFSET);
- reg_val |= (((sck_time - 1) & 0xffff)
- << SPI_ADJUST_CFG0_SCK_LOW_OFFSET);
+ << SPI_CFG2_SCK_LOW_OFFSET);
writel(reg_val, mdata->base + SPI_CFG2_REG);
- reg_val |= (((cs_time - 1) & 0xffff)
+ reg_val = (((cs_time - 1) & 0xffff)
<< SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
reg_val |= (((cs_time - 1) & 0xffff)
<< SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
writel(reg_val, mdata->base + SPI_CFG0_REG);
} else {
- reg_val |= (((sck_time - 1) & 0xff)
+ reg_val = (((sck_time - 1) & 0xff)
<< SPI_CFG0_SCK_HIGH_OFFSET);
reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 6ac95a2a21ce..4a7375ecb65e 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -605,6 +605,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(ssp->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(ssp->dev);
dev_err(ssp->dev, "runtime_get_sync failed\n");
goto out_pm_runtime_disable;
}
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 76a8425be227..1eccdc4a4581 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -435,7 +435,7 @@ err:
static int omap1_spi100k_remove(struct platform_device *pdev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct spi_master *master = platform_get_drvdata(pdev);
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
pm_runtime_disable(&pdev->dev);
@@ -449,7 +449,7 @@ static int omap1_spi100k_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int omap1_spi100k_runtime_suspend(struct device *dev)
{
- struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct spi_master *master = dev_get_drvdata(dev);
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
clk_disable_unprepare(spi100k->ick);
@@ -460,7 +460,7 @@ static int omap1_spi100k_runtime_suspend(struct device *dev)
static int omap1_spi100k_runtime_resume(struct device *dev)
{
- struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct spi_master *master = dev_get_drvdata(dev);
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
int ret;
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index 288002f6c613..661a40c653e9 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -839,6 +839,7 @@ static int pic32_spi_probe(struct platform_device *pdev)
return 0;
err_bailout:
+ pic32_spi_dma_unprep(pic32s);
clk_disable_unprepare(pic32s->clk);
err_master:
spi_master_put(master);
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 869f188b02eb..1736a48bbcce 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -21,7 +21,8 @@ enum {
PORT_BSW1,
PORT_BSW2,
PORT_CE4100,
- PORT_LPT,
+ PORT_LPT0,
+ PORT_LPT1,
};
struct pxa_spi_info {
@@ -55,8 +56,10 @@ static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 };
static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 };
static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 };
-static struct dw_dma_slave lpt_tx_param = { .dst_id = 0 };
-static struct dw_dma_slave lpt_rx_param = { .src_id = 1 };
+static struct dw_dma_slave lpt1_tx_param = { .dst_id = 0 };
+static struct dw_dma_slave lpt1_rx_param = { .src_id = 1 };
+static struct dw_dma_slave lpt0_tx_param = { .dst_id = 2 };
+static struct dw_dma_slave lpt0_rx_param = { .src_id = 3 };
static bool lpss_dma_filter(struct dma_chan *chan, void *param)
{
@@ -182,12 +185,19 @@ static struct pxa_spi_info spi_info_configs[] = {
.num_chipselect = 1,
.max_clk_rate = 50000000,
},
- [PORT_LPT] = {
+ [PORT_LPT0] = {
.type = LPSS_LPT_SSP,
.port_id = 0,
.setup = lpss_spi_setup,
- .tx_param = &lpt_tx_param,
- .rx_param = &lpt_rx_param,
+ .tx_param = &lpt0_tx_param,
+ .rx_param = &lpt0_rx_param,
+ },
+ [PORT_LPT1] = {
+ .type = LPSS_LPT_SSP,
+ .port_id = 1,
+ .setup = lpss_spi_setup,
+ .tx_param = &lpt1_tx_param,
+ .rx_param = &lpt1_rx_param,
},
};
@@ -281,8 +291,9 @@ static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
{ PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
{ PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 },
{ PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
- { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT },
- { },
+ { PCI_VDEVICE(INTEL, 0x9ce5), PORT_LPT0 },
+ { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT1 },
+ { }
};
MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index e4482823d8d7..a889505e978b 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -156,6 +156,7 @@ static const struct lpss_config lpss_platforms[] = {
.tx_threshold_hi = 48,
.cs_sel_shift = 8,
.cs_sel_mask = 3 << 8,
+ .cs_clk_stays_gated = true,
},
{ /* LPSS_CNL_SSP */
.offset = 0x200,
@@ -1571,7 +1572,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
return -ENODEV;
}
- master = spi_alloc_master(dev, sizeof(struct driver_data));
+ master = devm_spi_alloc_master(dev, sizeof(*drv_data));
if (!master) {
dev_err(&pdev->dev, "cannot alloc spi_master\n");
pxa_ssp_free(ssp);
@@ -1739,17 +1740,18 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
- status = devm_spi_register_controller(&pdev->dev, master);
+ status = spi_register_controller(master);
if (status != 0) {
dev_err(&pdev->dev, "problem registering spi master\n");
- goto out_error_clock_enabled;
+ goto out_error_pm_runtime_enabled;
}
return status;
-out_error_clock_enabled:
- pm_runtime_put_noidle(&pdev->dev);
+out_error_pm_runtime_enabled:
pm_runtime_disable(&pdev->dev);
+
+out_error_clock_enabled:
clk_disable_unprepare(ssp->clk);
out_error_dma_irq_alloc:
@@ -1757,7 +1759,6 @@ out_error_dma_irq_alloc:
free_irq(ssp->irq, drv_data);
out_error_master_alloc:
- spi_controller_put(master);
pxa_ssp_free(ssp);
return status;
}
@@ -1773,6 +1774,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
+ spi_unregister_controller(drv_data->master);
+
/* Disable the SSP at the peripheral and SOC level */
pxa2xx_spi_write(drv_data, SSCR0, 0);
clk_disable_unprepare(ssp->clk);
diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c
index 3641d0e20135..1d7fd6dbaf87 100644
--- a/drivers/spi/spi-rb4xx.c
+++ b/drivers/spi/spi-rb4xx.c
@@ -148,7 +148,7 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
if (IS_ERR(spi_base))
return PTR_ERR(spi_base);
- master = spi_alloc_master(&pdev->dev, sizeof(*rbspi));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*rbspi));
if (!master)
return -ENOMEM;
diff --git a/drivers/spi/spi-s3c24xx-fiq.S b/drivers/spi/spi-s3c24xx-fiq.S
index 059f2dc1fda2..1565c792da07 100644
--- a/drivers/spi/spi-s3c24xx-fiq.S
+++ b/drivers/spi/spi-s3c24xx-fiq.S
@@ -36,7 +36,6 @@
@ and an offset to the irq acknowledgment word
ENTRY(s3c24xx_spi_fiq_rx)
-s3c24xx_spi_fix_rx:
.word fiq_rx_end - fiq_rx_start
.word fiq_rx_irq_ack - fiq_rx_start
fiq_rx_start:
@@ -50,7 +49,7 @@ fiq_rx_start:
strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
subs fiq_rcount, fiq_rcount, #1
- subnes pc, lr, #4 @@ return, still have work to do
+ subsne pc, lr, #4 @@ return, still have work to do
@@ set IRQ controller so that next op will trigger IRQ
mov fiq_rtmp, #0
@@ -62,7 +61,6 @@ fiq_rx_irq_ack:
fiq_rx_end:
ENTRY(s3c24xx_spi_fiq_txrx)
-s3c24xx_spi_fiq_txrx:
.word fiq_txrx_end - fiq_txrx_start
.word fiq_txrx_irq_ack - fiq_txrx_start
fiq_txrx_start:
@@ -77,7 +75,7 @@ fiq_txrx_start:
strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
subs fiq_rcount, fiq_rcount, #1
- subnes pc, lr, #4 @@ return, still have work to do
+ subsne pc, lr, #4 @@ return, still have work to do
mov fiq_rtmp, #0
str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
@@ -89,7 +87,6 @@ fiq_txrx_irq_ack:
fiq_txrx_end:
ENTRY(s3c24xx_spi_fiq_tx)
-s3c24xx_spi_fix_tx:
.word fiq_tx_end - fiq_tx_start
.word fiq_tx_irq_ack - fiq_tx_start
fiq_tx_start:
@@ -102,7 +99,7 @@ fiq_tx_start:
strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
subs fiq_rcount, fiq_rcount, #1
- subnes pc, lr, #4 @@ return, still have work to do
+ subsne pc, lr, #4 @@ return, still have work to do
mov fiq_rtmp, #0
str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 7b7151ec14c8..1d948fee1a03 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -122,6 +122,7 @@
struct s3c64xx_spi_dma_data {
struct dma_chan *ch;
+ dma_cookie_t cookie;
enum dma_transfer_direction direction;
};
@@ -264,12 +265,13 @@ static void s3c64xx_spi_dmacb(void *data)
spin_unlock_irqrestore(&sdd->lock, flags);
}
-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
+static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
struct sg_table *sgt)
{
struct s3c64xx_spi_driver_data *sdd;
struct dma_slave_config config;
struct dma_async_tx_descriptor *desc;
+ int ret;
memset(&config, 0, sizeof(config));
@@ -293,12 +295,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
dma->direction, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
+ dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
+ return -ENOMEM;
+ }
desc->callback = s3c64xx_spi_dmacb;
desc->callback_param = dma;
- dmaengine_submit(desc);
+ dma->cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(dma->cookie);
+ if (ret) {
+ dev_err(&sdd->pdev->dev, "DMA submission failed");
+ return -EIO;
+ }
+
dma_async_issue_pending(dma->ch);
+ return 0;
}
static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
@@ -348,11 +362,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
}
-static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer, int dma_mode)
{
void __iomem *regs = sdd->regs;
u32 modecfg, chcfg;
+ int ret = 0;
modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
@@ -378,7 +393,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
- prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+ ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
} else {
switch (sdd->cur_bpw) {
case 32:
@@ -410,12 +425,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
- prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+ ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
}
}
+ if (ret)
+ return ret;
+
writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
+
+ return 0;
}
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
@@ -548,9 +568,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
return 0;
}
-static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
void __iomem *regs = sdd->regs;
+ int ret;
u32 val;
/* Disable Clock */
@@ -598,7 +619,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
if (sdd->port_conf->clk_from_cmu) {
/* The src_clk clock is divided internally by 2 */
- clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
+ ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
+ if (ret)
+ return ret;
} else {
/* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
@@ -612,6 +635,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
val |= S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG);
}
+
+ return 0;
}
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
@@ -654,7 +679,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
sdd->cur_bpw = bpw;
sdd->cur_speed = speed;
sdd->cur_mode = spi->mode;
- s3c64xx_spi_config(sdd);
+ status = s3c64xx_spi_config(sdd);
+ if (status)
+ return status;
}
if (!is_polling(sdd) && (xfer->len > fifo_len) &&
@@ -678,13 +705,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
sdd->state &= ~RXBUSY;
sdd->state &= ~TXBUSY;
- s3c64xx_enable_datapath(sdd, xfer, use_dma);
-
/* Start the signals */
s3c64xx_spi_set_cs(spi, true);
+ status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
+
spin_unlock_irqrestore(&sdd->lock, flags);
+ if (status) {
+ dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
+ break;
+ }
+
if (use_dma)
status = s3c64xx_wait_for_dma(sdd, xfer);
else
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 52cf0e9189c2..64cf1f572b6d 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -248,13 +248,12 @@ static int sc18is602_probe(struct i2c_client *client,
struct sc18is602_platform_data *pdata = dev_get_platdata(dev);
struct sc18is602 *hw;
struct spi_master *master;
- int error;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
return -EINVAL;
- master = spi_alloc_master(dev, sizeof(struct sc18is602));
+ master = devm_spi_alloc_master(dev, sizeof(struct sc18is602));
if (!master)
return -ENOMEM;
@@ -308,15 +307,7 @@ static int sc18is602_probe(struct i2c_client *client,
master->min_speed_hz = hw->freq / 128;
master->max_speed_hz = hw->freq / 4;
- error = devm_spi_register_master(dev, master);
- if (error)
- goto error_reg;
-
- return 0;
-
-error_reg:
- spi_master_put(master);
- return error;
+ return devm_spi_register_master(dev, master);
}
static const struct i2c_device_id sc18is602_id[] = {
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 50e0ea9acf8b..cba49a65ed2b 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -450,7 +450,7 @@ static int spi_sh_probe(struct platform_device *pdev)
return irq;
}
- master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
if (master == NULL) {
dev_err(&pdev->dev, "spi_alloc_master error.\n");
return -ENOMEM;
@@ -468,16 +468,14 @@ static int spi_sh_probe(struct platform_device *pdev)
break;
default:
dev_err(&pdev->dev, "No support width\n");
- ret = -ENODEV;
- goto error1;
+ return -ENODEV;
}
ss->irq = irq;
ss->master = master;
ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (ss->addr == NULL) {
dev_err(&pdev->dev, "ioremap error.\n");
- ret = -ENOMEM;
- goto error1;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&ss->queue);
spin_lock_init(&ss->lock);
@@ -487,7 +485,7 @@ static int spi_sh_probe(struct platform_device *pdev)
ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
if (ret < 0) {
dev_err(&pdev->dev, "request_irq error\n");
- goto error1;
+ return ret;
}
master->num_chipselect = 2;
@@ -506,9 +504,6 @@ static int spi_sh_probe(struct platform_device *pdev)
error3:
free_irq(irq, ss);
- error1:
- spi_master_put(master);
-
return ret;
}
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
index f1fc2bde6ef3..e41976010dc4 100644
--- a/drivers/spi/spi-sprd-adi.c
+++ b/drivers/spi/spi-sprd-adi.c
@@ -358,9 +358,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, WDG_UNLOCK_KEY);
/* Load the watchdog timeout value, 50ms is always enough. */
+ sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW,
WDG_LOAD_VAL & WDG_LOAD_MASK);
- sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
/* Start the watchdog to reset system */
sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index 5df01ffdef46..b46502db7f12 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -379,13 +379,14 @@ static int spi_st_probe(struct platform_device *pdev)
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "Failed to register master\n");
- goto clk_disable;
+ goto rpm_disable;
}
return 0;
-clk_disable:
+rpm_disable:
pm_runtime_disable(&pdev->dev);
+clk_disable:
clk_disable_unprepare(spi_st->clk);
put_master:
spi_master_put(master);
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index ad1e55d3d5d5..8d692f16d90a 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -254,7 +254,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
{
u32 div, mbrdiv;
- div = DIV_ROUND_UP(spi->clk_rate, speed_hz);
+ /* Ensure spi->clk_rate is even */
+ div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz);
/*
* SPI framework set xfer->speed_hz to master->max_speed_hz if
@@ -298,9 +299,9 @@ static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi)
/* align packet size with data registers access */
if (spi->cur_bpw > 8)
- fthlv -= (fthlv % 2); /* multiple of 2 */
+ fthlv += (fthlv % 2) ? 1 : 0;
else
- fthlv -= (fthlv % 4); /* multiple of 4 */
+ fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
return fthlv;
}
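The two fthlv adjustments above change the FIFO threshold from being rounded down to being rounded up to the next multiple of 2 (for bits-per-word above 8) or 4. A minimal user-space sketch of the same round-up arithmetic, with purely illustrative values:

#include <stdio.h>

/* round v up to the next multiple of m (same effect as the fthlv += lines) */
static unsigned int roundup_to(unsigned int v, unsigned int m)
{
        return (v + m - 1) / m * m;
}

int main(void)
{
        /* a threshold of 7: the old code rounded down to 4, the new one rounds up to 8 */
        printf("multiple of 4: 7 -> %u\n", roundup_to(7, 4));
        printf("multiple of 2: 7 -> %u\n", roundup_to(7, 2));
        return 0;
}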
@@ -991,6 +992,10 @@ static int stm32_spi_transfer_one(struct spi_master *master,
struct stm32_spi *spi = spi_master_get_devdata(master);
int ret;
+ /* Don't do anything on 0-byte transfers */
+ if (transfer->len == 0)
+ return 0;
+
spi->tx_buf = transfer->tx_buf;
spi->rx_buf = transfer->rx_buf;
spi->tx_len = spi->tx_buf ? transfer->len : 0;
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 8533f4edd00a..21a22d42818c 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -202,7 +202,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
struct spi_transfer *tfr)
{
struct sun6i_spi *sspi = spi_master_get_devdata(master);
- unsigned int mclk_rate, div, timeout;
+ unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout;
unsigned int start, end, tx_time;
unsigned int trig_level;
unsigned int tx_len = 0;
@@ -291,14 +291,12 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
* First try CDR2, and if we can't reach the expected
* frequency, fall back to CDR1.
*/
- div = mclk_rate / (2 * tfr->speed_hz);
- if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
- if (div > 0)
- div--;
-
- reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS;
+ div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz);
+ div_cdr2 = DIV_ROUND_UP(div_cdr1, 2);
+ if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
+ reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS;
} else {
- div = ilog2(mclk_rate) - ilog2(tfr->speed_hz);
+ div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1));
reg = SUN6I_CLK_CTL_CDR1(div);
}
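Worked numbers for the divider change above, assuming the rate formula implied by the code (CDR2 divides the module clock by 2 * (reg + 1)): with a 24 MHz module clock and a 7 MHz request, the old round-down path picked CDR2(0) and produced 12 MHz, overshooting the request, while the round-up path picks CDR2(1) and produces 6 MHz. A small user-space sketch of that arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int mclk = 24000000, speed = 7000000;  /* illustrative values */

        /* old: truncating divider, then decrement -> can exceed the request */
        unsigned int old_div = mclk / (2 * speed);      /* 1 */
        if (old_div > 0)
                old_div--;                              /* CDR2(0) */

        /* new: round up so the produced rate never exceeds the request */
        unsigned int div_cdr1 = DIV_ROUND_UP(mclk, speed);      /* 4 */
        unsigned int div_cdr2 = DIV_ROUND_UP(div_cdr1, 2);      /* 2 -> CDR2(1) */

        printf("old: %u Hz, new: %u Hz\n",
               mclk / (2 * (old_div + 1)), mclk / (2 * div_cdr2));
        return 0;
}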
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 09cfae3abce2..c510b53e5e3f 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -827,6 +827,7 @@ static int tegra_spi_setup(struct spi_device *spi)
ret = pm_runtime_get_sync(tspi->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(tspi->dev);
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
return ret;
}
@@ -1252,6 +1253,7 @@ static int tegra_spi_resume(struct device *dev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
+ pm_runtime_put_noidle(dev);
dev_err(dev, "pm runtime failed, e = %d\n", ret);
return ret;
}
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 22893a7e0aa0..749288310c36 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -564,6 +564,7 @@ static int tegra_sflash_resume(struct device *dev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
+ pm_runtime_put_noidle(dev);
dev_err(dev, "pm runtime failed, e = %d\n", ret);
return ret;
}
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index d1187317bb5d..c6b80a60951b 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -761,6 +761,7 @@ static int tegra_slink_setup(struct spi_device *spi)
ret = pm_runtime_get_sync(tspi->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(tspi->dev);
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
return ret;
}
@@ -1197,6 +1198,7 @@ static int tegra_slink_resume(struct device *dev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
+ pm_runtime_put_noidle(dev);
dev_err(dev, "pm runtime failed, e = %d\n", ret);
return ret;
}
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 95c28abaa027..c70b1790a959 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -183,6 +183,7 @@ static int ti_qspi_setup(struct spi_device *spi)
ret = pm_runtime_get_sync(qspi->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(qspi->dev);
dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
return ret;
}
@@ -662,6 +663,17 @@ static int ti_qspi_runtime_resume(struct device *dev)
return 0;
}
+static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
+{
+ if (qspi->rx_bb_addr)
+ dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
+ qspi->rx_bb_addr,
+ qspi->rx_bb_dma_addr);
+
+ if (qspi->rx_chan)
+ dma_release_channel(qspi->rx_chan);
+}
+
static const struct of_device_id ti_qspi_match[] = {
{.compatible = "ti,dra7xxx-qspi" },
{.compatible = "ti,am4372-qspi" },
@@ -816,6 +828,8 @@ no_dma:
if (!ret)
return 0;
+ ti_qspi_dma_cleanup(qspi);
+
pm_runtime_disable(&pdev->dev);
free_master:
spi_master_put(master);
@@ -834,12 +848,7 @@ static int ti_qspi_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (qspi->rx_bb_addr)
- dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
- qspi->rx_bb_addr,
- qspi->rx_bb_dma_addr);
- if (qspi->rx_chan)
- dma_release_channel(qspi->rx_chan);
+ ti_qspi_dma_cleanup(qspi);
return 0;
}
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index fa730a871d25..8a5966963834 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -92,7 +92,6 @@
#define PCH_MAX_SPBR 1023
/* Definition for ML7213/ML7223/ML7831 by LAPIS Semiconductor */
-#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_SPI 0x802c
#define PCI_DEVICE_ID_ML7223_SPI 0x800F
#define PCI_DEVICE_ID_ML7831_SPI 0x8816
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 88a8a8edd44b..bbe33016d371 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -362,9 +362,11 @@ static int spi_drv_probe(struct device *dev)
if (ret)
return ret;
- ret = sdrv->probe(spi);
- if (ret)
- dev_pm_domain_detach(dev, true);
+ if (sdrv->probe) {
+ ret = sdrv->probe(spi);
+ if (ret)
+ dev_pm_domain_detach(dev, true);
+ }
return ret;
}
@@ -372,9 +374,10 @@ static int spi_drv_probe(struct device *dev)
static int spi_drv_remove(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
- int ret;
+ int ret = 0;
- ret = sdrv->remove(to_spi_device(dev));
+ if (sdrv->remove)
+ ret = sdrv->remove(to_spi_device(dev));
dev_pm_domain_detach(dev, true);
return ret;
@@ -399,10 +402,8 @@ int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
sdrv->driver.owner = owner;
sdrv->driver.bus = &spi_bus_type;
- if (sdrv->probe)
- sdrv->driver.probe = spi_drv_probe;
- if (sdrv->remove)
- sdrv->driver.remove = spi_drv_remove;
+ sdrv->driver.probe = spi_drv_probe;
+ sdrv->driver.remove = spi_drv_remove;
if (sdrv->shutdown)
sdrv->driver.shutdown = spi_drv_shutdown;
return driver_register(&sdrv->driver);
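With the two hunks above, spi_drv_probe()/spi_drv_remove() are always bound, so PM-domain attach/detach happens even for drivers that register no probe or remove callback. A hedged sketch of such a minimal driver (the "dummy" names are made up for illustration):

#include <linux/module.h>
#include <linux/spi/spi.h>

static const struct spi_device_id dummy_spi_ids[] = {
        { "dummy-spi-dev" },
        { }
};
MODULE_DEVICE_TABLE(spi, dummy_spi_ids);

static struct spi_driver dummy_spi_driver = {
        .driver = {
                .name = "dummy-spi-dev",
        },
        .id_table = dummy_spi_ids,
        /* .probe and .remove intentionally left unset; the core still
         * attaches and detaches the PM domain around binding */
};
module_spi_driver(dummy_spi_driver);

MODULE_LICENSE("GPL");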
@@ -432,6 +433,12 @@ static LIST_HEAD(spi_controller_list);
*/
static DEFINE_MUTEX(board_lock);
+/*
+ * Prevents addition of devices with same chip select and
+ * addition of devices below an unregistering controller.
+ */
+static DEFINE_MUTEX(spi_add_lock);
+
/**
* spi_alloc_device - Allocate a new SPI device
* @ctlr: Controller to which device is connected
@@ -510,7 +517,6 @@ static int spi_dev_check(struct device *dev, void *data)
*/
int spi_add_device(struct spi_device *spi)
{
- static DEFINE_MUTEX(spi_add_lock);
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
int status;
@@ -538,6 +544,13 @@ int spi_add_device(struct spi_device *spi)
goto done;
}
+ /* Controller may unregister concurrently */
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
+ !device_is_registered(&ctlr->dev)) {
+ status = -ENODEV;
+ goto done;
+ }
+
if (ctlr->cs_gpios)
spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
@@ -1104,8 +1117,6 @@ out:
if (msg->status && ctlr->handle_err)
ctlr->handle_err(ctlr, msg);
- spi_res_release(ctlr, msg);
-
spi_finalize_current_message(ctlr);
return ret;
@@ -1363,6 +1374,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
spi_unmap_msg(ctlr, mesg);
+ /* In the prepare_message callback the SPI bus driver has the
+ * opportunity to split a transfer into smaller chunks.
+ * Release the split transfers here, since spi_map_msg() operates
+ * on the split transfers.
+ */
+ spi_res_release(ctlr, mesg);
+
if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
ret = ctlr->unprepare_message(ctlr, mesg);
if (ret) {
@@ -2033,6 +2051,50 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
+static void devm_spi_release_controller(struct device *dev, void *ctlr)
+{
+ spi_controller_put(*(struct spi_controller **)ctlr);
+}
+
+/**
+ * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
+ * @dev: physical device of SPI controller
+ * @size: how much zeroed driver-private data to allocate
+ * @slave: whether to allocate an SPI master (false) or SPI slave (true)
+ * Context: can sleep
+ *
+ * Allocate an SPI controller and automatically release a reference on it
+ * when @dev is unbound from its driver. Drivers are thus relieved from
+ * having to call spi_controller_put().
+ *
+ * The arguments to this function are identical to __spi_alloc_controller().
+ *
+ * Return: the SPI controller structure on success, else NULL.
+ */
+struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+ unsigned int size,
+ bool slave)
+{
+ struct spi_controller **ptr, *ctlr;
+
+ ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ ctlr = __spi_alloc_controller(dev, size, slave);
+ if (ctlr) {
+ ctlr->devm_allocated = true;
+ *ptr = ctlr;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ctlr;
+}
+EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
+
#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_controller *ctlr)
{
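A hedged sketch of how a driver probe can use the devres-managed allocator added above ("my_priv" and "my_spi_probe" are illustrative names, not from this patch); note that registration remains a separate step:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct my_priv {
        void __iomem *base;
};

static int my_spi_probe(struct platform_device *pdev)
{
        struct spi_controller *ctlr;

        /* the reference is dropped automatically when pdev->dev is unbound,
         * so error paths no longer need spi_controller_put() */
        ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct my_priv));
        if (!ctlr)
                return -ENOMEM;

        ctlr->dev.of_node = pdev->dev.of_node;

        return devm_spi_register_controller(&pdev->dev, ctlr);
}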
@@ -2305,7 +2367,12 @@ void spi_unregister_controller(struct spi_controller *ctlr)
{
struct spi_controller *found;
int id = ctlr->bus_num;
- int dummy;
+
+ /* Prevent addition of new devices, unregister existing ones */
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+ mutex_lock(&spi_add_lock);
+
+ device_for_each_child(&ctlr->dev, NULL, __unregister);
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
@@ -2319,13 +2386,22 @@ void spi_unregister_controller(struct spi_controller *ctlr)
list_del(&ctlr->list);
mutex_unlock(&board_lock);
- dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
- device_unregister(&ctlr->dev);
+ device_del(&ctlr->dev);
+
+ /* Release the last reference on the controller if its driver
+ * has not yet been converted to devm_spi_alloc_master/slave().
+ */
+ if (!ctlr->devm_allocated)
+ put_device(&ctlr->dev);
+
/* free bus id */
mutex_lock(&board_lock);
if (found == ctlr)
idr_remove(&spi_master_idr, id);
mutex_unlock(&board_lock);
+
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+ mutex_unlock(&spi_add_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 028725573e63..e444e7cc6968 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -232,6 +232,11 @@ static int spidev_message(struct spidev_data *spidev,
for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
n;
n--, k_tmp++, u_tmp++) {
+ /* Ensure that the following allocations from rx_buf/tx_buf also
+ * meet DMA alignment requirements.
+ */
+ unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN);
+
k_tmp->len = u_tmp->len;
total += k_tmp->len;
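A user-space illustration of the ALIGN() arithmetic introduced above, taking ARCH_KMALLOC_MINALIGN as 64 purely as an example value; each rx/tx slice of the bounce buffer then starts on a DMA-safe boundary:

#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long minalign = 64;            /* illustrative value */
        unsigned int lens[] = { 3, 64, 100 };

        for (int i = 0; i < 3; i++)
                printf("len %3u -> slice %3lu bytes\n",
                       lens[i], ALIGN(lens[i], minalign));
        /* 3 -> 64, 64 -> 64, 100 -> 128 */
        return 0;
}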
@@ -247,17 +252,17 @@ static int spidev_message(struct spidev_data *spidev,
if (u_tmp->rx_buf) {
/* this transfer needs space in RX bounce buffer */
- rx_total += k_tmp->len;
+ rx_total += len_aligned;
if (rx_total > bufsiz) {
status = -EMSGSIZE;
goto done;
}
k_tmp->rx_buf = rx_buf;
- rx_buf += k_tmp->len;
+ rx_buf += len_aligned;
}
if (u_tmp->tx_buf) {
/* this transfer needs space in TX bounce buffer */
- tx_total += k_tmp->len;
+ tx_total += len_aligned;
if (tx_total > bufsiz) {
status = -EMSGSIZE;
goto done;
@@ -267,7 +272,7 @@ static int spidev_message(struct spidev_data *spidev,
(uintptr_t) u_tmp->tx_buf,
u_tmp->len))
goto done;
- tx_buf += k_tmp->len;
+ tx_buf += len_aligned;
}
k_tmp->cs_change = !!u_tmp->cs_change;
@@ -297,16 +302,16 @@ static int spidev_message(struct spidev_data *spidev,
goto done;
/* copy any rx data out of bounce buffer */
- rx_buf = spidev->rx_buffer;
- for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
+ for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
+ n;
+ n--, k_tmp++, u_tmp++) {
if (u_tmp->rx_buf) {
if (copy_to_user((u8 __user *)
- (uintptr_t) u_tmp->rx_buf, rx_buf,
+ (uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf,
u_tmp->len)) {
status = -EFAULT;
goto done;
}
- rx_buf += u_tmp->len;
}
}
status = total;
@@ -607,15 +612,20 @@ err_find_dev:
static int spidev_release(struct inode *inode, struct file *filp)
{
struct spidev_data *spidev;
+ int dofree;
mutex_lock(&device_list_lock);
spidev = filp->private_data;
filp->private_data = NULL;
+ spin_lock_irq(&spidev->spi_lock);
+ /* ... after we unbound from the underlying device? */
+ dofree = (spidev->spi == NULL);
+ spin_unlock_irq(&spidev->spi_lock);
+
/* last close? */
spidev->users--;
if (!spidev->users) {
- int dofree;
kfree(spidev->tx_buffer);
spidev->tx_buffer = NULL;
@@ -623,19 +633,14 @@ static int spidev_release(struct inode *inode, struct file *filp)
kfree(spidev->rx_buffer);
spidev->rx_buffer = NULL;
- spin_lock_irq(&spidev->spi_lock);
- if (spidev->spi)
- spidev->speed_hz = spidev->spi->max_speed_hz;
-
- /* ... after we unbound from the underlying device? */
- dofree = (spidev->spi == NULL);
- spin_unlock_irq(&spidev->spi_lock);
-
if (dofree)
kfree(spidev);
+ else
+ spidev->speed_hz = spidev->spi->max_speed_hz;
}
#ifdef CONFIG_SPI_SLAVE
- spi_slave_abort(spidev->spi);
+ if (!dofree)
+ spi_slave_abort(spidev->spi);
#endif
mutex_unlock(&device_list_lock);
@@ -782,13 +787,13 @@ static int spidev_remove(struct spi_device *spi)
{
struct spidev_data *spidev = spi_get_drvdata(spi);
+ /* prevent new opens */
+ mutex_lock(&device_list_lock);
/* make sure ops on existing fds can abort cleanly */
spin_lock_irq(&spidev->spi_lock);
spidev->spi = NULL;
spin_unlock_irq(&spidev->spi_lock);
- /* prevent new opens */
- mutex_lock(&device_list_lock);
list_del(&spidev->device_entry);
device_destroy(spidev_class, spidev->devt);
clear_bit(MINOR(spidev->devt), minors);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 1abf76be2aa8..58e8140c1f57 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -114,8 +114,6 @@ source "drivers/staging/mt7621-spi/Kconfig"
source "drivers/staging/mt7621-dma/Kconfig"
-source "drivers/staging/mt7621-mmc/Kconfig"
-
source "drivers/staging/mt7621-eth/Kconfig"
source "drivers/staging/mt7621-dts/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index ab0cbe8815b1..6f3065569fa0 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_SOC_MT7621) += mt7621-pci/
obj-$(CONFIG_SOC_MT7621) += mt7621-pinctrl/
obj-$(CONFIG_SOC_MT7621) += mt7621-spi/
obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
-obj-$(CONFIG_SOC_MT7621) += mt7621-mmc/
obj-$(CONFIG_SOC_MT7621) += mt7621-eth/
obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index e3df4bf521b5..a97bbd89fae2 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -95,6 +95,15 @@ static DEFINE_MUTEX(ashmem_mutex);
static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
+/*
+ * A separate lockdep class for the backing shmem inodes to resolve the lockdep
+ * warning about the race between kswapd taking fs_reclaim before inode_lock
+ * and write syscall taking inode_lock and then fs_reclaim.
+ * Note that such race is impossible because ashmem does not support write
+ * syscalls operating on the backing shmem.
+ */
+static struct lock_class_key backing_shmem_inode_class;
+
static inline unsigned long range_size(struct ashmem_range *range)
{
return range->pgend - range->pgstart + 1;
@@ -395,6 +404,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
if (!asma->file) {
char *name = ASHMEM_NAME_DEF;
struct file *vmfile;
+ struct inode *inode;
if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
name = asma->name;
@@ -406,6 +416,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
goto out;
}
vmfile->f_mode |= FMODE_LSEEK;
+ inode = file_inode(vmfile);
+ lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class);
asma->file = vmfile;
/*
* override mmap operation of the vmfile so that it can't be
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 31db510018a9..6babcdb4d7d2 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -97,12 +97,12 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
- void *addr = vm_map_ram(pages, num, -1, pgprot);
+ void *addr = vmap(pages, num, VM_MAP, pgprot);
if (!addr)
return -ENOMEM;
memset(addr, 0, PAGE_SIZE * num);
- vm_unmap_ram(addr, num);
+ vunmap(addr);
return 0;
}
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index e18b61cdbdeb..636988248da2 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2594,8 +2594,10 @@ static int comedi_open(struct inode *inode, struct file *file)
}
cfp = kzalloc(sizeof(*cfp), GFP_KERNEL);
- if (!cfp)
+ if (!cfp) {
+ comedi_dev_put(dev);
return -ENOMEM;
+ }
cfp->dev = dev;
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
index 560649be9d13..2176d3289eff 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
@@ -106,14 +106,22 @@ static int apci1032_cos_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct apci1032_private *devpriv = dev->private;
- unsigned int shift, oldmask;
+ unsigned int shift, oldmask, himask, lomask;
switch (data[0]) {
case INSN_CONFIG_DIGITAL_TRIG:
if (data[1] != 0)
return -EINVAL;
shift = data[3];
- oldmask = (1U << shift) - 1;
+ if (shift < 32) {
+ oldmask = (1U << shift) - 1;
+ himask = data[4] << shift;
+ lomask = data[5] << shift;
+ } else {
+ oldmask = 0xffffffffu;
+ himask = 0;
+ lomask = 0;
+ }
switch (data[2]) {
case COMEDI_DIGITAL_TRIG_DISABLE:
devpriv->ctrl = 0;
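The shift guard added above avoids shifting a 32-bit value by 32 or more, which is undefined behaviour in C; a small sketch of the same pattern, with illustrative names:

#include <stdint.h>

static void build_masks(unsigned int shift, uint32_t hi, uint32_t lo,
                        uint32_t *oldmask, uint32_t *himask, uint32_t *lomask)
{
        if (shift < 32) {
                *oldmask = (1U << shift) - 1;   /* keep only bits below 'shift' */
                *himask = hi << shift;
                *lomask = lo << shift;
        } else {
                /* shift >= 32 would be undefined, so keep everything and
                 * configure no channels */
                *oldmask = 0xffffffffu;
                *himask = 0;
                *lomask = 0;
        }
}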
@@ -136,8 +144,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev,
devpriv->mode2 &= oldmask;
}
/* configure specified channels */
- devpriv->mode1 |= data[4] << shift;
- devpriv->mode2 |= data[5] << shift;
+ devpriv->mode1 |= himask;
+ devpriv->mode2 |= lomask;
break;
case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:
if (devpriv->ctrl != (APCI1032_CTRL_INT_ENA |
@@ -154,8 +162,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev,
devpriv->mode2 &= oldmask;
}
/* configure specified channels */
- devpriv->mode1 |= data[4] << shift;
- devpriv->mode2 |= data[5] << shift;
+ devpriv->mode1 |= himask;
+ devpriv->mode2 |= lomask;
break;
default:
return -EINVAL;
@@ -252,6 +260,7 @@ static irqreturn_t apci1032_interrupt(int irq, void *d)
struct apci1032_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
unsigned int ctrl;
+ unsigned short val;
/* check interrupt is from this device */
if ((inl(devpriv->amcc_iobase + AMCC_OP_REG_INTCSR) &
@@ -267,7 +276,8 @@ static irqreturn_t apci1032_interrupt(int irq, void *d)
outl(ctrl & ~APCI1032_CTRL_INT_ENA, dev->iobase + APCI1032_CTRL_REG);
s->state = inl(dev->iobase + APCI1032_STATUS_REG) & 0xffff;
- comedi_buf_write_samples(s, &s->state, 1);
+ val = s->state;
+ comedi_buf_write_samples(s, &val, 1);
comedi_handle_events(dev, s);
/* enable the interrupt */
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index 45ad4ba92f94..8c3eff7cf465 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -208,7 +208,7 @@ static irqreturn_t apci1500_interrupt(int irq, void *d)
struct comedi_device *dev = d;
struct apci1500_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
- unsigned int status = 0;
+ unsigned short status = 0;
unsigned int val;
val = inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
@@ -238,14 +238,14 @@ static irqreturn_t apci1500_interrupt(int irq, void *d)
*
* Mask Meaning
* ---------- ------------------------------------------
- * 0x00000001 Event 1 has occurred
- * 0x00000010 Event 2 has occurred
- * 0x00000100 Counter/timer 1 has run down (not implemented)
- * 0x00001000 Counter/timer 2 has run down (not implemented)
- * 0x00010000 Counter 3 has run down (not implemented)
- * 0x00100000 Watchdog has run down (not implemented)
- * 0x01000000 Voltage error
- * 0x10000000 Short-circuit error
+ * 0b00000001 Event 1 has occurred
+ * 0b00000010 Event 2 has occurred
+ * 0b00000100 Counter/timer 1 has run down (not implemented)
+ * 0b00001000 Counter/timer 2 has run down (not implemented)
+ * 0b00010000 Counter 3 has run down (not implemented)
+ * 0b00100000 Watchdog has run down (not implemented)
+ * 0b01000000 Voltage error
+ * 0b10000000 Short-circuit error
*/
comedi_buf_write_samples(s, &status, 1);
comedi_handle_events(dev, s);
@@ -452,13 +452,14 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev,
struct apci1500_private *devpriv = dev->private;
unsigned int trig = data[1];
unsigned int shift = data[3];
- unsigned int hi_mask = data[4] << shift;
- unsigned int lo_mask = data[5] << shift;
- unsigned int chan_mask = hi_mask | lo_mask;
- unsigned int old_mask = (1 << shift) - 1;
- unsigned int pm = devpriv->pm[trig] & old_mask;
- unsigned int pt = devpriv->pt[trig] & old_mask;
- unsigned int pp = devpriv->pp[trig] & old_mask;
+ unsigned int hi_mask;
+ unsigned int lo_mask;
+ unsigned int chan_mask;
+ unsigned int old_mask;
+ unsigned int pm;
+ unsigned int pt;
+ unsigned int pp;
+ unsigned int invalid_chan;
if (trig > 1) {
dev_dbg(dev->class_dev,
@@ -466,11 +467,28 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev,
return -EINVAL;
}
- if (chan_mask > 0xffff) {
+ if (shift <= 16) {
+ hi_mask = data[4] << shift;
+ lo_mask = data[5] << shift;
+ old_mask = (1U << shift) - 1;
+ invalid_chan = (data[4] | data[5]) >> (16 - shift);
+ } else {
+ hi_mask = 0;
+ lo_mask = 0;
+ old_mask = 0xffff;
+ invalid_chan = data[4] | data[5];
+ }
+ chan_mask = hi_mask | lo_mask;
+
+ if (invalid_chan) {
dev_dbg(dev->class_dev, "invalid digital trigger channel\n");
return -EINVAL;
}
+ pm = devpriv->pm[trig] & old_mask;
+ pt = devpriv->pt[trig] & old_mask;
+ pp = devpriv->pp[trig] & old_mask;
+
switch (data[2]) {
case COMEDI_DIGITAL_TRIG_DISABLE:
/* clear trigger configuration */
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index 10501fe6bb25..1268ba34be5f 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -331,14 +331,22 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct apci1564_private *devpriv = dev->private;
- unsigned int shift, oldmask;
+ unsigned int shift, oldmask, himask, lomask;
switch (data[0]) {
case INSN_CONFIG_DIGITAL_TRIG:
if (data[1] != 0)
return -EINVAL;
shift = data[3];
- oldmask = (1U << shift) - 1;
+ if (shift < 32) {
+ oldmask = (1U << shift) - 1;
+ himask = data[4] << shift;
+ lomask = data[5] << shift;
+ } else {
+ oldmask = 0xffffffffu;
+ himask = 0;
+ lomask = 0;
+ }
switch (data[2]) {
case COMEDI_DIGITAL_TRIG_DISABLE:
devpriv->ctrl = 0;
@@ -362,8 +370,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
devpriv->mode2 &= oldmask;
}
/* configure specified channels */
- devpriv->mode1 |= data[4] << shift;
- devpriv->mode2 |= data[5] << shift;
+ devpriv->mode1 |= himask;
+ devpriv->mode2 |= lomask;
break;
case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:
if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA |
@@ -380,8 +388,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
devpriv->mode2 &= oldmask;
}
/* configure specified channels */
- devpriv->mode1 |= data[4] << shift;
- devpriv->mode2 |= data[5] << shift;
+ devpriv->mode1 |= himask;
+ devpriv->mode2 |= lomask;
break;
default:
return -EINVAL;
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index c1c3b18793d5..8cdee455e8a5 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -300,11 +300,11 @@ static int pci1710_ai_eoc(struct comedi_device *dev,
static int pci1710_ai_read_sample(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int cur_chan,
- unsigned int *val)
+ unsigned short *val)
{
const struct boardtype *board = dev->board_ptr;
struct pci1710_private *devpriv = dev->private;
- unsigned int sample;
+ unsigned short sample;
unsigned int chan;
sample = inw(dev->iobase + PCI171X_AD_DATA_REG);
@@ -345,7 +345,7 @@ static int pci1710_ai_insn_read(struct comedi_device *dev,
pci1710_ai_setup_chanlist(dev, s, &insn->chanspec, 1, 1);
for (i = 0; i < insn->n; i++) {
- unsigned int val;
+ unsigned short val;
/* start conversion */
outw(0, dev->iobase + PCI171X_SOFTTRG_REG);
@@ -395,7 +395,7 @@ static void pci1710_handle_every_sample(struct comedi_device *dev,
{
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int status;
- unsigned int val;
+ unsigned short val;
int ret;
status = inw(dev->iobase + PCI171X_STATUS_REG);
@@ -455,7 +455,7 @@ static void pci1710_handle_fifo(struct comedi_device *dev,
}
for (i = 0; i < devpriv->max_samples; i++) {
- unsigned int val;
+ unsigned short val;
int ret;
ret = pci1710_ai_read_sample(dev, s, s->async->cur_chan, &val);
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 8429d57087fd..86cae5d0e983 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -1281,7 +1281,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
devpriv->amcc + AMCC_OP_REG_INTCSR);
ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED,
- dev->board_name, dev);
+ "cb_pcidas", dev);
if (ret) {
dev_dbg(dev->class_dev, "unable to allocate irq %d\n",
pcidev->irq);
@@ -1342,6 +1342,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
if (dev->irq && board->has_ao_fifo) {
dev->write_subdev = s;
s->subdev_flags |= SDF_CMD_WRITE;
+ s->len_chanlist = s->n_chan;
s->do_cmdtest = cb_pcidas_ao_cmdtest;
s->do_cmd = cb_pcidas_ao_cmd;
s->cancel = cb_pcidas_ao_cancel;
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index 631a703b345d..91403cc1bbf9 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -4021,7 +4021,7 @@ static int auto_attach(struct comedi_device *dev,
init_stc_registers(dev);
retval = request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
- dev->board_name, dev);
+ "cb_pcidas64", dev);
if (retval) {
dev_dbg(dev->class_dev, "unable to allocate irq %u\n",
pcidev->irq);
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index f99211ec46de..0034005bdf8f 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -186,7 +186,7 @@ static irqreturn_t das6402_interrupt(int irq, void *d)
if (status & DAS6402_STATUS_FFULL) {
async->events |= COMEDI_CB_OVERFLOW;
} else if (status & DAS6402_STATUS_FFNE) {
- unsigned int val;
+ unsigned short val;
val = das6402_ai_read_sample(dev, s);
comedi_buf_write_samples(s, &val, 1);
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index 8cf09ef3012f..4bd8fd5218c8 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -427,7 +427,7 @@ static irqreturn_t das800_interrupt(int irq, void *d)
struct comedi_cmd *cmd;
unsigned long irq_flags;
unsigned int status;
- unsigned int val;
+ unsigned short val;
bool fifo_empty;
bool fifo_overflow;
int i;
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index 75693cdde313..c180d18ce517 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -404,7 +404,7 @@ static irqreturn_t dmm32at_isr(int irq, void *d)
{
struct comedi_device *dev = d;
unsigned char intstat;
- unsigned int val;
+ unsigned short val;
int i;
if (!dev->attached) {
diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
index 83026ba63d1c..78a7c1b3448a 100644
--- a/drivers/staging/comedi/drivers/dt2815.c
+++ b/drivers/staging/comedi/drivers/dt2815.c
@@ -92,6 +92,7 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
int ret;
for (i = 0; i < insn->n; i++) {
+ /* FIXME: lo bit 0 chooses voltage output or current output */
lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01;
hi = (data[i] & 0xff0) >> 4;
@@ -105,6 +106,8 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
if (ret)
return ret;
+ outb(hi, dev->iobase + DT2815_DATA);
+
devpriv->ao_readback[chan] = data[i];
}
return i;
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index ee53571a8969..ead8000b5929 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -924,7 +924,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
struct comedi_subdevice *s = dev->read_subdev;
int i;
int c = 0;
- unsigned int lval;
+ unsigned short lval;
if (!dev->attached)
return IRQ_NONE;
diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c
index ea430237efa7..9da8dd748078 100644
--- a/drivers/staging/comedi/drivers/mf6x4.c
+++ b/drivers/staging/comedi/drivers/mf6x4.c
@@ -112,8 +112,9 @@ static int mf6x4_ai_eoc(struct comedi_device *dev,
struct mf6x4_private *devpriv = dev->private;
unsigned int status;
+ /* EOLC goes low at end of conversion. */
status = ioread32(devpriv->gpioc_reg);
- if (status & MF6X4_GPIOC_EOLC)
+ if ((status & MF6X4_GPIOC_EOLC) == 0)
return 0;
return -EBUSY;
}
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 4d1eccb5041d..4518c2680b7c 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -332,7 +332,7 @@ static int ni6527_intr_insn_config(struct comedi_device *dev,
case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
/* check shift amount */
shift = data[3];
- if (shift >= s->n_chan) {
+ if (shift >= 32) {
mask = 0;
rising = 0;
falling = 0;
diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
index a5937206bf1c..e9abae418062 100644
--- a/drivers/staging/comedi/drivers/pcl711.c
+++ b/drivers/staging/comedi/drivers/pcl711.c
@@ -184,7 +184,7 @@ static irqreturn_t pcl711_interrupt(int irq, void *d)
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int data;
+ unsigned short data;
if (!dev->attached) {
dev_err(dev->class_dev, "spurious interrupt\n");
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 0af5315d4357..fc8afffc1815 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -423,7 +423,7 @@ static int pcl818_ai_eoc(struct comedi_device *dev,
static bool pcl818_ai_write_sample(struct comedi_device *dev,
struct comedi_subdevice *s,
- unsigned int chan, unsigned int val)
+ unsigned int chan, unsigned short val)
{
struct pcl818_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 65dc6c51037e..7956abcbae22 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -667,6 +667,9 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
if (!devpriv->ep_rx || !devpriv->ep_tx)
return -ENODEV;
+ if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
+ return -EINVAL;
+
return 0;
}
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 3e51476a7045..d2cb2bd6d913 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -2148,7 +2148,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
struct nbu2ss_ep *ep,
int status)
{
- struct nbu2ss_req *req;
+ struct nbu2ss_req *req, *n;
/* Endpoint Disable */
_nbu2ss_epn_exit(udc, ep);
@@ -2160,7 +2160,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
return 0;
/* called with irqs blocked */
- list_for_each_entry(req, &ep->queue, queue) {
+ list_for_each_entry_safe(req, n, &ep->queue, queue) {
_nbu2ss_ep_done(ep, req, status);
}
diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h
index 7677da889f12..d8838ce66941 100644
--- a/drivers/staging/erofs/erofs_fs.h
+++ b/drivers/staging/erofs/erofs_fs.h
@@ -71,6 +71,9 @@ enum {
#define EROFS_I_VERSION_BIT 0
__EROFS_BIT(EROFS_I_, DATA_MAPPING, VERSION);
+#define EROFS_I_ALL \
+ ((1 << (EROFS_I_DATA_MAPPING_BIT + EROFS_I_DATA_MAPPING_BITS)) - 1)
+
struct erofs_inode_v1 {
/* 0 */__le16 i_advise;
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
index 7448744cc515..02398c7eb4a4 100644
--- a/drivers/staging/erofs/inode.c
+++ b/drivers/staging/erofs/inode.c
@@ -14,26 +14,84 @@
#include <trace/events/erofs.h>
-/* no locking */
-static int read_inode(struct inode *inode, void *data)
+/*
+ * if the inode is successfully read, return its inode page (or sometimes
+ * the inode payload page if it's an extended inode) in order to fill
+ * inline data if possible.
+ */
+static struct page *read_inode(struct inode *inode, unsigned int *ofs)
{
+ struct super_block *sb = inode->i_sb;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_vnode *vi = EROFS_V(inode);
- struct erofs_inode_v1 *v1 = data;
- const unsigned advise = le16_to_cpu(v1->i_advise);
+ const erofs_off_t inode_loc = iloc(sbi, vi->nid);
+ erofs_blk_t blkaddr;
+ struct page *page;
+ struct erofs_inode_v1 *v1;
+ struct erofs_inode_v2 *v2, *copied = NULL;
+ unsigned int ifmt;
+ int err;
+
+ blkaddr = erofs_blknr(inode_loc);
+ *ofs = erofs_blkoff(inode_loc);
+
+ debugln("%s, reading inode nid %llu at %u of blkaddr %u",
+ __func__, vi->nid, *ofs, blkaddr);
+
+ page = erofs_get_meta_page(sb, blkaddr, false);
+ if (IS_ERR(page)) {
+ errln("failed to get inode (nid: %llu) page, err %ld",
+ vi->nid, PTR_ERR(page));
+ return page;
+ }
- vi->data_mapping_mode = __inode_data_mapping(advise);
+ v1 = page_address(page) + *ofs;
+ ifmt = le16_to_cpu(v1->i_advise);
+
+ if (ifmt & ~EROFS_I_ALL) {
+ errln("unsupported i_format %u of nid %llu", ifmt, vi->nid);
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+ vi->data_mapping_mode = __inode_data_mapping(ifmt);
if (unlikely(vi->data_mapping_mode >= EROFS_INODE_LAYOUT_MAX)) {
errln("unknown data mapping mode %u of nid %llu",
vi->data_mapping_mode, vi->nid);
- DBG_BUGON(1);
- return -EIO;
+ err = -EOPNOTSUPP;
+ goto err_out;
}
- if (__inode_version(advise) == EROFS_INODE_LAYOUT_V2) {
- struct erofs_inode_v2 *v2 = data;
-
+ switch (__inode_version(ifmt)) {
+ case EROFS_INODE_LAYOUT_V2:
vi->inode_isize = sizeof(struct erofs_inode_v2);
+ /* check if the inode crosses a page boundary */
+ if (*ofs + vi->inode_isize <= PAGE_SIZE) {
+ *ofs += vi->inode_isize;
+ v2 = (struct erofs_inode_v2 *)v1;
+ } else {
+ const unsigned int gotten = PAGE_SIZE - *ofs;
+
+ copied = kmalloc(vi->inode_isize, GFP_NOFS);
+ if (!copied) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ memcpy(copied, v1, gotten);
+ unlock_page(page);
+ put_page(page);
+
+ page = erofs_get_meta_page(sb, blkaddr + 1, false);
+ if (IS_ERR(page)) {
+ errln("failed to get inode payload page (nid: %llu), err %ld",
+ vi->nid, PTR_ERR(page));
+ kfree(copied);
+ return page;
+ }
+ *ofs = vi->inode_isize - gotten;
+ memcpy((u8 *)copied + gotten, page_address(page), *ofs);
+ v2 = copied;
+ }
vi->xattr_isize = ondisk_xattr_ibody_size(v2->i_xattr_icount);
inode->i_mode = le16_to_cpu(v2->i_mode);
@@ -46,24 +104,23 @@ static int read_inode(struct inode *inode, void *data)
} else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
inode->i_rdev = 0;
} else {
- return -EIO;
+ goto bogusimode;
}
i_uid_write(inode, le32_to_cpu(v2->i_uid));
i_gid_write(inode, le32_to_cpu(v2->i_gid));
set_nlink(inode, le32_to_cpu(v2->i_nlink));
- /* ns timestamp */
- inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
- le64_to_cpu(v2->i_ctime);
- inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
- le32_to_cpu(v2->i_ctime_nsec);
+ /* extended inode has its own timestamp */
+ inode->i_ctime.tv_sec = le64_to_cpu(v2->i_ctime);
+ inode->i_ctime.tv_nsec = le32_to_cpu(v2->i_ctime_nsec);
inode->i_size = le64_to_cpu(v2->i_size);
- } else if (__inode_version(advise) == EROFS_INODE_LAYOUT_V1) {
- struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
-
+ kfree(copied);
+ break;
+ case EROFS_INODE_LAYOUT_V1:
vi->inode_isize = sizeof(struct erofs_inode_v1);
+ *ofs += vi->inode_isize;
vi->xattr_isize = ondisk_xattr_ibody_size(v1->i_xattr_icount);
inode->i_mode = le16_to_cpu(v1->i_mode);
@@ -76,30 +133,43 @@ static int read_inode(struct inode *inode, void *data)
} else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
inode->i_rdev = 0;
} else {
- return -EIO;
+ goto bogusimode;
}
i_uid_write(inode, le16_to_cpu(v1->i_uid));
i_gid_write(inode, le16_to_cpu(v1->i_gid));
set_nlink(inode, le16_to_cpu(v1->i_nlink));
- /* use build time to derive all file time */
- inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
- sbi->build_time;
- inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
- sbi->build_time_nsec;
+ /* use build time for compact inodes */
+ inode->i_ctime.tv_sec = sbi->build_time;
+ inode->i_ctime.tv_nsec = sbi->build_time_nsec;
inode->i_size = le32_to_cpu(v1->i_size);
- } else {
+ break;
+ default:
errln("unsupported on-disk inode version %u of nid %llu",
- __inode_version(advise), vi->nid);
- DBG_BUGON(1);
- return -EIO;
+ __inode_version(ifmt), vi->nid);
+ err = -EOPNOTSUPP;
+ goto err_out;
}
+ inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
+ inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
+ inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
+ inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;
+
/* measure inode.i_blocks as generic filesystems do */
inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
- return 0;
+ return page;
+bogusimode:
+ errln("bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid);
+ err = -EIO;
+err_out:
+ DBG_BUGON(1);
+ kfree(copied);
+ unlock_page(page);
+ put_page(page);
+ return ERR_PTR(err);
}
/*
@@ -131,7 +201,7 @@ static int fill_inline_data(struct inode *inode, void *data, unsigned m_pofs)
if (unlikely(lnk == NULL))
return -ENOMEM;
- m_pofs += vi->inode_isize + vi->xattr_isize;
+ m_pofs += vi->xattr_isize;
/* inline symlink data shouldn't cross the page boundary either */
if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
@@ -152,35 +222,17 @@ static int fill_inline_data(struct inode *inode, void *data, unsigned m_pofs)
static int fill_inode(struct inode *inode, int isdir)
{
- struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
- struct erofs_vnode *vi = EROFS_V(inode);
struct page *page;
- void *data;
- int err;
- erofs_blk_t blkaddr;
- unsigned ofs;
+ unsigned int ofs;
+ int err = 0;
trace_erofs_fill_inode(inode, isdir);
- blkaddr = erofs_blknr(iloc(sbi, vi->nid));
- ofs = erofs_blkoff(iloc(sbi, vi->nid));
-
- debugln("%s, reading inode nid %llu at %u of blkaddr %u",
- __func__, vi->nid, ofs, blkaddr);
-
- page = erofs_get_meta_page(inode->i_sb, blkaddr, isdir);
-
+ /* read inode base data from disk */
+ page = read_inode(inode, &ofs);
if (IS_ERR(page)) {
- errln("failed to get inode (nid: %llu) page, err %ld",
- vi->nid, PTR_ERR(page));
return PTR_ERR(page);
- }
-
- DBG_BUGON(!PageUptodate(page));
- data = page_address(page);
-
- err = read_inode(inode, data + ofs);
- if (!err) {
+ } else {
/* setup the new inode */
if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_EROFS_FS_XATTR
@@ -228,7 +280,7 @@ static int fill_inode(struct inode *inode, int isdir)
inode->i_mapping->a_ops = &erofs_raw_access_aops;
/* fill last page if inline data is available */
- fill_inline_data(inode, data, ofs);
+ fill_inline_data(inode, page_address(page), ofs);
}
out_unlock:
diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
index 684ff06fc7bf..630fd1f4f123 100644
--- a/drivers/staging/erofs/unzip_vle.h
+++ b/drivers/staging/erofs/unzip_vle.h
@@ -169,22 +169,22 @@ static inline void z_erofs_onlinepage_init(struct page *page)
static inline void z_erofs_onlinepage_fixup(struct page *page,
uintptr_t index, bool down)
{
- unsigned long *p, o, v, id;
-repeat:
- p = &page_private(page);
- o = READ_ONCE(*p);
+ union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
+ int orig, orig_index, val;
- id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
- if (id) {
+repeat:
+ orig = atomic_read(u.o);
+ orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+ if (orig_index) {
if (!index)
return;
- BUG_ON(id != index);
+ DBG_BUGON(orig_index != index);
}
- v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
- ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned)down);
- if (cmpxchg(p, o, v) != o)
+ val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
+ ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
+ if (atomic_cmpxchg(u.o, orig, val) != orig)
goto repeat;
}
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
index 2d96820da62e..4de9c39535eb 100644
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -309,7 +309,7 @@ unsigned long erofs_shrink_scan(struct shrinker *shrink,
sbi->shrinker_run_no = run_no;
#ifdef CONFIG_EROFS_FS_ZIP
- freed += erofs_shrink_workstation(sbi, nr, false);
+ freed += erofs_shrink_workstation(sbi, nr - freed, false);
#endif
spin_lock(&erofs_sb_list_lock);
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index fa0dd425b454..cd062628a46b 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -2219,6 +2219,7 @@ static int fwserial_create(struct fw_unit *unit)
err = fw_core_add_address_handler(&port->rx_handler,
&fw_high_memory_region);
if (err) {
+ tty_port_destroy(&port->port);
kfree(port);
goto free_ports;
}
@@ -2301,6 +2302,7 @@ unregister_ttys:
free_ports:
for (--i; i >= 0; --i) {
+ fw_core_remove_address_handler(&serial->ports[i]->rx_handler);
tty_port_destroy(&serial->ports[i]->port);
kfree(serial->ports[i]);
}
diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
index 0cef1d6d2e2b..99da735bdf6d 100644
--- a/drivers/staging/gasket/apex_driver.c
+++ b/drivers/staging/gasket/apex_driver.c
@@ -578,13 +578,6 @@ static const struct pci_device_id apex_pci_ids[] = {
{ PCI_DEVICE(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID) }, { 0 }
};
-static void apex_pci_fixup_class(struct pci_dev *pdev)
-{
- pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
-}
-DECLARE_PCI_FIXUP_CLASS_HEADER(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID,
- PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
-
static int apex_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
index d12ab560411f..9396aeb3f431 100644
--- a/drivers/staging/gasket/gasket_core.c
+++ b/drivers/staging/gasket/gasket_core.c
@@ -933,6 +933,10 @@ do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
gasket_get_bar_index(gasket_dev,
(vma->vm_pgoff << PAGE_SHIFT) +
driver_desc->legacy_mmap_address_offset);
+
+ if (bar_index < 0)
+ return DO_MAP_REGION_INVALID;
+
phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
while (mapped_bytes < map_length) {
/*
diff --git a/drivers/staging/gasket/gasket_interrupt.c b/drivers/staging/gasket/gasket_interrupt.c
index 1cfbc120f228..225460c535d6 100644
--- a/drivers/staging/gasket/gasket_interrupt.c
+++ b/drivers/staging/gasket/gasket_interrupt.c
@@ -527,14 +527,16 @@ int gasket_interrupt_system_status(struct gasket_dev *gasket_dev)
int gasket_interrupt_set_eventfd(struct gasket_interrupt_data *interrupt_data,
int interrupt, int event_fd)
{
- struct eventfd_ctx *ctx = eventfd_ctx_fdget(event_fd);
-
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ struct eventfd_ctx *ctx;
if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts)
return -EINVAL;
+ ctx = eventfd_ctx_fdget(event_fd);
+
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
interrupt_data->eventfd_ctxs[interrupt] = ctx;
return 0;
}
@@ -545,6 +547,9 @@ int gasket_interrupt_clear_eventfd(struct gasket_interrupt_data *interrupt_data,
if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts)
return -EINVAL;
- interrupt_data->eventfd_ctxs[interrupt] = NULL;
+ if (interrupt_data->eventfd_ctxs[interrupt]) {
+ eventfd_ctx_put(interrupt_data->eventfd_ctxs[interrupt]);
+ interrupt_data->eventfd_ctxs[interrupt] = NULL;
+ }
return 0;
}
diff --git a/drivers/staging/gasket/gasket_sysfs.c b/drivers/staging/gasket/gasket_sysfs.c
index fc45f0d13e87..5986c67bc7ec 100644
--- a/drivers/staging/gasket/gasket_sysfs.c
+++ b/drivers/staging/gasket/gasket_sysfs.c
@@ -343,6 +343,7 @@ void gasket_sysfs_put_attr(struct device *device,
dev_err(device, "Unable to put unknown attribute: %s\n",
attr->attr.attr.name);
+ put_mapping(mapping);
}
EXPORT_SYMBOL(gasket_sysfs_put_attr);
@@ -376,6 +377,7 @@ ssize_t gasket_sysfs_register_store(struct device *device,
gasket_dev = mapping->gasket_dev;
if (!gasket_dev) {
dev_err(device, "Device driver may have been removed\n");
+ put_mapping(mapping);
return 0;
}
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index dc4da66c3695..54bdb64f52e8 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -56,20 +56,24 @@ static int gdm_usb_recv(void *priv_dev,
static int request_mac_address(struct lte_udev *udev)
{
- u8 buf[16] = {0,};
- struct hci_packet *hci = (struct hci_packet *)buf;
+ struct hci_packet *hci;
struct usb_device *usbdev = udev->usbdev;
int actual;
int ret = -1;
+ hci = kmalloc(struct_size(hci, data, 1), GFP_KERNEL);
+ if (!hci)
+ return -ENOMEM;
+
hci->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_GET_INFORMATION);
hci->len = gdm_cpu_to_dev16(udev->gdm_ed, 1);
hci->data[0] = MAC_ADDRESS;
- ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
+ ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), hci, 5,
&actual, 1000);
udev->request_mac_addr = 1;
+ kfree(hci);
return ret;
}
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index 35acd55ca5ab..6cbf69a57dfd 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -489,6 +489,7 @@ static int gbcodec_hw_params(struct snd_pcm_substream *substream,
if (ret) {
dev_err_ratelimited(dai->dev, "%d: Error during set_config\n",
ret);
+ gb_pm_runtime_put_noidle(bundle);
mutex_unlock(&codec->lock);
return ret;
}
@@ -565,6 +566,7 @@ static int gbcodec_prepare(struct snd_pcm_substream *substream,
break;
}
if (ret) {
+ gb_pm_runtime_put_noidle(bundle);
mutex_unlock(&codec->lock);
dev_err_ratelimited(dai->dev, "set_data_size failed:%d\n",
ret);
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index b71078339e86..860247d71818 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -460,6 +460,15 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol,
val = ucontrol->value.integer.value[0] & mask;
connect = !!val;
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+ if (ret)
+ goto exit;
+
/* update ucontrol */
if (gbvalue.value.integer_value[0] != val) {
for (wi = 0; wi < wlist->num_widgets; wi++) {
@@ -473,25 +482,17 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol,
gbvalue.value.integer_value[0] =
cpu_to_le32(ucontrol->value.integer.value[0]);
- ret = gb_pm_runtime_get_sync(bundle);
- if (ret)
- return ret;
-
ret = gb_audio_gb_set_control(module->mgmt_connection,
data->ctl_id,
GB_AUDIO_INVALID_INDEX, &gbvalue);
-
- gb_pm_runtime_put_autosuspend(bundle);
-
- if (ret) {
- dev_err_ratelimited(codec->dev,
- "%d:Error in %s for %s\n", ret,
- __func__, kcontrol->id.name);
- return ret;
- }
}
- return 0;
+exit:
+ gb_pm_runtime_put_autosuspend(bundle);
+ if (ret)
+ dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
}
#define SOC_DAPM_MIXER_GB(xname, kcount, data) \
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index 40680eaf3974..db06cd544af5 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -1028,7 +1028,8 @@ static int gb_lights_light_config(struct gb_lights *glights, u8 id)
light->channels_count = conf.channel_count;
light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL);
-
+ if (!light->name)
+ return -ENOMEM;
light->channels = kcalloc(light->channels_count,
sizeof(struct gb_channel), GFP_KERNEL);
if (!light->channels)
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
index 38e85033fc4b..afb2e5e5111a 100644
--- a/drivers/staging/greybus/sdio.c
+++ b/drivers/staging/greybus/sdio.c
@@ -411,6 +411,7 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
struct gb_sdio_command_request request = {0};
struct gb_sdio_command_response response;
struct mmc_data *data = host->mrq->data;
+ unsigned int timeout_ms;
u8 cmd_flags;
u8 cmd_type;
int i;
@@ -469,9 +470,12 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
request.data_blksz = cpu_to_le16(data->blksz);
}
- ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND,
- &request, sizeof(request), &response,
- sizeof(response));
+ timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
+ GB_OPERATION_TIMEOUT_DEFAULT;
+
+ ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND,
+ &request, sizeof(request), &response,
+ sizeof(response), timeout_ms);
if (ret < 0)
goto out;
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 8a006323c3c1..2343914f7548 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -537,9 +537,9 @@ static void gb_tty_set_termios(struct tty_struct *tty,
}
if (C_CRTSCTS(tty) && C_BAUD(tty) != B0)
- newline.flow_control |= GB_SERIAL_AUTO_RTSCTS_EN;
+ newline.flow_control = GB_SERIAL_AUTO_RTSCTS_EN;
else
- newline.flow_control &= ~GB_SERIAL_AUTO_RTSCTS_EN;
+ newline.flow_control = 0;
if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) {
memcpy(&gb_tty->line_coding, &newline, sizeof(newline));
@@ -656,8 +656,6 @@ static int set_serial_info(struct gb_tty *gb_tty,
if ((close_delay != gb_tty->port.close_delay) ||
(closing_wait != gb_tty->port.closing_wait))
retval = -EPERM;
- else
- retval = -EOPNOTSUPP;
} else {
gb_tty->port.close_delay = close_delay;
gb_tty->port.closing_wait = closing_wait;
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index f53612a6461d..18a73ee48e7c 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -703,7 +703,6 @@ static int ad7746_probe(struct i2c_client *client,
indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
else
indio_dev->num_channels = ARRAY_SIZE(ad7746_channels) - 2;
- indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
if (pdata) {
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index ac13b99bd9cb..aca983f34f5e 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -114,17 +114,24 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data)
static int ad2s1210_config_read(struct ad2s1210_state *st,
unsigned char address)
{
- struct spi_transfer xfer = {
- .len = 2,
- .rx_buf = st->rx,
- .tx_buf = st->tx,
+ struct spi_transfer xfers[] = {
+ {
+ .len = 1,
+ .rx_buf = &st->rx[0],
+ .tx_buf = &st->tx[0],
+ .cs_change = 1,
+ }, {
+ .len = 1,
+ .rx_buf = &st->rx[1],
+ .tx_buf = &st->tx[1],
+ },
};
int ret = 0;
ad2s1210_set_mode(MOD_CONFIG, st);
st->tx[0] = address | AD2S1210_MSB_IS_HIGH;
st->tx[1] = AD2S1210_REG_FAULT;
- ret = spi_sync_transfer(st->sdev, &xfer, 1);
+ ret = spi_sync_transfer(st->sdev, xfers, 2);
if (ret < 0)
return ret;
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index dc5459ae0b51..f624d0d53a8f 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -1120,6 +1120,7 @@ static int ks_wlan_set_scan(struct net_device *dev,
{
struct ks_wlan_private *priv = netdev_priv(dev);
struct iw_scan_req *req = NULL;
+ int len;
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -1129,8 +1130,9 @@ static int ks_wlan_set_scan(struct net_device *dev,
if (wrqu->data.length == sizeof(struct iw_scan_req) &&
wrqu->data.flags & IW_SCAN_THIS_ESSID) {
req = (struct iw_scan_req *)extra;
- priv->scan_ssid_len = req->essid_len;
- memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len);
+ len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);
+ priv->scan_ssid_len = len;
+ memcpy(priv->scan_ssid, req->essid, len);
} else {
priv->scan_ssid_len = 0;
}
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index 256039ce561e..81a3370551db 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -678,7 +678,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
/* setup default format */
fmt_src.pad = priv->src_sd_pad;
fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src);
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src);
if (ret) {
v4l2_err(sd, "failed to get src_sd format\n");
goto unreg;
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index b1036baebb03..d796e754610c 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -1244,8 +1244,10 @@ static int iss_probe(struct platform_device *pdev)
if (ret < 0)
goto error;
- if (!omap4iss_get(iss))
+ if (!omap4iss_get(iss)) {
+ ret = -EINVAL;
goto error;
+ }
ret = iss_reset(iss);
if (ret < 0)
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 25a077f4ea94..08f60ce6293d 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -1412,7 +1412,7 @@ int most_register_interface(struct most_interface *iface)
INIT_LIST_HEAD(&iface->p->channel_list);
iface->p->dev_id = id;
- strcpy(iface->p->name, iface->description);
+ strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
iface->dev.init_name = iface->p->name;
iface->dev.bus = &mc.bus;
iface->dev.parent = &mc.dev;
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 89b02fc305b8..fd9245d7eeb9 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -86,6 +86,8 @@ static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes)
{
unsigned int i = 0;
+ if (bytes < 2)
+ return;
while (i < bytes - 2) {
dest[i] = source[i + 2];
dest[i + 1] = source[i + 1];
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 448478451c4c..b50788b2d1d9 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -934,7 +934,7 @@ static int spinand_probe(struct spi_device *spi_nand)
mtd_set_ooblayout(mtd, &spinand_oob_64_ops);
#endif
- if (nand_scan(mtd, 1))
+ if (nand_scan(chip, 1))
return -ENXIO;
return mtd_device_register(mtd, NULL, 0);
diff --git a/drivers/staging/mt7621-dma/Makefile b/drivers/staging/mt7621-dma/Makefile
index d3152d45cf45..e3ab560ed35a 100644
--- a/drivers/staging/mt7621-dma/Makefile
+++ b/drivers/staging/mt7621-dma/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
-obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
+obj-$(CONFIG_MTK_HSDMA) += hsdma-mt7621.o
ccflags-y += -I$(srctree)/drivers/dma
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/hsdma-mt7621.c
index 5831f816c17b..f487cbc91dee 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/hsdma-mt7621.c
@@ -723,7 +723,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret) {
dev_err(&pdev->dev, "failed to register dma device\n");
- return ret;
+ goto err_uninit_hsdma;
}
ret = of_dma_controller_register(pdev->dev.of_node,
@@ -739,6 +739,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
err_unregister:
dma_async_device_unregister(dd);
+err_uninit_hsdma:
+ mtk_hsdma_uninit(hsdma);
return ret;
}
@@ -758,7 +760,7 @@ static struct platform_driver mtk_hsdma_driver = {
.probe = mtk_hsdma_probe,
.remove = mtk_hsdma_remove,
.driver = {
- .name = "hsdma-mt7621",
+ .name = KBUILD_MODNAME,
.of_match_table = mtk_hsdma_of_match,
},
};
diff --git a/drivers/staging/mt7621-mmc/Kconfig b/drivers/staging/mt7621-mmc/Kconfig
deleted file mode 100644
index c6dfe8c637dc..000000000000
--- a/drivers/staging/mt7621-mmc/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-config MTK_MMC
- tristate "MTK SD/MMC"
- depends on !MTD_NAND_RALINK && MMC
-
-config MTK_AEE_KDUMP
- bool "MTK AEE KDUMP"
- depends on MTK_MMC
-
-config MTK_MMC_CD_POLL
- bool "Card Detect with Polling"
- depends on MTK_MMC
-
-config MTK_MMC_EMMC_8BIT
- bool "eMMC 8-bit support"
- depends on MTK_MMC && RALINK_MT7628
-
diff --git a/drivers/staging/mt7621-mmc/Makefile b/drivers/staging/mt7621-mmc/Makefile
deleted file mode 100644
index caead0b54703..000000000000
--- a/drivers/staging/mt7621-mmc/Makefile
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright Statement:
-#
-# This software/firmware and related documentation ("MediaTek Software") are
-# protected under relevant copyright laws. The information contained herein
-# is confidential and proprietary to MediaTek Inc. and/or its licensors.
-# Without the prior written permission of MediaTek inc. and/or its licensors,
-# any reproduction, modification, use or disclosure of MediaTek Software,
-# and information contained herein, in whole or in part, shall be strictly prohibited.
-#
-# MediaTek Inc. (C) 2010. All rights reserved.
-#
-# BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
-# THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
-# RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
-# AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
-# NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
-# SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
-# SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
-# THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
-# THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
-# CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
-# SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
-# STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
-# CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
-# AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
-# OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
-# MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
-#
-# The following software/firmware and/or related documentation ("MediaTek Software")
-# have been modified by MediaTek Inc. All revisions are subject to any receiver's
-# applicable license agreements with MediaTek Inc.
-
-obj-$(CONFIG_MTK_MMC) += mtk_sd.o
-mtk_sd-objs := sd.o dbg.o
-ifeq ($(CONFIG_MTK_AEE_KDUMP),y)
-EXTRA_CFLAGS += -DMT6575_SD_DEBUG
-endif
-
-clean:
- @rm -f *.o modules.order .*.cmd
diff --git a/drivers/staging/mt7621-mmc/TODO b/drivers/staging/mt7621-mmc/TODO
deleted file mode 100644
index febb32d37e07..000000000000
--- a/drivers/staging/mt7621-mmc/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-
-- general code review and clean up
-- ensure device-tree requirements are documented
-- should probably be merged with drivers/mmc/host/mtk-sd.c
-- possibly fix to work with highmem pages so a bounce buffer isn't
- needed.
-
-Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-mmc/board.h b/drivers/staging/mt7621-mmc/board.h
deleted file mode 100644
index 983791ee308d..000000000000
--- a/drivers/staging/mt7621-mmc/board.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* Copyright Statement:
- *
- * This software/firmware and related documentation ("MediaTek Software") are
- * protected under relevant copyright laws. The information contained herein
- * is confidential and proprietary to MediaTek Inc. and/or its licensors.
- * Without the prior written permission of MediaTek inc. and/or its licensors,
- * any reproduction, modification, use or disclosure of MediaTek Software,
- * and information contained herein, in whole or in part, shall be strictly prohibited.
- */
-/* MediaTek Inc. (C) 2010. All rights reserved.
- *
- * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
- * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
- * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
- * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
- * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
- * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
- * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
- * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
- * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
- * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
- * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
- * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
- * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
- * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
- * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
- * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
- *
- * The following software/firmware and/or related documentation ("MediaTek Software")
- * have been modified by MediaTek Inc. All revisions are subject to any receiver's
- * applicable license agreements with MediaTek Inc.
- */
-
-#ifndef __ARCH_ARM_MACH_BOARD_H
-#define __ARCH_ARM_MACH_BOARD_H
-
-#define MSDC_CD_PIN_EN BIT(0) /* card detection pin is wired */
-#define MSDC_WP_PIN_EN BIT(1) /* write protection pin is wired */
-#define MSDC_RST_PIN_EN BIT(2) /* emmc reset pin is wired */
-#define MSDC_REMOVABLE BIT(5) /* removable slot */
-
-#define MSDC_SMPL_RISING (0)
-#define MSDC_SMPL_FALLING (1)
-
-#define MSDC_CMD_PIN (0)
-#define MSDC_DAT_PIN (1)
-#define MSDC_CD_PIN (2)
-#define MSDC_WP_PIN (3)
-#define MSDC_RST_PIN (4)
-
-struct msdc_hw {
- unsigned char clk_src; /* host clock source */
- unsigned long flags; /* hardware capability flags */
-
- /* config gpio pull mode */
- void (*config_gpio_pin)(int type, int pull);
-};
-
-extern struct msdc_hw msdc0_hw;
-
-#endif /* __ARCH_ARM_MACH_BOARD_H */
diff --git a/drivers/staging/mt7621-mmc/dbg.c b/drivers/staging/mt7621-mmc/dbg.c
deleted file mode 100644
index 6e518dce9029..000000000000
--- a/drivers/staging/mt7621-mmc/dbg.c
+++ /dev/null
@@ -1,307 +0,0 @@
-/* Copyright Statement:
- *
- * This software/firmware and related documentation ("MediaTek Software") are
- * protected under relevant copyright laws. The information contained herein
- * is confidential and proprietary to MediaTek Inc. and/or its licensors.
- * Without the prior written permission of MediaTek inc. and/or its licensors,
- * any reproduction, modification, use or disclosure of MediaTek Software,
- * and information contained herein, in whole or in part, shall be strictly prohibited.
- *
- * MediaTek Inc. (C) 2010. All rights reserved.
- *
- * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
- * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
- * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
- * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
- * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
- * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
- * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
- * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
- * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
- * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
- * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
- * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
- * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
- * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
- * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
- * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
- *
- * The following software/firmware and/or related documentation ("MediaTek Software")
- * have been modified by MediaTek Inc. All revisions are subject to any receiver's
- * applicable license agreements with MediaTek Inc.
- */
-
-#include <linux/version.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-// #include <mach/mt6575_gpt.h> /* --- by chhung */
-#include "dbg.h"
-#include "mt6575_sd.h"
-#include <linux/seq_file.h>
-
-static char cmd_buf[256];
-
-/* for debug zone */
-unsigned int sd_debug_zone[4] = {
- 0,
- 0,
- 0,
- 0
-};
-
-#if defined(MT6575_SD_DEBUG)
-/* for driver profile */
-#define TICKS_ONE_MS (13000)
-u32 gpt_enable;
-u32 sdio_pro_enable; /* make sure gpt is enabled */
-u32 sdio_pro_time; /* no more than 30s */
-struct sdio_profile sdio_perfomance = {0};
-
-#if 0 /* --- chhung */
-void msdc_init_gpt(void)
-{
- GPT_CONFIG config;
-
- config.num = GPT6;
- config.mode = GPT_FREE_RUN;
- config.clkSrc = GPT_CLK_SRC_SYS;
- config.clkDiv = GPT_CLK_DIV_1; /* 13MHz GPT6 */
-
- if (GPT_Config(config) == FALSE)
- return;
-
- GPT_Start(GPT6);
-}
-#endif /* end of --- */
-
-u32 msdc_time_calc(u32 old_L32, u32 old_H32, u32 new_L32, u32 new_H32)
-{
- u32 ret = 0;
-
- if (new_H32 == old_H32) {
- ret = new_L32 - old_L32;
- } else if (new_H32 == (old_H32 + 1)) {
- if (new_L32 > old_L32)
- pr_debug("msdc old_L<0x%x> new_L<0x%x>\n", old_L32, new_L32);
- ret = (0xffffffff - old_L32);
- ret += new_L32;
- } else {
- pr_debug("msdc old_H<0x%x> new_H<0x%x>\n", old_H32, new_H32);
- }
-
- return ret;
-}
-
-void msdc_sdio_profile(struct sdio_profile *result)
-{
- struct cmd_profile *cmd;
- u32 i;
-
- pr_debug("sdio === performance dump ===\n");
- pr_debug("sdio === total execute tick<%d> time<%dms> Tx<%dB> Rx<%dB>\n",
- result->total_tc, result->total_tc / TICKS_ONE_MS,
- result->total_tx_bytes, result->total_rx_bytes);
-
- /* CMD52 Dump */
- cmd = &result->cmd52_rx;
- pr_debug("sdio === CMD52 Rx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count);
- cmd = &result->cmd52_tx;
- pr_debug("sdio === CMD52 Tx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count);
-
- /* CMD53 Rx bytes + block mode */
- for (i = 0; i < 512; i++) {
- cmd = &result->cmd53_rx_byte[i];
- if (cmd->count) {
- pr_debug("sdio<%6d><%3dB>_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
- cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
- }
- }
- for (i = 0; i < 100; i++) {
- cmd = &result->cmd53_rx_blk[i];
- if (cmd->count) {
- pr_debug("sdio<%6d><%3d>B_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
- cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
- }
- }
-
- /* CMD53 Tx bytes + block mode */
- for (i = 0; i < 512; i++) {
- cmd = &result->cmd53_tx_byte[i];
- if (cmd->count) {
- pr_debug("sdio<%6d><%3dB>_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
- cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
- }
- }
- for (i = 0; i < 100; i++) {
- cmd = &result->cmd53_tx_blk[i];
- if (cmd->count) {
- pr_debug("sdio<%6d><%3d>B_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
- cmd->max_tc, cmd->min_tc, cmd->tot_tc / cmd->count,
- cmd->tot_bytes, (cmd->tot_bytes / 10) * 13 / (cmd->tot_tc / 10));
- }
- }
-
- pr_debug("sdio === performance dump done ===\n");
-}
-
-//========= sdio command table ===========
-void msdc_performance(u32 opcode, u32 sizes, u32 bRx, u32 ticks)
-{
- struct sdio_profile *result = &sdio_perfomance;
- struct cmd_profile *cmd;
- u32 block;
-
- if (sdio_pro_enable == 0)
- return;
-
- if (opcode == 52) {
- cmd = bRx ? &result->cmd52_rx : &result->cmd52_tx;
- } else if (opcode == 53) {
- if (sizes < 512) {
- cmd = bRx ? &result->cmd53_rx_byte[sizes] : &result->cmd53_tx_byte[sizes];
- } else {
- block = sizes / 512;
- if (block >= 99) {
- pr_err("cmd53 error blocks\n");
- while (1)
- ;
- }
- cmd = bRx ? &result->cmd53_rx_blk[block] : &result->cmd53_tx_blk[block];
- }
- } else {
- return;
- }
-
- /* update the members */
- if (ticks > cmd->max_tc)
- cmd->max_tc = ticks;
- if (cmd->min_tc == 0 || ticks < cmd->min_tc)
- cmd->min_tc = ticks;
- cmd->tot_tc += ticks;
- cmd->tot_bytes += sizes;
- cmd->count++;
-
- if (bRx)
- result->total_rx_bytes += sizes;
- else
- result->total_tx_bytes += sizes;
- result->total_tc += ticks;
-
- /* dump when total_tc > 30s */
- if (result->total_tc >= sdio_pro_time * TICKS_ONE_MS * 1000) {
- msdc_sdio_profile(result);
- memset(result, 0, sizeof(struct sdio_profile));
- }
-}
-
-//========== driver proc interface ===========
-static int msdc_debug_proc_read(struct seq_file *s, void *p)
-{
- seq_puts(s, "\n=========================================\n");
- seq_puts(s, "Index<0> + Id + Zone\n");
- seq_puts(s, "-> PWR<9> WRN<8> | FIO<7> OPS<6> FUN<5> CFG<4> | INT<3> RSP<2> CMD<1> DMA<0>\n");
- seq_puts(s, "-> echo 0 3 0x3ff >msdc_bebug -> host[3] debug zone set to 0x3ff\n");
- seq_printf(s, "-> MSDC[0] Zone: 0x%.8x\n", sd_debug_zone[0]);
- seq_printf(s, "-> MSDC[1] Zone: 0x%.8x\n", sd_debug_zone[1]);
- seq_printf(s, "-> MSDC[2] Zone: 0x%.8x\n", sd_debug_zone[2]);
- seq_printf(s, "-> MSDC[3] Zone: 0x%.8x\n", sd_debug_zone[3]);
-
- seq_puts(s, "Index<3> + SDIO_PROFILE + TIME\n");
- seq_puts(s, "-> echo 3 1 0x1E >msdc_bebug -> enable sdio_profile, 30s\n");
- seq_printf(s, "-> SDIO_PROFILE<%d> TIME<%ds>\n", sdio_pro_enable, sdio_pro_time);
- seq_puts(s, "=========================================\n\n");
-
- return 0;
-}
-
-static ssize_t msdc_debug_proc_write(struct file *file,
- const char __user *buf,
- size_t count, loff_t *data)
-{
- int ret;
-
- int cmd, p1, p2;
- int id, zone;
- int mode, size;
-
- if (count == 0)
- return -1;
- if (count > 255)
- count = 255;
-
- if (copy_from_user(cmd_buf, buf, count))
- return -EFAULT;
-
- cmd_buf[count] = '\0';
- pr_debug("msdc Write %s\n", cmd_buf);
-
- sscanf(cmd_buf, "%x %x %x", &cmd, &p1, &p2);
-
- if (cmd == SD_TOOL_ZONE) {
- id = p1;
- zone = p2;
- zone &= 0x3ff;
- pr_debug("msdc host_id<%d> zone<0x%.8x>\n", id, zone);
- if (id >= 0 && id <= 3) {
- sd_debug_zone[id] = zone;
- } else if (id == 4) {
- sd_debug_zone[0] = sd_debug_zone[1] = zone;
- sd_debug_zone[2] = sd_debug_zone[3] = zone;
- } else {
- pr_err("msdc host_id error when set debug zone\n");
- }
- } else if (cmd == SD_TOOL_SDIO_PROFILE) {
- if (p1 == 1) { /* enable profile */
- if (gpt_enable == 0) {
- // msdc_init_gpt(); /* --- by chhung */
- gpt_enable = 1;
- }
- sdio_pro_enable = 1;
- if (p2 == 0)
- p2 = 1;
- if (p2 >= 30)
- p2 = 30;
- sdio_pro_time = p2;
- } else if (p1 == 0) {
- /* todo */
- sdio_pro_enable = 0;
- }
- }
-
- return count;
-}
-
-static int msdc_debug_show(struct inode *inode, struct file *file)
-{
- return single_open(file, msdc_debug_proc_read, NULL);
-}
-
-static const struct file_operations msdc_debug_fops = {
- .owner = THIS_MODULE,
- .open = msdc_debug_show,
- .read = seq_read,
- .write = msdc_debug_proc_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-void msdc_debug_proc_init(void)
-{
- proc_create("msdc_debug", 0660, NULL, &msdc_debug_fops);
-}
-EXPORT_SYMBOL_GPL(msdc_debug_proc_init);
-#endif
diff --git a/drivers/staging/mt7621-mmc/dbg.h b/drivers/staging/mt7621-mmc/dbg.h
deleted file mode 100644
index 2f2c56b73987..000000000000
--- a/drivers/staging/mt7621-mmc/dbg.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* Copyright Statement:
- *
- * This software/firmware and related documentation ("MediaTek Software") are
- * protected under relevant copyright laws. The information contained herein
- * is confidential and proprietary to MediaTek Inc. and/or its licensors.
- * Without the prior written permission of MediaTek inc. and/or its licensors,
- * any reproduction, modification, use or disclosure of MediaTek Software,
- * and information contained herein, in whole or in part, shall be strictly prohibited.
- *
- * MediaTek Inc. (C) 2010. All rights reserved.
- *
- * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
- * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
- * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
- * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
- * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
- * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
- * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
- * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
- * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
- * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
- * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
- * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
- * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
- * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
- * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
- * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
- *
- * The following software/firmware and/or related documentation ("MediaTek Software")
- * have been modified by MediaTek Inc. All revisions are subject to any receiver's
- * applicable license agreements with MediaTek Inc.
- */
-#ifndef __MT_MSDC_DEUBG__
-#define __MT_MSDC_DEUBG__
-
-//==========================
-extern u32 sdio_pro_enable;
-/* for a type command, e.g. CMD53, 2 blocks */
-struct cmd_profile {
- u32 max_tc; /* Max tick count */
- u32 min_tc;
- u32 tot_tc; /* total tick count */
- u32 tot_bytes;
- u32 count; /* the counts of the command */
-};
-
-/* dump when total_tc and total_bytes */
-struct sdio_profile {
- u32 total_tc; /* total tick count of CMD52 and CMD53 */
- u32 total_tx_bytes; /* total bytes of CMD53 Tx */
- u32 total_rx_bytes; /* total bytes of CMD53 Rx */
-
- /*CMD52*/
- struct cmd_profile cmd52_tx;
- struct cmd_profile cmd52_rx;
-
- /*CMD53 in byte unit */
- struct cmd_profile cmd53_tx_byte[512];
- struct cmd_profile cmd53_rx_byte[512];
-
- /*CMD53 in block unit */
- struct cmd_profile cmd53_tx_blk[100];
- struct cmd_profile cmd53_rx_blk[100];
-};
-
-//==========================
-enum msdc_dbg {
- SD_TOOL_ZONE = 0,
- SD_TOOL_DMA_SIZE = 1,
- SD_TOOL_PM_ENABLE = 2,
- SD_TOOL_SDIO_PROFILE = 3,
-};
-
-/* Debug message event */
-#define DBG_EVT_NONE (0) /* No event */
-#define DBG_EVT_DMA (1 << 0) /* DMA related event */
-#define DBG_EVT_CMD (1 << 1) /* MSDC CMD related event */
-#define DBG_EVT_RSP (1 << 2) /* MSDC CMD RSP related event */
-#define DBG_EVT_INT (1 << 3) /* MSDC INT event */
-#define DBG_EVT_CFG (1 << 4) /* MSDC CFG event */
-#define DBG_EVT_FUC (1 << 5) /* Function event */
-#define DBG_EVT_OPS (1 << 6) /* Read/Write operation event */
-#define DBG_EVT_FIO (1 << 7) /* FIFO operation event */
-#define DBG_EVT_WRN (1 << 8) /* Warning event */
-#define DBG_EVT_PWR (1 << 9) /* Power event */
-#define DBG_EVT_ALL (0xffffffff)
-
-#define DBG_EVT_MASK (DBG_EVT_ALL)
-
-extern unsigned int sd_debug_zone[4];
-#define TAG "msdc"
-#if 0 /* +++ chhung */
-#define BUG_ON(x) \
-do { \
- if (x) { \
- printk("[BUG] %s LINE:%d FILE:%s\n", #x, __LINE__, __FILE__); \
- while (1) \
- ; \
- } \
-} while (0)
-#endif /* end of +++ */
-
-#define N_MSG(evt, fmt, args...)
-/*
-do { \
- if ((DBG_EVT_##evt) & sd_debug_zone[host->id]) { \
- printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d> PID<%s><0x%x>\n", \
- host->id, ##args , __FUNCTION__, __LINE__, current->comm, current->pid); \
- } \
-} while(0)
-*/
-
-#define ERR_MSG(fmt, args...) \
-do { \
- printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d> PID<%s><0x%x>\n", \
- host->id, ##args, __FUNCTION__, __LINE__, current->comm, current->pid); \
-} while (0);
-
-#if 1
-//defined CONFIG_MTK_MMC_CD_POLL
-#define INIT_MSG(fmt, args...)
-#define IRQ_MSG(fmt, args...)
-#else
-#define INIT_MSG(fmt, args...) \
-do { \
- printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d> PID<%s><0x%x>\n", \
- host->id, ##args, __FUNCTION__, __LINE__, current->comm, current->pid); \
-} while (0);
-
-/* PID in ISR in not corrent */
-#define IRQ_MSG(fmt, args...) \
-do { \
- printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d>\n", \
- host->id, ##args, __FUNCTION__, __LINE__); \
-} while (0);
-#endif
-
-void msdc_debug_proc_init(void);
-
-#if 0 /* --- chhung */
-void msdc_init_gpt(void);
-extern void GPT_GetCounter64(UINT32 *cntL32, UINT32 *cntH32);
-#endif /* end of --- */
-u32 msdc_time_calc(u32 old_L32, u32 old_H32, u32 new_L32, u32 new_H32);
-void msdc_performance(u32 opcode, u32 sizes, u32 bRx, u32 ticks);
-
-#endif
diff --git a/drivers/staging/mt7621-mmc/mt6575_sd.h b/drivers/staging/mt7621-mmc/mt6575_sd.h
deleted file mode 100644
index 4e287c140acb..000000000000
--- a/drivers/staging/mt7621-mmc/mt6575_sd.h
+++ /dev/null
@@ -1,488 +0,0 @@
-/* Copyright Statement:
- *
- * This software/firmware and related documentation ("MediaTek Software") are
- * protected under relevant copyright laws. The information contained herein
- * is confidential and proprietary to MediaTek Inc. and/or its licensors.
- * Without the prior written permission of MediaTek inc. and/or its licensors,
- * any reproduction, modification, use or disclosure of MediaTek Software,
- * and information contained herein, in whole or in part, shall be strictly prohibited.
- */
-/* MediaTek Inc. (C) 2010. All rights reserved.
- *
- * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
- * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
- * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
- * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
- * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
- * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
- * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
- * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
- * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
- * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
- * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
- * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
- * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
- * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
- * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
- * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
- *
- * The following software/firmware and/or related documentation ("MediaTek Software")
- * have been modified by MediaTek Inc. All revisions are subject to any receiver's
- * applicable license agreements with MediaTek Inc.
- */
-
-#ifndef MT6575_SD_H
-#define MT6575_SD_H
-
-#include <linux/bitops.h>
-#include <linux/mmc/host.h>
-
-// #include <mach/mt6575_reg_base.h> /* --- by chhung */
-
-/*--------------------------------------------------------------------------*/
-/* Common Definition */
-/*--------------------------------------------------------------------------*/
-#define MSDC_FIFO_SZ (128)
-#define MSDC_FIFO_THD (64) // (128)
-#define MSDC_NUM (4)
-
-#define MSDC_MS (0)
-#define MSDC_SDMMC (1)
-
-#define MSDC_BUS_1BITS (0)
-#define MSDC_BUS_4BITS (1)
-#define MSDC_BUS_8BITS (2)
-
-#define MSDC_BRUST_8B (3)
-#define MSDC_BRUST_16B (4)
-#define MSDC_BRUST_32B (5)
-#define MSDC_BRUST_64B (6)
-
-#define MSDC_PIN_PULL_NONE (0)
-#define MSDC_PIN_PULL_DOWN (1)
-#define MSDC_PIN_PULL_UP (2)
-#define MSDC_PIN_KEEP (3)
-
-#define MSDC_MAX_SCLK (48000000) /* +/- by chhung */
-#define MSDC_MIN_SCLK (260000)
-
-#define MSDC_AUTOCMD12 (0x0001)
-#define MSDC_AUTOCMD23 (0x0002)
-#define MSDC_AUTOCMD19 (0x0003)
-
-#define MSDC_EMMC_BOOTMODE0 (0) /* Pull low CMD mode */
-#define MSDC_EMMC_BOOTMODE1 (1) /* Reset CMD mode */
-
-enum {
- RESP_NONE = 0,
- RESP_R1,
- RESP_R2,
- RESP_R3,
- RESP_R4,
- RESP_R5,
- RESP_R6,
- RESP_R7,
- RESP_R1B
-};
-
-/*--------------------------------------------------------------------------*/
-/* Register Offset */
-/*--------------------------------------------------------------------------*/
-#define MSDC_CFG (0x0)
-#define MSDC_IOCON (0x04)
-#define MSDC_PS (0x08)
-#define MSDC_INT (0x0c)
-#define MSDC_INTEN (0x10)
-#define MSDC_FIFOCS (0x14)
-#define MSDC_TXDATA (0x18)
-#define MSDC_RXDATA (0x1c)
-#define SDC_CFG (0x30)
-#define SDC_CMD (0x34)
-#define SDC_ARG (0x38)
-#define SDC_STS (0x3c)
-#define SDC_RESP0 (0x40)
-#define SDC_RESP1 (0x44)
-#define SDC_RESP2 (0x48)
-#define SDC_RESP3 (0x4c)
-#define SDC_BLK_NUM (0x50)
-#define SDC_CSTS (0x58)
-#define SDC_CSTS_EN (0x5c)
-#define SDC_DCRC_STS (0x60)
-#define EMMC_CFG0 (0x70)
-#define EMMC_CFG1 (0x74)
-#define EMMC_STS (0x78)
-#define EMMC_IOCON (0x7c)
-#define SDC_ACMD_RESP (0x80)
-#define SDC_ACMD19_TRG (0x84)
-#define SDC_ACMD19_STS (0x88)
-#define MSDC_DMA_SA (0x90)
-#define MSDC_DMA_CA (0x94)
-#define MSDC_DMA_CTRL (0x98)
-#define MSDC_DMA_CFG (0x9c)
-#define MSDC_DBG_SEL (0xa0)
-#define MSDC_DBG_OUT (0xa4)
-#define MSDC_PATCH_BIT (0xb0)
-#define MSDC_PATCH_BIT0 MSDC_PATCH_BIT
-#define MSDC_PATCH_BIT1 (0xb4)
-#define MSDC_PAD_CTL0 (0xe0)
-#define MSDC_PAD_CTL1 (0xe4)
-#define MSDC_PAD_CTL2 (0xe8)
-#define MSDC_PAD_TUNE (0xec)
-#define MSDC_DAT_RDDLY0 (0xf0)
-#define MSDC_DAT_RDDLY1 (0xf4)
-#define MSDC_HW_DBG (0xf8)
-#define MSDC_VERSION (0x100)
-#define MSDC_ECO_VER (0x104)
-
-/*--------------------------------------------------------------------------*/
-/* Register Mask */
-/*--------------------------------------------------------------------------*/
-
-/* MSDC_CFG mask */
-#define MSDC_CFG_MODE (0x1 << 0) /* RW */
-#define MSDC_CFG_CKPDN (0x1 << 1) /* RW */
-#define MSDC_CFG_RST (0x1 << 2) /* RW */
-#define MSDC_CFG_PIO (0x1 << 3) /* RW */
-#define MSDC_CFG_CKDRVEN (0x1 << 4) /* RW */
-#define MSDC_CFG_BV18SDT (0x1 << 5) /* RW */
-#define MSDC_CFG_BV18PSS (0x1 << 6) /* R */
-#define MSDC_CFG_CKSTB (0x1 << 7) /* R */
-#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
-#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
-
-/* MSDC_IOCON mask */
-#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
-#define MSDC_IOCON_RSPL (0x1 << 1) /* RW */
-#define MSDC_IOCON_DSPL (0x1 << 2) /* RW */
-#define MSDC_IOCON_DDLSEL (0x1 << 3) /* RW */
-#define MSDC_IOCON_DDR50CKD (0x1 << 4) /* RW */
-#define MSDC_IOCON_DSPLSEL (0x1 << 5) /* RW */
-#define MSDC_IOCON_D0SPL (0x1 << 16) /* RW */
-#define MSDC_IOCON_D1SPL (0x1 << 17) /* RW */
-#define MSDC_IOCON_D2SPL (0x1 << 18) /* RW */
-#define MSDC_IOCON_D3SPL (0x1 << 19) /* RW */
-#define MSDC_IOCON_D4SPL (0x1 << 20) /* RW */
-#define MSDC_IOCON_D5SPL (0x1 << 21) /* RW */
-#define MSDC_IOCON_D6SPL (0x1 << 22) /* RW */
-#define MSDC_IOCON_D7SPL (0x1 << 23) /* RW */
-#define MSDC_IOCON_RISCSZ (0x3 << 24) /* RW */
-
-/* MSDC_PS mask */
-#define MSDC_PS_CDEN (0x1 << 0) /* RW */
-#define MSDC_PS_CDSTS (0x1 << 1) /* R */
-#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */
-#define MSDC_PS_DAT (0xff << 16) /* R */
-#define MSDC_PS_CMD (0x1 << 24) /* R */
-#define MSDC_PS_WP (0x1UL << 31) /* R */
-
-/* MSDC_INT mask */
-#define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */
-#define MSDC_INT_CDSC (0x1 << 1) /* W1C */
-#define MSDC_INT_ACMDRDY (0x1 << 3) /* W1C */
-#define MSDC_INT_ACMDTMO (0x1 << 4) /* W1C */
-#define MSDC_INT_ACMDCRCERR (0x1 << 5) /* W1C */
-#define MSDC_INT_DMAQ_EMPTY (0x1 << 6) /* W1C */
-#define MSDC_INT_SDIOIRQ (0x1 << 7) /* W1C */
-#define MSDC_INT_CMDRDY (0x1 << 8) /* W1C */
-#define MSDC_INT_CMDTMO (0x1 << 9) /* W1C */
-#define MSDC_INT_RSPCRCERR (0x1 << 10) /* W1C */
-#define MSDC_INT_CSTA (0x1 << 11) /* R */
-#define MSDC_INT_XFER_COMPL (0x1 << 12) /* W1C */
-#define MSDC_INT_DXFER_DONE (0x1 << 13) /* W1C */
-#define MSDC_INT_DATTMO (0x1 << 14) /* W1C */
-#define MSDC_INT_DATCRCERR (0x1 << 15) /* W1C */
-#define MSDC_INT_ACMD19_DONE (0x1 << 16) /* W1C */
-
-/* MSDC_INTEN mask */
-#define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */
-#define MSDC_INTEN_CDSC (0x1 << 1) /* RW */
-#define MSDC_INTEN_ACMDRDY (0x1 << 3) /* RW */
-#define MSDC_INTEN_ACMDTMO (0x1 << 4) /* RW */
-#define MSDC_INTEN_ACMDCRCERR (0x1 << 5) /* RW */
-#define MSDC_INTEN_DMAQ_EMPTY (0x1 << 6) /* RW */
-#define MSDC_INTEN_SDIOIRQ (0x1 << 7) /* RW */
-#define MSDC_INTEN_CMDRDY (0x1 << 8) /* RW */
-#define MSDC_INTEN_CMDTMO (0x1 << 9) /* RW */
-#define MSDC_INTEN_RSPCRCERR (0x1 << 10) /* RW */
-#define MSDC_INTEN_CSTA (0x1 << 11) /* RW */
-#define MSDC_INTEN_XFER_COMPL (0x1 << 12) /* RW */
-#define MSDC_INTEN_DXFER_DONE (0x1 << 13) /* RW */
-#define MSDC_INTEN_DATTMO (0x1 << 14) /* RW */
-#define MSDC_INTEN_DATCRCERR (0x1 << 15) /* RW */
-#define MSDC_INTEN_ACMD19_DONE (0x1 << 16) /* RW */
-
-/* MSDC_FIFOCS mask */
-#define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */
-#define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */
-#define MSDC_FIFOCS_CLR (0x1UL << 31) /* RW */
-
-/* SDC_CFG mask */
-#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */
-#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */
-#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */
-#define SDC_CFG_SDIO (0x1 << 19) /* RW */
-#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */
-#define SDC_CFG_INTATGAP (0x1 << 21) /* RW */
-#define SDC_CFG_DTOC (0xffUL << 24) /* RW */
-
-/* SDC_CMD mask */
-#define SDC_CMD_OPC (0x3f << 0) /* RW */
-#define SDC_CMD_BRK (0x1 << 6) /* RW */
-#define SDC_CMD_RSPTYP (0x7 << 7) /* RW */
-#define SDC_CMD_DTYP (0x3 << 11) /* RW */
-#define SDC_CMD_DTYP (0x3 << 11) /* RW */
-#define SDC_CMD_RW (0x1 << 13) /* RW */
-#define SDC_CMD_STOP (0x1 << 14) /* RW */
-#define SDC_CMD_GOIRQ (0x1 << 15) /* RW */
-#define SDC_CMD_BLKLEN (0xfff << 16) /* RW */
-#define SDC_CMD_AUTOCMD (0x3 << 28) /* RW */
-#define SDC_CMD_VOLSWTH (0x1 << 30) /* RW */
-
-/* SDC_STS mask */
-#define SDC_STS_SDCBUSY (0x1 << 0) /* RW */
-#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */
-#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */
-
-/* SDC_DCRC_STS mask */
-#define SDC_DCRC_STS_NEG (0xf << 8) /* RO */
-#define SDC_DCRC_STS_POS (0xff << 0) /* RO */
-
-/* EMMC_CFG0 mask */
-#define EMMC_CFG0_BOOTSTART (0x1 << 0) /* W */
-#define EMMC_CFG0_BOOTSTOP (0x1 << 1) /* W */
-#define EMMC_CFG0_BOOTMODE (0x1 << 2) /* RW */
-#define EMMC_CFG0_BOOTACKDIS (0x1 << 3) /* RW */
-#define EMMC_CFG0_BOOTWDLY (0x7 << 12) /* RW */
-#define EMMC_CFG0_BOOTSUPP (0x1 << 15) /* RW */
-
-/* EMMC_CFG1 mask */
-#define EMMC_CFG1_BOOTDATTMC (0xfffff << 0) /* RW */
-#define EMMC_CFG1_BOOTACKTMC (0xfffUL << 20) /* RW */
-
-/* EMMC_STS mask */
-#define EMMC_STS_BOOTCRCERR (0x1 << 0) /* W1C */
-#define EMMC_STS_BOOTACKERR (0x1 << 1) /* W1C */
-#define EMMC_STS_BOOTDATTMO (0x1 << 2) /* W1C */
-#define EMMC_STS_BOOTACKTMO (0x1 << 3) /* W1C */
-#define EMMC_STS_BOOTUPSTATE (0x1 << 4) /* R */
-#define EMMC_STS_BOOTACKRCV (0x1 << 5) /* W1C */
-#define EMMC_STS_BOOTDATRCV (0x1 << 6) /* R */
-
-/* EMMC_IOCON mask */
-#define EMMC_IOCON_BOOTRST (0x1 << 0) /* RW */
-
-/* SDC_ACMD19_TRG mask */
-#define SDC_ACMD19_TRG_TUNESEL (0xf << 0) /* RW */
-
-/* MSDC_DMA_CTRL mask */
-#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */
-#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */
-#define MSDC_DMA_CTRL_RESUME (0x1 << 2) /* W */
-#define MSDC_DMA_CTRL_MODE (0x1 << 8) /* RW */
-#define MSDC_DMA_CTRL_LASTBUF (0x1 << 10) /* RW */
-#define MSDC_DMA_CTRL_BRUSTSZ (0x7 << 12) /* RW */
-#define MSDC_DMA_CTRL_XFERSZ (0xffffUL << 16)/* RW */
-
-/* MSDC_DMA_CFG mask */
-#define MSDC_DMA_CFG_STS (0x1 << 0) /* R */
-#define MSDC_DMA_CFG_DECSEN (0x1 << 1) /* RW */
-#define MSDC_DMA_CFG_BDCSERR (0x1 << 4) /* R */
-#define MSDC_DMA_CFG_GPDCSERR (0x1 << 5) /* R */
-
-/* MSDC_PATCH_BIT mask */
-#define MSDC_PATCH_BIT_WFLSMODE (0x1 << 0) /* RW */
-#define MSDC_PATCH_BIT_ODDSUPP (0x1 << 1) /* RW */
-#define MSDC_PATCH_BIT_CKGEN_CK (0x1 << 6) /* E2: Fixed to 1 */
-#define MSDC_PATCH_BIT_IODSSEL (0x1 << 16) /* RW */
-#define MSDC_PATCH_BIT_IOINTSEL (0x1 << 17) /* RW */
-#define MSDC_PATCH_BIT_BUSYDLY (0xf << 18) /* RW */
-#define MSDC_PATCH_BIT_WDOD (0xf << 22) /* RW */
-#define MSDC_PATCH_BIT_IDRTSEL (0x1 << 26) /* RW */
-#define MSDC_PATCH_BIT_CMDFSEL (0x1 << 27) /* RW */
-#define MSDC_PATCH_BIT_INTDLSEL (0x1 << 28) /* RW */
-#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
-#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
-
-/* MSDC_PATCH_BIT1 mask */
-#define MSDC_PATCH_BIT1_WRDAT_CRCS (0x7 << 3)
-#define MSDC_PATCH_BIT1_CMD_RSP (0x7 << 0)
-
-/* MSDC_PAD_CTL0 mask */
-#define MSDC_PAD_CTL0_CLKDRVN (0x7 << 0) /* RW */
-#define MSDC_PAD_CTL0_CLKDRVP (0x7 << 4) /* RW */
-#define MSDC_PAD_CTL0_CLKSR (0x1 << 8) /* RW */
-#define MSDC_PAD_CTL0_CLKPD (0x1 << 16) /* RW */
-#define MSDC_PAD_CTL0_CLKPU (0x1 << 17) /* RW */
-#define MSDC_PAD_CTL0_CLKSMT (0x1 << 18) /* RW */
-#define MSDC_PAD_CTL0_CLKIES (0x1 << 19) /* RW */
-#define MSDC_PAD_CTL0_CLKTDSEL (0xf << 20) /* RW */
-#define MSDC_PAD_CTL0_CLKRDSEL (0xffUL << 24) /* RW */
-
-/* MSDC_PAD_CTL1 mask */
-#define MSDC_PAD_CTL1_CMDDRVN (0x7 << 0) /* RW */
-#define MSDC_PAD_CTL1_CMDDRVP (0x7 << 4) /* RW */
-#define MSDC_PAD_CTL1_CMDSR (0x1 << 8) /* RW */
-#define MSDC_PAD_CTL1_CMDPD (0x1 << 16) /* RW */
-#define MSDC_PAD_CTL1_CMDPU (0x1 << 17) /* RW */
-#define MSDC_PAD_CTL1_CMDSMT (0x1 << 18) /* RW */
-#define MSDC_PAD_CTL1_CMDIES (0x1 << 19) /* RW */
-#define MSDC_PAD_CTL1_CMDTDSEL (0xf << 20) /* RW */
-#define MSDC_PAD_CTL1_CMDRDSEL (0xffUL << 24) /* RW */
-
-/* MSDC_PAD_CTL2 mask */
-#define MSDC_PAD_CTL2_DATDRVN (0x7 << 0) /* RW */
-#define MSDC_PAD_CTL2_DATDRVP (0x7 << 4) /* RW */
-#define MSDC_PAD_CTL2_DATSR (0x1 << 8) /* RW */
-#define MSDC_PAD_CTL2_DATPD (0x1 << 16) /* RW */
-#define MSDC_PAD_CTL2_DATPU (0x1 << 17) /* RW */
-#define MSDC_PAD_CTL2_DATIES (0x1 << 19) /* RW */
-#define MSDC_PAD_CTL2_DATSMT (0x1 << 18) /* RW */
-#define MSDC_PAD_CTL2_DATTDSEL (0xf << 20) /* RW */
-#define MSDC_PAD_CTL2_DATRDSEL (0xffUL << 24) /* RW */
-
-/* MSDC_PAD_TUNE mask */
-#define MSDC_PAD_TUNE_DATWRDLY (0x1F << 0) /* RW */
-#define MSDC_PAD_TUNE_DATRRDLY (0x1F << 8) /* RW */
-#define MSDC_PAD_TUNE_CMDRDLY (0x1F << 16) /* RW */
-#define MSDC_PAD_TUNE_CMDRRDLY (0x1FUL << 22) /* RW */
-#define MSDC_PAD_TUNE_CLKTXDLY (0x1FUL << 27) /* RW */
-
-/* MSDC_DAT_RDDLY0/1 mask */
-#define MSDC_DAT_RDDLY0_D0 (0x1F << 0) /* RW */
-#define MSDC_DAT_RDDLY0_D1 (0x1F << 8) /* RW */
-#define MSDC_DAT_RDDLY0_D2 (0x1F << 16) /* RW */
-#define MSDC_DAT_RDDLY0_D3 (0x1F << 24) /* RW */
-
-#define MSDC_DAT_RDDLY1_D4 (0x1F << 0) /* RW */
-#define MSDC_DAT_RDDLY1_D5 (0x1F << 8) /* RW */
-#define MSDC_DAT_RDDLY1_D6 (0x1F << 16) /* RW */
-#define MSDC_DAT_RDDLY1_D7 (0x1F << 24) /* RW */
-
-#define MSDC_CKGEN_MSDC_DLY_SEL (0x1F << 10)
-#define MSDC_INT_DAT_LATCH_CK_SEL (0x7 << 7)
-#define MSDC_CKGEN_MSDC_CK_SEL (0x1 << 6)
-#define CARD_READY_FOR_DATA (1 << 8)
-#define CARD_CURRENT_STATE(x) ((x & 0x00001E00) >> 9)
-
-/*--------------------------------------------------------------------------*/
-/* Descriptor Structure */
-/*--------------------------------------------------------------------------*/
-struct gpd {
- u32 hwo:1; /* could be changed by hw */
- u32 bdp:1;
- u32 rsv0:6;
- u32 chksum:8;
- u32 intr:1;
- u32 rsv1:15;
- void *next;
- void *ptr;
- u32 buflen:16;
- u32 extlen:8;
- u32 rsv2:8;
- u32 arg;
- u32 blknum;
- u32 cmd;
-};
-
-struct bd {
- u32 eol:1;
- u32 rsv0:7;
- u32 chksum:8;
- u32 rsv1:1;
- u32 blkpad:1;
- u32 dwpad:1;
- u32 rsv2:13;
- void *next;
- void *ptr;
- u32 buflen:16;
- u32 rsv3:16;
-};
-
-struct msdc_dma {
- struct gpd *gpd; /* pointer to gpd array */
- struct bd *bd; /* pointer to bd array */
- dma_addr_t gpd_addr; /* the physical address of gpd array */
- dma_addr_t bd_addr; /* the physical address of bd array */
-};
-
-struct msdc_host {
- struct msdc_hw *hw;
-
- struct mmc_host *mmc; /* mmc structure */
- struct mmc_command *cmd;
- struct mmc_data *data;
- struct mmc_request *mrq;
- int cmd_rsp;
-
- int error;
- spinlock_t lock; /* mutex */
- struct semaphore sem;
-
- u32 blksz; /* host block size */
- void __iomem *base; /* host base address */
- int id; /* host id */
- int pwr_ref; /* core power reference count */
-
- u32 xfer_size; /* total transferred size */
-
- struct msdc_dma dma; /* dma channel */
- u32 dma_xfer_size; /* dma transfer size in bytes */
-
- u32 timeout_ns; /* data timeout ns */
- u32 timeout_clks; /* data timeout clks */
-
- int irq; /* host interrupt */
-
- struct delayed_work card_delaywork;
-
- struct completion cmd_done;
- struct completion xfer_done;
- struct pm_message pm_state;
-
- u32 mclk; /* mmc subsystem clock */
- u32 hclk; /* host clock speed */
- u32 sclk; /* SD/MS clock speed */
- u8 core_clkon; /* Host core clock on ? */
- u8 card_clkon; /* Card clock on ? */
- u8 core_power; /* core power */
- u8 power_mode; /* host power mode */
- u8 card_inserted; /* card inserted ? */
- u8 suspend; /* host suspended ? */
- u8 app_cmd; /* for app command */
- u32 app_cmd_arg;
-};
-
-static inline void sdr_set_bits(void __iomem *reg, u32 bs)
-{
- u32 val = readl(reg);
-
- val |= bs;
- writel(val, reg);
-}
-
-static inline void sdr_clr_bits(void __iomem *reg, u32 bs)
-{
- u32 val = readl(reg);
-
- val &= ~bs;
- writel(val, reg);
-}
-
-static inline void sdr_set_field(void __iomem *reg, u32 field, u32 val)
-{
- unsigned int tv = readl(reg);
-
- tv &= ~field;
- tv |= ((val) << (ffs((unsigned int)field) - 1));
- writel(tv, reg);
-}
-
-static inline void sdr_get_field(void __iomem *reg, u32 field, u32 *val)
-{
- unsigned int tv = readl(reg);
- *val = ((tv & field) >> (ffs((unsigned int)field) - 1));
-}
-
-#endif
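The sdr_set_field()/sdr_get_field() helpers above derive a field's shift position from its mask with ffs(), so callers pass field-relative values rather than pre-shifted ones. A minimal userspace sketch of the same read-modify-write pattern, borrowing the MSDC_PAD_TUNE_CMDRRDLY layout (0x1F << 22); the register value is only an illustrative constant:

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs(); the kernel driver gets it via <linux/bitops.h> */

#define FIELD_MASK	(0x1FUL << 22)	/* same layout as MSDC_PAD_TUNE_CMDRRDLY */

int main(void)
{
	uint32_t reg = 0x84101010;			/* illustrative register value */
	unsigned int shift = ffs(FIELD_MASK) - 1;	/* 22: position of the lowest mask bit */

	reg = (reg & ~FIELD_MASK) | (7UL << shift);		/* "set_field": write 7 into the 5-bit field */
	printf("field = %lu\n", (reg & FIELD_MASK) >> shift);	/* "get_field": prints 7 */
	return 0;
}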
diff --git a/drivers/staging/mt7621-mmc/sd.c b/drivers/staging/mt7621-mmc/sd.c
deleted file mode 100644
index 04d23cc7cd4a..000000000000
--- a/drivers/staging/mt7621-mmc/sd.c
+++ /dev/null
@@ -1,2392 +0,0 @@
-/* Copyright Statement:
- *
- * This software/firmware and related documentation ("MediaTek Software") are
- * protected under relevant copyright laws. The information contained herein
- * is confidential and proprietary to MediaTek Inc. and/or its licensors.
- * Without the prior written permission of MediaTek inc. and/or its licensors,
- * any reproduction, modification, use or disclosure of MediaTek Software,
- * and information contained herein, in whole or in part, shall be strictly prohibited.
- *
- * MediaTek Inc. (C) 2010. All rights reserved.
- *
- * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
- * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
- * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
- * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
- * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
- * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
- * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
- * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
- * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
- * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
- * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
- * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
- * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
- * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
- * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
- * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
- *
- * The following software/firmware and/or related documentation ("MediaTek Software")
- * have been modified by MediaTek Inc. All revisions are subject to any receiver's
- * applicable license agreements with MediaTek Inc.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/spinlock.h>
-#include <linux/platform_device.h>
-
-#include <linux/mmc/host.h>
-#include <linux/mmc/mmc.h>
-#include <linux/mmc/sd.h>
-#include <linux/mmc/sdio.h>
-
-#include <asm/mach-ralink/ralink_regs.h>
-
-#include "board.h"
-#include "dbg.h"
-#include "mt6575_sd.h"
-
-#ifdef CONFIG_SOC_MT7621
-#define RALINK_SYSCTL_BASE 0xbe000000
-#else
-#define RALINK_SYSCTL_BASE 0xb0000000
-#endif
-
-#define DRV_NAME "mtk-sd"
-
-#if defined(CONFIG_SOC_MT7620)
-#define HOST_MAX_MCLK (48000000) /* +/- by chhung */
-#elif defined(CONFIG_SOC_MT7621)
-#define HOST_MAX_MCLK (50000000) /* +/- by chhung */
-#endif
-#define HOST_MIN_MCLK (260000)
-
-#define HOST_MAX_BLKSZ (2048)
-
-#define MSDC_OCR_AVAIL (MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33)
-
-#define GPIO_PULL_DOWN (0)
-#define GPIO_PULL_UP (1)
-
-#if 0 /* --- by chhung */
-#define MSDC_CLKSRC_REG (0xf100000C)
-#define PDN_REG (0xF1000010)
-#endif /* end of --- */
-
-#define DEFAULT_DEBOUNCE (8) /* 8 cycles */
-#define DEFAULT_DTOC (40) /* data timeout counter. 65536x40 sclk. */
-
-#define CMD_TIMEOUT (HZ / 10) /* 100ms */
-#define DAT_TIMEOUT (HZ / 2 * 5) /* 500ms x5 */
-
-#define MAX_DMA_CNT (64 * 1024 - 512) /* a single transaction for WIFI may be 50K*/
-
-#define MAX_GPD_NUM (1 + 1) /* one null gpd */
-#define MAX_BD_NUM (1024)
-
-#define MAX_HW_SGMTS (MAX_BD_NUM)
-#define MAX_SGMT_SZ (MAX_DMA_CNT)
-#define MAX_REQ_SZ (MAX_SGMT_SZ * 8)
-
-static int cd_active_low = 1;
-
-//=================================
-#define PERI_MSDC0_PDN (15)
-//#define PERI_MSDC1_PDN (16)
-//#define PERI_MSDC2_PDN (17)
-//#define PERI_MSDC3_PDN (18)
-
-#if 0 /* --- by chhung */
-/* gate means clock power down */
-static int g_clk_gate = 0;
-#define msdc_gate_clock(id) \
- do { \
- g_clk_gate &= ~(1 << ((id) + PERI_MSDC0_PDN)); \
- } while (0)
-/* not like power down register. 1 means clock on. */
-#define msdc_ungate_clock(id) \
- do { \
- g_clk_gate |= 1 << ((id) + PERI_MSDC0_PDN); \
- } while (0)
-
-// do we need sync object or not
-void msdc_clk_status(int *status)
-{
- *status = g_clk_gate;
-}
-#endif /* end of --- */
-
-/* +++ by chhung */
-struct msdc_hw msdc0_hw = {
- .clk_src = 0,
- .flags = MSDC_CD_PIN_EN | MSDC_REMOVABLE,
-// .flags = MSDC_WP_PIN_EN | MSDC_CD_PIN_EN | MSDC_REMOVABLE,
-};
-
-/* end of +++ */
-
-static int msdc_rsp[] = {
- 0, /* RESP_NONE */
- 1, /* RESP_R1 */
- 2, /* RESP_R2 */
- 3, /* RESP_R3 */
- 4, /* RESP_R4 */
- 1, /* RESP_R5 */
- 1, /* RESP_R6 */
- 1, /* RESP_R7 */
- 7, /* RESP_R1b */
-};
-
-#define msdc_dma_on() sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO)
-
-static void msdc_reset_hw(struct msdc_host *host)
-{
- sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
- while (readl(host->base + MSDC_CFG) & MSDC_CFG_RST)
- cpu_relax();
-}
-
-#define msdc_clr_int() \
- do { \
- volatile u32 val = readl(host->base + MSDC_INT); \
- writel(val, host->base + MSDC_INT); \
- } while (0)
-
-static void msdc_clr_fifo(struct msdc_host *host)
-{
- sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
- while (readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_CLR)
- cpu_relax();
-}
-
-#define msdc_irq_save(val) \
- do { \
- val = readl(host->base + MSDC_INTEN); \
- sdr_clr_bits(host->base + MSDC_INTEN, val); \
- } while (0)
-
-#define msdc_irq_restore(val) \
- do { \
- sdr_set_bits(host->base + MSDC_INTEN, val); \
- } while (0)
-
-/* clock source for host: global */
-#if defined(CONFIG_SOC_MT7620)
-static u32 hclks[] = {48000000}; /* +/- by chhung */
-#elif defined(CONFIG_SOC_MT7621)
-static u32 hclks[] = {50000000}; /* +/- by chhung */
-#endif
-
-//============================================
-// Power for the MSDC host controller (global):
-// always keep the VMC on.
-//============================================
-#define msdc_vcore_on(host) \
- do { \
- INIT_MSG("[+]VMC ref. count<%d>", ++host->pwr_ref); \
- (void)hwPowerOn(MT65XX_POWER_LDO_VMC, VOL_3300, "SD"); \
- } while (0)
-#define msdc_vcore_off(host) \
- do { \
- INIT_MSG("[-]VMC ref. count<%d>", --host->pwr_ref); \
- (void)hwPowerDown(MT65XX_POWER_LDO_VMC, "SD"); \
- } while (0)
-
-//====================================
-// VDD output for the card (global):
-// always keep the VMCH on.
-//====================================
-#define msdc_vdd_on(host) \
- do { \
- (void)hwPowerOn(MT65XX_POWER_LDO_VMCH, VOL_3300, "SD"); \
- } while (0)
-#define msdc_vdd_off(host) \
- do { \
- (void)hwPowerDown(MT65XX_POWER_LDO_VMCH, "SD"); \
- } while (0)
-
-#define sdc_is_busy() (readl(host->base + SDC_STS) & SDC_STS_SDCBUSY)
-#define sdc_is_cmd_busy() (readl(host->base + SDC_STS) & SDC_STS_CMDBUSY)
-
-#define sdc_send_cmd(cmd, arg) \
- do { \
- writel((arg), host->base + SDC_ARG); \
- writel((cmd), host->base + SDC_CMD); \
- } while (0)
-
-/* +++ by chhung */
-#ifndef __ASSEMBLY__
-#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
-#else
-#define PHYSADDR(a) ((a) & 0x1fffffff)
-#endif
-/* end of +++ */
-static unsigned int msdc_do_command(struct msdc_host *host,
- struct mmc_command *cmd,
- int tune,
- unsigned long timeout);
-
-static int msdc_tune_cmdrsp(struct msdc_host *host, struct mmc_command *cmd);
-
-#ifdef MT6575_SD_DEBUG
-static void msdc_dump_card_status(struct msdc_host *host, u32 status)
-{
-/* N_MSG is currently a no-op */
-#if 0
- static char *state[] = {
- "Idle", /* 0 */
- "Ready", /* 1 */
- "Ident", /* 2 */
- "Stby", /* 3 */
- "Tran", /* 4 */
- "Data", /* 5 */
- "Rcv", /* 6 */
- "Prg", /* 7 */
- "Dis", /* 8 */
- "Reserved", /* 9 */
- "Reserved", /* 10 */
- "Reserved", /* 11 */
- "Reserved", /* 12 */
- "Reserved", /* 13 */
- "Reserved", /* 14 */
- "I/O mode", /* 15 */
- };
-#endif
- if (status & R1_OUT_OF_RANGE)
- N_MSG(RSP, "[CARD_STATUS] Out of Range");
- if (status & R1_ADDRESS_ERROR)
- N_MSG(RSP, "[CARD_STATUS] Address Error");
- if (status & R1_BLOCK_LEN_ERROR)
- N_MSG(RSP, "[CARD_STATUS] Block Len Error");
- if (status & R1_ERASE_SEQ_ERROR)
- N_MSG(RSP, "[CARD_STATUS] Erase Seq Error");
- if (status & R1_ERASE_PARAM)
- N_MSG(RSP, "[CARD_STATUS] Erase Param");
- if (status & R1_WP_VIOLATION)
- N_MSG(RSP, "[CARD_STATUS] WP Violation");
- if (status & R1_CARD_IS_LOCKED)
- N_MSG(RSP, "[CARD_STATUS] Card is Locked");
- if (status & R1_LOCK_UNLOCK_FAILED)
- N_MSG(RSP, "[CARD_STATUS] Lock/Unlock Failed");
- if (status & R1_COM_CRC_ERROR)
- N_MSG(RSP, "[CARD_STATUS] Command CRC Error");
- if (status & R1_ILLEGAL_COMMAND)
- N_MSG(RSP, "[CARD_STATUS] Illegal Command");
- if (status & R1_CARD_ECC_FAILED)
- N_MSG(RSP, "[CARD_STATUS] Card ECC Failed");
- if (status & R1_CC_ERROR)
- N_MSG(RSP, "[CARD_STATUS] CC Error");
- if (status & R1_ERROR)
- N_MSG(RSP, "[CARD_STATUS] Error");
- if (status & R1_UNDERRUN)
- N_MSG(RSP, "[CARD_STATUS] Underrun");
- if (status & R1_OVERRUN)
- N_MSG(RSP, "[CARD_STATUS] Overrun");
- if (status & R1_CID_CSD_OVERWRITE)
- N_MSG(RSP, "[CARD_STATUS] CID/CSD Overwrite");
- if (status & R1_WP_ERASE_SKIP)
- N_MSG(RSP, "[CARD_STATUS] WP Eraser Skip");
- if (status & R1_CARD_ECC_DISABLED)
- N_MSG(RSP, "[CARD_STATUS] Card ECC Disabled");
- if (status & R1_ERASE_RESET)
- N_MSG(RSP, "[CARD_STATUS] Erase Reset");
- if (status & R1_READY_FOR_DATA)
- N_MSG(RSP, "[CARD_STATUS] Ready for Data");
- if (status & R1_SWITCH_ERROR)
- N_MSG(RSP, "[CARD_STATUS] Switch error");
- if (status & R1_APP_CMD)
- N_MSG(RSP, "[CARD_STATUS] App Command");
-
- N_MSG(RSP, "[CARD_STATUS] '%s' State", state[R1_CURRENT_STATE(status)]);
-}
-
-static void msdc_dump_ocr_reg(struct msdc_host *host, u32 resp)
-{
- if (resp & (1 << 7))
- N_MSG(RSP, "[OCR] Low Voltage Range");
- if (resp & (1 << 15))
- N_MSG(RSP, "[OCR] 2.7-2.8 volt");
- if (resp & (1 << 16))
- N_MSG(RSP, "[OCR] 2.8-2.9 volt");
- if (resp & (1 << 17))
- N_MSG(RSP, "[OCR] 2.9-3.0 volt");
- if (resp & (1 << 18))
- N_MSG(RSP, "[OCR] 3.0-3.1 volt");
- if (resp & (1 << 19))
- N_MSG(RSP, "[OCR] 3.1-3.2 volt");
- if (resp & (1 << 20))
- N_MSG(RSP, "[OCR] 3.2-3.3 volt");
- if (resp & (1 << 21))
- N_MSG(RSP, "[OCR] 3.3-3.4 volt");
- if (resp & (1 << 22))
- N_MSG(RSP, "[OCR] 3.4-3.5 volt");
- if (resp & (1 << 23))
- N_MSG(RSP, "[OCR] 3.5-3.6 volt");
- if (resp & (1 << 24))
- N_MSG(RSP, "[OCR] Switching to 1.8V Accepted (S18A)");
- if (resp & (1 << 30))
- N_MSG(RSP, "[OCR] Card Capacity Status (CCS)");
- if (resp & (1 << 31))
- N_MSG(RSP, "[OCR] Card Power Up Status (Idle)");
- else
- N_MSG(RSP, "[OCR] Card Power Up Status (Busy)");
-}
-
-static void msdc_dump_rca_resp(struct msdc_host *host, u32 resp)
-{
- u32 status = (((resp >> 15) & 0x1) << 23) |
- (((resp >> 14) & 0x1) << 22) |
- (((resp >> 13) & 0x1) << 19) |
- (resp & 0x1fff);
-
- N_MSG(RSP, "[RCA] 0x%.4x", resp >> 16);
- msdc_dump_card_status(host, status);
-}
-
-static void msdc_dump_io_resp(struct msdc_host *host, u32 resp)
-{
- u32 flags = (resp >> 8) & 0xFF;
-#if 0
- char *state[] = {"DIS", "CMD", "TRN", "RFU"};
-#endif
- if (flags & (1 << 7))
- N_MSG(RSP, "[IO] COM_CRC_ERR");
- if (flags & (1 << 6))
- N_MSG(RSP, "[IO] Illegal command");
- if (flags & (1 << 3))
- N_MSG(RSP, "[IO] Error");
- if (flags & (1 << 2))
- N_MSG(RSP, "[IO] RFU");
- if (flags & (1 << 1))
- N_MSG(RSP, "[IO] Function number error");
- if (flags & (1 << 0))
- N_MSG(RSP, "[IO] Out of range");
-
- N_MSG(RSP, "[IO] State: %s, Data:0x%x", state[(resp >> 12) & 0x3], resp & 0xFF);
-}
-#endif
-
-static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
-{
- u32 timeout, clk_ns;
-
- host->timeout_ns = ns;
- host->timeout_clks = clks;
-
- clk_ns = 1000000000UL / host->sclk;
- timeout = ns / clk_ns + clks;
- timeout = timeout >> 16; /* in 65536 sclk cycle unit */
- timeout = timeout > 1 ? timeout - 1 : 0;
- timeout = timeout > 255 ? 255 : timeout;
-
- sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, timeout);
-
- N_MSG(OPS, "Set read data timeout: %dns %dclks -> %d x 65536 cycles",
- ns, clks, timeout + 1);
-}
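As a rough worked example of the arithmetic above (assumed numbers, not from any particular board): with sclk at 50 MHz one clock is 20 ns, so a request of 100 ms and 0 extra clocks is 5,000,000 cycles, or 76 units of 65536 cycles; after the -1 adjustment DTOC is programmed to 75, giving an effective timeout of 76 * 65536 * 20 ns, roughly 99.6 ms, with the 255 cap only kicking in for much longer requests.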
-
-static void msdc_tasklet_card(struct work_struct *work)
-{
- struct msdc_host *host = (struct msdc_host *)container_of(work,
- struct msdc_host, card_delaywork.work);
- u32 inserted;
- u32 status = 0;
- //u32 change = 0;
-
- spin_lock(&host->lock);
-
- status = readl(host->base + MSDC_PS);
- if (cd_active_low)
- inserted = (status & MSDC_PS_CDSTS) ? 0 : 1;
- else
- inserted = (status & MSDC_PS_CDSTS) ? 1 : 0;
-
-#if 0
- change = host->card_inserted ^ inserted;
- host->card_inserted = inserted;
-
- if (change && !host->suspend) {
- if (inserted)
- host->mmc->f_max = HOST_MAX_MCLK; // work around
- mmc_detect_change(host->mmc, msecs_to_jiffies(20));
- }
-#else /* Make sure: handle the last interrupt */
- host->card_inserted = inserted;
-
- if (!host->suspend) {
- host->mmc->f_max = HOST_MAX_MCLK;
- mmc_detect_change(host->mmc, msecs_to_jiffies(20));
- }
-
- IRQ_MSG("card found<%s>", inserted ? "inserted" : "removed");
-#endif
-
- spin_unlock(&host->lock);
-}
-
-#if 0 /* --- by chhung */
-/* For E2 only */
-static u8 clk_src_bit[4] = {
- 0, 3, 5, 7
-};
-
-static void msdc_select_clksrc(struct msdc_host *host, unsigned char clksrc)
-{
- u32 val;
-
- BUG_ON(clksrc > 3);
- INIT_MSG("set clock source to <%d>", clksrc);
-
- val = readl(host->base + MSDC_CLKSRC_REG);
- if (readl(host->base + MSDC_ECO_VER) >= 4) {
- val &= ~(0x3 << clk_src_bit[host->id]);
- val |= clksrc << clk_src_bit[host->id];
- } else {
- val &= ~0x3; val |= clksrc;
- }
- writel(val, host->base + MSDC_CLKSRC_REG);
-
- host->hclk = hclks[clksrc];
- host->hw->clk_src = clksrc;
-}
-#endif /* end of --- */
-
-static void msdc_set_mclk(struct msdc_host *host, int ddr, unsigned int hz)
-{
- //struct msdc_hw *hw = host->hw;
- u32 mode;
- u32 flags;
- u32 div;
- u32 sclk;
- u32 hclk = host->hclk;
- //u8 clksrc = hw->clk_src;
-
- if (!hz) { // set mmc system clock to 0 ?
- //ERR_MSG("set mclk to 0!!!");
- msdc_reset_hw(host);
- return;
- }
-
- msdc_irq_save(flags);
-
- if (ddr) {
- mode = 0x2; /* ddr mode and use divisor */
- if (hz >= (hclk >> 2)) {
- div = 1; /* mean div = 1/4 */
- sclk = hclk >> 2; /* sclk = clk / 4 */
- } else {
- div = (hclk + ((hz << 2) - 1)) / (hz << 2);
- sclk = (hclk >> 2) / div;
- }
- } else if (hz >= hclk) { /* bug fix */
- mode = 0x1; /* no divisor and divisor is ignored */
- div = 0;
- sclk = hclk;
- } else {
- mode = 0x0; /* use divisor */
- if (hz >= (hclk >> 1)) {
- div = 0; /* mean div = 1/2 */
- sclk = hclk >> 1; /* sclk = clk / 2 */
- } else {
- div = (hclk + ((hz << 2) - 1)) / (hz << 2);
- sclk = (hclk >> 2) / div;
- }
- }
-
- /* set clock mode and divisor */
- sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD, mode);
- sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKDIV, div);
-
- /* wait clock stable */
- while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
- cpu_relax();
-
- host->sclk = sclk;
- host->mclk = hz;
- msdc_set_timeout(host, host->timeout_ns, host->timeout_clks); // need?
-
- INIT_MSG("================");
- INIT_MSG("!!! Set<%dKHz> Source<%dKHz> -> sclk<%dKHz>", hz / 1000, hclk / 1000, sclk / 1000);
- INIT_MSG("================");
-
- msdc_irq_restore(flags);
-}
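To make the divider math above concrete (illustrative numbers only): in SDR mode with hclk = 50 MHz and a requested 400 kHz, hz << 2 is 1.6 MHz, so div = (50,000,000 + 1,599,999) / 1,600,000 = 32 and sclk = (50 MHz / 4) / 32 = 390,625 Hz; because the division rounds up, the resulting sclk never exceeds the requested rate.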
-
-/* FIXME: when do we need to abort? */
-static void msdc_abort_data(struct msdc_host *host)
-{
- struct mmc_command *stop = host->mrq->stop;
-
- ERR_MSG("Need to Abort.");
-
- msdc_reset_hw(host);
- msdc_clr_fifo(host);
- msdc_clr_int();
-
- // need to check FIFO count 0 ?
-
- if (stop) { /* try to stop, but it may not succeed */
- ERR_MSG("stop when abort CMD<%d>", stop->opcode);
- (void)msdc_do_command(host, stop, 0, CMD_TIMEOUT);
- }
-
- //if (host->mclk >= 25000000) {
- // msdc_set_mclk(host, 0, host->mclk >> 1);
- //}
-}
-
-#if 0 /* --- by chhung */
-static void msdc_pin_config(struct msdc_host *host, int mode)
-{
- struct msdc_hw *hw = host->hw;
- int pull = (mode == MSDC_PIN_PULL_UP) ? GPIO_PULL_UP : GPIO_PULL_DOWN;
-
- /* Config WP pin */
- if (hw->flags & MSDC_WP_PIN_EN) {
- if (hw->config_gpio_pin) /* NULL */
- hw->config_gpio_pin(MSDC_WP_PIN, pull);
- }
-
- switch (mode) {
- case MSDC_PIN_PULL_UP:
- //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 1); /* Check & FIXME */
- //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 0); /* Check & FIXME */
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 1);
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 1);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0);
- break;
- case MSDC_PIN_PULL_DOWN:
- //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 0); /* Check & FIXME */
- //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 1); /* Check & FIXME */
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 1);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 1);
- break;
- case MSDC_PIN_PULL_NONE:
- default:
- //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 0); /* Check & FIXME */
- //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 0); /* Check & FIXME */
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0);
- break;
- }
-
- N_MSG(CFG, "Pins mode(%d), down(%d), up(%d)",
- mode, MSDC_PIN_PULL_DOWN, MSDC_PIN_PULL_UP);
-}
-
-void msdc_pin_reset(struct msdc_host *host, int mode)
-{
- struct msdc_hw *hw = (struct msdc_hw *)host->hw;
- int pull = (mode == MSDC_PIN_PULL_UP) ? GPIO_PULL_UP : GPIO_PULL_DOWN;
-
- /* Config reset pin */
- if (hw->flags & MSDC_RST_PIN_EN) {
- if (hw->config_gpio_pin) /* NULL */
- hw->config_gpio_pin(MSDC_RST_PIN, pull);
-
- if (mode == MSDC_PIN_PULL_UP)
- sdr_clr_bits(host->base + EMMC_IOCON, EMMC_IOCON_BOOTRST);
- else
- sdr_set_bits(host->base + EMMC_IOCON, EMMC_IOCON_BOOTRST);
- }
-}
-
-static void msdc_core_power(struct msdc_host *host, int on)
-{
- N_MSG(CFG, "Turn %s %s power (copower: %d -> %d)",
- on ? "on" : "off", "core", host->core_power, on);
-
- if (on && host->core_power == 0) {
- msdc_vcore_on(host);
- host->core_power = 1;
- msleep(1);
- } else if (!on && host->core_power == 1) {
- msdc_vcore_off(host);
- host->core_power = 0;
- msleep(1);
- }
-}
-
-static void msdc_host_power(struct msdc_host *host, int on)
-{
- N_MSG(CFG, "Turn %s %s power ", on ? "on" : "off", "host");
-
- if (on) {
- //msdc_core_power(host, 1); // need do card detection.
- msdc_pin_reset(host, MSDC_PIN_PULL_UP);
- } else {
- msdc_pin_reset(host, MSDC_PIN_PULL_DOWN);
- //msdc_core_power(host, 0);
- }
-}
-
-static void msdc_card_power(struct msdc_host *host, int on)
-{
- N_MSG(CFG, "Turn %s %s power ", on ? "on" : "off", "card");
-
- if (on) {
- msdc_pin_config(host, MSDC_PIN_PULL_UP);
- //msdc_vdd_on(host); // need todo card detection.
- msleep(1);
- } else {
- //msdc_vdd_off(host);
- msdc_pin_config(host, MSDC_PIN_PULL_DOWN);
- msleep(1);
- }
-}
-
-static void msdc_set_power_mode(struct msdc_host *host, u8 mode)
-{
- N_MSG(CFG, "Set power mode(%d)", mode);
-
- if (host->power_mode == MMC_POWER_OFF && mode != MMC_POWER_OFF) {
- msdc_host_power(host, 1);
- msdc_card_power(host, 1);
- } else if (host->power_mode != MMC_POWER_OFF && mode == MMC_POWER_OFF) {
- msdc_card_power(host, 0);
- msdc_host_power(host, 0);
- }
- host->power_mode = mode;
-}
-#endif /* end of --- */
-
-#ifdef CONFIG_PM
-/*
- * Registered as the PM callback of the Wi-Fi driver (combo_sdio_register_pm).
- * It can also be called by msdc_drv_suspend/resume.
- */
-static void msdc_pm(pm_message_t state, void *data)
-{
- struct msdc_host *host = (struct msdc_host *)data;
- int evt = state.event;
-
- if (evt == PM_EVENT_USER_RESUME || evt == PM_EVENT_USER_SUSPEND) {
- INIT_MSG("USR_%s: suspend<%d> power<%d>",
- evt == PM_EVENT_USER_RESUME ? "EVENT_USER_RESUME" : "EVENT_USER_SUSPEND",
- host->suspend, host->power_mode);
- }
-
- if (evt == PM_EVENT_SUSPEND || evt == PM_EVENT_USER_SUSPEND) {
- if (host->suspend) /* already suspend */ /* default 0*/
- return;
-
- /* for memory card. already power off by mmc */
- if (evt == PM_EVENT_SUSPEND && host->power_mode == MMC_POWER_OFF)
- return;
-
- host->suspend = 1;
- host->pm_state = state; /* default PMSG_RESUME */
-
- } else if (evt == PM_EVENT_RESUME || evt == PM_EVENT_USER_RESUME) {
- if (!host->suspend) {
- //ERR_MSG("warning: already resume");
- return;
- }
-
- /* No PM resume when USR suspend */
- if (evt == PM_EVENT_RESUME && host->pm_state.event == PM_EVENT_USER_SUSPEND) {
- ERR_MSG("PM Resume when in USR Suspend"); /* won't happen. */
- return;
- }
-
- host->suspend = 0;
- host->pm_state = state;
-
- }
-}
-#endif
-
-static inline u32 msdc_cmd_find_resp(struct mmc_command *cmd)
-{
- u32 opcode = cmd->opcode;
- u32 resp;
-
- if (opcode == MMC_SET_RELATIVE_ADDR) {
- resp = (mmc_cmd_type(cmd) == MMC_CMD_BCR) ? RESP_R6 : RESP_R1;
- } else if (opcode == MMC_FAST_IO) {
- resp = RESP_R4;
- } else if (opcode == MMC_GO_IRQ_STATE) {
- resp = RESP_R5;
- } else if (opcode == MMC_SELECT_CARD) {
- resp = (cmd->arg != 0) ? RESP_R1B : RESP_NONE;
- } else if (opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED) {
- resp = RESP_R1; /* SDIO workaround. */
- } else if (opcode == SD_SEND_IF_COND && (mmc_cmd_type(cmd) == MMC_CMD_BCR)) {
- resp = RESP_R1;
- } else {
- switch (mmc_resp_type(cmd)) {
- case MMC_RSP_R1:
- resp = RESP_R1;
- break;
- case MMC_RSP_R1B:
- resp = RESP_R1B;
- break;
- case MMC_RSP_R2:
- resp = RESP_R2;
- break;
- case MMC_RSP_R3:
- resp = RESP_R3;
- break;
- case MMC_RSP_NONE:
- default:
- resp = RESP_NONE;
- break;
- }
- }
-
- return resp;
-}
-
-/*--------------------------------------------------------------------------*/
-/* mmc_host_ops members */
-/*--------------------------------------------------------------------------*/
-static unsigned int msdc_command_start(struct msdc_host *host,
- struct mmc_command *cmd,
- unsigned long timeout)
-{
- u32 opcode = cmd->opcode;
- u32 rawcmd;
- u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO |
- MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO |
- MSDC_INT_ACMD19_DONE;
-
- u32 resp;
- unsigned long tmo;
-
- /* The protocol layer does not provide the response type, but our
- * hardware needs to know the exact type, not just the size.
- */
- resp = msdc_cmd_find_resp(cmd);
-
- cmd->error = 0;
- /* rawcmd :
- * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
- * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
- */
- rawcmd = opcode | msdc_rsp[resp] << 7 | host->blksz << 16;
-
- if (opcode == MMC_READ_MULTIPLE_BLOCK) {
- rawcmd |= (2 << 11);
- } else if (opcode == MMC_READ_SINGLE_BLOCK) {
- rawcmd |= (1 << 11);
- } else if (opcode == MMC_WRITE_MULTIPLE_BLOCK) {
- rawcmd |= ((2 << 11) | (1 << 13));
- } else if (opcode == MMC_WRITE_BLOCK) {
- rawcmd |= ((1 << 11) | (1 << 13));
- } else if (opcode == SD_IO_RW_EXTENDED) {
- if (cmd->data->flags & MMC_DATA_WRITE)
- rawcmd |= (1 << 13);
- if (cmd->data->blocks > 1)
- rawcmd |= (2 << 11);
- else
- rawcmd |= (1 << 11);
- } else if (opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int)-1) {
- rawcmd |= (1 << 14);
- } else if ((opcode == SD_APP_SEND_SCR) ||
- (opcode == SD_APP_SEND_NUM_WR_BLKS) ||
- (opcode == SD_SWITCH && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) ||
- (opcode == SD_APP_SD_STATUS && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) ||
- (opcode == MMC_SEND_EXT_CSD && (mmc_cmd_type(cmd) == MMC_CMD_ADTC))) {
- rawcmd |= (1 << 11);
- } else if (opcode == MMC_STOP_TRANSMISSION) {
- rawcmd |= (1 << 14);
- rawcmd &= ~(0x0FFF << 16);
- }
-
- N_MSG(CMD, "CMD<%d><0x%.8x> Arg<0x%.8x>", opcode, rawcmd, cmd->arg);
-
- tmo = jiffies + timeout;
-
- if (opcode == MMC_SEND_STATUS) {
- for (;;) {
- if (!sdc_is_cmd_busy())
- break;
-
- if (time_after(jiffies, tmo)) {
- ERR_MSG("XXX cmd_busy timeout: before CMD<%d>", opcode);
- cmd->error = -ETIMEDOUT;
- msdc_reset_hw(host);
- goto end;
- }
- }
- } else {
- for (;;) {
- if (!sdc_is_busy())
- break;
- if (time_after(jiffies, tmo)) {
- ERR_MSG("XXX sdc_busy timeout: before CMD<%d>", opcode);
- cmd->error = -ETIMEDOUT;
- msdc_reset_hw(host);
- goto end;
- }
- }
- }
-
- //BUG_ON(in_interrupt());
- host->cmd = cmd;
- host->cmd_rsp = resp;
-
- init_completion(&host->cmd_done);
-
- sdr_set_bits(host->base + MSDC_INTEN, wints);
- sdc_send_cmd(rawcmd, cmd->arg);
-
-end:
- return cmd->error;
-}
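As an illustrative decoding of the rawcmd layout above (it assumes msdc_rsp[RESP_R1] == 1, as the table near the top of the file suggests, and a 512-byte block size): MMC_READ_SINGLE_BLOCK (opcode 17) becomes 17 | (1 << 7) | (512 << 16), and the single-block dtype bit (1 << 11) is OR'ed in, so rawcmd = 0x02000891.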
-
-static unsigned int msdc_command_resp(struct msdc_host *host,
- struct mmc_command *cmd,
- int tune,
- unsigned long timeout)
- __must_hold(&host->lock)
-{
- u32 opcode = cmd->opcode;
- //u32 rawcmd;
- u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO |
- MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO |
- MSDC_INT_ACMD19_DONE;
-
- BUG_ON(in_interrupt());
- //init_completion(&host->cmd_done);
- //sdr_set_bits(host->base + MSDC_INTEN, wints);
-
- spin_unlock(&host->lock);
- if (!wait_for_completion_timeout(&host->cmd_done, 10 * timeout)) {
- ERR_MSG("XXX CMD<%d> wait_for_completion timeout ARG<0x%.8x>", opcode, cmd->arg);
- cmd->error = -ETIMEDOUT;
- msdc_reset_hw(host);
- }
- spin_lock(&host->lock);
-
- sdr_clr_bits(host->base + MSDC_INTEN, wints);
- host->cmd = NULL;
-
-//end:
-#ifdef MT6575_SD_DEBUG
- {
- u32 resp = host->cmd_rsp; /* response type chosen in msdc_command_start() */
-
- switch (resp) {
- case RESP_NONE:
- N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)", opcode, cmd->error, resp);
- break;
- case RESP_R2:
- N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)= %.8x %.8x %.8x %.8x",
- opcode, cmd->error, resp, cmd->resp[0], cmd->resp[1],
- cmd->resp[2], cmd->resp[3]);
- break;
- default: /* Response types 1, 3, 4, 5, 6, 7(1b) */
- N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)= 0x%.8x",
- opcode, cmd->error, resp, cmd->resp[0]);
- if (cmd->error == 0) {
- switch (resp) {
- case RESP_R1:
- case RESP_R1B:
- msdc_dump_card_status(host, cmd->resp[0]);
- break;
- case RESP_R3:
- msdc_dump_ocr_reg(host, cmd->resp[0]);
- break;
- case RESP_R5:
- msdc_dump_io_resp(host, cmd->resp[0]);
- break;
- case RESP_R6:
- msdc_dump_rca_resp(host, cmd->resp[0]);
- break;
- }
- }
- break;
- }
- }
-#endif
-
- /* Do we need to save the card's RCA on SD_SEND_RELATIVE_ADDR? */
-
- if (!tune)
- return cmd->error;
-
- /* memory card CRC */
- if (host->hw->flags & MSDC_REMOVABLE && cmd->error == -EIO) {
- /* check if has data phase */
- if (readl(host->base + SDC_CMD) & 0x1800) {
- msdc_abort_data(host);
- } else {
- /* do basic: reset*/
- msdc_reset_hw(host);
- msdc_clr_fifo(host);
- msdc_clr_int();
- }
- cmd->error = msdc_tune_cmdrsp(host, cmd);
- }
-
- // check DAT0
- /* if (resp == RESP_R1B) {
- while ((readl(host->base + MSDC_PS) & 0x10000) != 0x10000);
- } */
- /* CMD12 Error Handle */
-
- return cmd->error;
-}
-
-static unsigned int msdc_do_command(struct msdc_host *host,
- struct mmc_command *cmd,
- int tune,
- unsigned long timeout)
-{
- if (msdc_command_start(host, cmd, timeout))
- goto end;
-
- if (msdc_command_resp(host, cmd, tune, timeout))
- goto end;
-
-end:
-
- N_MSG(CMD, " return<%d> resp<0x%.8x>", cmd->error, cmd->resp[0]);
- return cmd->error;
-}
-
-#if 0 /* --- by chhung */
-// DMA resume / start / stop
-static void msdc_dma_resume(struct msdc_host *host)
-{
- sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_RESUME, 1);
-
- N_MSG(DMA, "DMA resume");
-}
-#endif /* end of --- */
-
-static void msdc_dma_start(struct msdc_host *host)
-{
- u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR;
-
- sdr_set_bits(host->base + MSDC_INTEN, wints);
- //dsb(); /* --- by chhung */
- sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
-
- N_MSG(DMA, "DMA start");
-}
-
-static void msdc_dma_stop(struct msdc_host *host)
-{
- //u32 retries=500;
- u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR;
-
- N_MSG(DMA, "DMA status: 0x%.8x", readl(host->base + MSDC_DMA_CFG));
- //while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS);
-
- sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1);
- while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS)
- ;
-
- //dsb(); /* --- by chhung */
- sdr_clr_bits(host->base + MSDC_INTEN, wints); /* Not just xfer_comp */
-
- N_MSG(DMA, "DMA stop");
-}
-
-/* calc checksum */
-static u8 msdc_dma_calcs(u8 *buf, u32 len)
-{
- u32 i, sum = 0;
-
- for (i = 0; i < len; i++)
- sum += buf[i];
- return 0xFF - (u8)sum;
-}
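The descriptor checksum is just 0xFF minus the byte sum, so the low byte of (byte sum + stored checksum) always comes out to 0xFF; this is also why the gpd/bd setup below zeroes the chksum field before recomputing it over the first 16 bytes. A small standalone sketch of that property (the 0xA5 fill is arbitrary):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Same rule as msdc_dma_calcs(): 0xFF minus the byte sum. */
static uint8_t calc_chksum(const uint8_t *buf, unsigned int len)
{
	unsigned int i, sum = 0;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return 0xFF - (uint8_t)sum;
}

int main(void)
{
	uint8_t desc[16];	/* stands in for the first 16 bytes of a gpd/bd */
	unsigned int i, sum = 0;
	uint8_t ck;

	memset(desc, 0xA5, sizeof(desc));	/* arbitrary payload */
	ck = calc_chksum(desc, sizeof(desc));

	for (i = 0; i < sizeof(desc); i++)
		sum += desc[i];
	printf("0x%02X\n", (uint8_t)(sum + ck));	/* always prints 0xFF */
	return 0;
}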
-
-static void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
- struct scatterlist *sg_cmd, unsigned int sglen)
-{
- struct scatterlist *sg;
- struct gpd *gpd;
- struct bd *bd;
- u32 j;
-
- BUG_ON(sglen > MAX_BD_NUM); /* not supported currently */
-
- N_MSG(DMA, "DMA sglen<%d> xfersz<%d>", sglen, host->xfer_size);
-
- gpd = dma->gpd;
- bd = dma->bd;
-
- /* modify gpd*/
- //gpd->intr = 0;
- gpd->hwo = 1; /* hw will clear it */
- gpd->bdp = 1;
- gpd->chksum = 0; /* need to clear first. */
- gpd->chksum = msdc_dma_calcs((u8 *)gpd, 16);
-
- /* modify bd*/
- for_each_sg(sg_cmd, sg, sglen, j) {
- bd[j].blkpad = 0;
- bd[j].dwpad = 0;
- bd[j].ptr = (void *)sg_dma_address(sg);
- bd[j].buflen = sg_dma_len(sg);
-
- if (j == sglen - 1)
- bd[j].eol = 1; /* the last bd */
- else
- bd[j].eol = 0;
-
- bd[j].chksum = 0; /* the checksum field needs to be cleared first */
- bd[j].chksum = msdc_dma_calcs((u8 *)(&bd[j]), 16);
- }
-
- sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
- sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ,
- MSDC_BRUST_64B);
- sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 1);
-
- writel(PHYSADDR((u32)dma->gpd_addr), host->base + MSDC_DMA_SA);
-
- N_MSG(DMA, "DMA_CTRL = 0x%x", readl(host->base + MSDC_DMA_CTRL));
- N_MSG(DMA, "DMA_CFG = 0x%x", readl(host->base + MSDC_DMA_CFG));
- N_MSG(DMA, "DMA_SA = 0x%x", readl(host->base + MSDC_DMA_SA));
-}
-
-static int msdc_do_request(struct mmc_host *mmc, struct mmc_request *mrq)
- __must_hold(&host->lock)
-{
- struct msdc_host *host = mmc_priv(mmc);
- struct mmc_command *cmd;
- struct mmc_data *data;
- //u32 intsts = 0;
- int read = 1, send_type = 0;
-
-#define SND_DAT 0
-#define SND_CMD 1
-
- BUG_ON(mmc == NULL);
- BUG_ON(mrq == NULL);
-
- host->error = 0;
-
- cmd = mrq->cmd;
- data = mrq->cmd->data;
-
-#if 0 /* --- by chhung */
- //if(host->id ==1){
- N_MSG(OPS, "enable clock!");
- msdc_ungate_clock(host->id);
- //}
-#endif /* end of --- */
-
- if (!data) {
- send_type = SND_CMD;
- if (msdc_do_command(host, cmd, 1, CMD_TIMEOUT) != 0)
- goto done;
- } else {
- BUG_ON(data->blksz > HOST_MAX_BLKSZ);
- send_type = SND_DAT;
-
- data->error = 0;
- read = data->flags & MMC_DATA_READ ? 1 : 0;
- host->data = data;
- host->xfer_size = data->blocks * data->blksz;
- host->blksz = data->blksz;
-
- if (read) {
- if ((host->timeout_ns != data->timeout_ns) ||
- (host->timeout_clks != data->timeout_clks)) {
- msdc_set_timeout(host, data->timeout_ns, data->timeout_clks);
- }
- }
-
- writel(data->blocks, host->base + SDC_BLK_NUM);
- //msdc_clr_fifo(host); /* no need */
-
- msdc_dma_on(); /* enable DMA mode first!! */
- init_completion(&host->xfer_done);
-
- /* start the command first*/
- if (msdc_command_start(host, cmd, CMD_TIMEOUT) != 0)
- goto done;
-
- data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg,
- data->sg_len,
- mmc_get_dma_dir(data));
- msdc_dma_setup(host, &host->dma, data->sg,
- data->sg_count);
-
- /* then wait command done */
- if (msdc_command_resp(host, cmd, 1, CMD_TIMEOUT) != 0)
- goto done;
-
- /* For reads the data can arrive too fast and raise a CRC error;
- starting DMA has no bearing on the CRC check. */
- //init_completion(&host->xfer_done);
- msdc_dma_start(host);
-
- spin_unlock(&host->lock);
- if (!wait_for_completion_timeout(&host->xfer_done, DAT_TIMEOUT)) {
- ERR_MSG("XXX CMD<%d> wait xfer_done<%d> timeout!!", cmd->opcode, data->blocks * data->blksz);
- ERR_MSG(" DMA_SA = 0x%x",
- readl(host->base + MSDC_DMA_SA));
- ERR_MSG(" DMA_CA = 0x%x",
- readl(host->base + MSDC_DMA_CA));
- ERR_MSG(" DMA_CTRL = 0x%x",
- readl(host->base + MSDC_DMA_CTRL));
- ERR_MSG(" DMA_CFG = 0x%x",
- readl(host->base + MSDC_DMA_CFG));
- data->error = -ETIMEDOUT;
-
- msdc_reset_hw(host);
- msdc_clr_fifo(host);
- msdc_clr_int();
- }
- spin_lock(&host->lock);
- msdc_dma_stop(host);
-
- /* Last: stop transfer */
- if (data->stop) {
- if (msdc_do_command(host, data->stop, 0, CMD_TIMEOUT) != 0)
- goto done;
- }
- }
-
-done:
- if (data != NULL) {
- host->data = NULL;
- dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
- host->blksz = 0;
-
-#if 0 // don't stop twice!
- if (host->hw->flags & MSDC_REMOVABLE && data->error) {
- msdc_abort_data(host);
- /* reset in IRQ, stop command has issued. -> No need */
- }
-#endif
-
- N_MSG(OPS, "CMD<%d> data<%s %s> blksz<%d> block<%d> error<%d>", cmd->opcode, (dma ? "dma" : "pio"),
- (read ? "read " : "write"), data->blksz, data->blocks, data->error);
- }
-
-#if 0 /* --- by chhung */
-#if 1
- //if(host->id==1) {
- if (send_type == SND_CMD) {
- if (cmd->opcode == MMC_SEND_STATUS) {
- if ((cmd->resp[0] & CARD_READY_FOR_DATA) || (CARD_CURRENT_STATE(cmd->resp[0]) != 7)) {
- N_MSG(OPS, "disable clock, CMD13 IDLE");
- msdc_gate_clock(host->id);
- }
- } else {
- N_MSG(OPS, "disable clock, CMD<%d>", cmd->opcode);
- msdc_gate_clock(host->id);
- }
- } else {
- if (read) {
- N_MSG(OPS, "disable clock!!! Read CMD<%d>", cmd->opcode);
- msdc_gate_clock(host->id);
- }
- }
- //}
-#else
- msdc_gate_clock(host->id);
-#endif
-#endif /* end of --- */
-
- if (mrq->cmd->error)
- host->error = 0x001;
- if (mrq->data && mrq->data->error)
- host->error |= 0x010;
- if (mrq->stop && mrq->stop->error)
- host->error |= 0x100;
-
- //if (host->error) ERR_MSG("host->error<%d>", host->error);
-
- return host->error;
-}
-
-static int msdc_app_cmd(struct mmc_host *mmc, struct msdc_host *host)
-{
- struct mmc_command cmd;
- struct mmc_request mrq;
- u32 err;
-
- memset(&cmd, 0, sizeof(struct mmc_command));
- cmd.opcode = MMC_APP_CMD;
-#if 0 /* bug: mmc->card is NULL when ACMD6 is issued */
- cmd.arg = mmc->card->rca << 16;
-#else
- cmd.arg = host->app_cmd_arg;
-#endif
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
-
- memset(&mrq, 0, sizeof(struct mmc_request));
- mrq.cmd = &cmd;
- cmd.mrq = &mrq;
- cmd.data = NULL;
-
- err = msdc_do_command(host, &cmd, 0, CMD_TIMEOUT);
- return err;
-}
-
-static int msdc_tune_cmdrsp(struct msdc_host *host, struct mmc_command *cmd)
-{
- int result = -1;
- u32 rsmpl, cur_rsmpl, orig_rsmpl;
- u32 rrdly, cur_rrdly = 0xffffffff, orig_rrdly;
- u32 skip = 1;
-
- /* ==== SD 3.0 is not supported yet ====
- Tuning level 1: R_SMPL[1]
- Tuning level 2: PAD_CMD_RESP_RXDLY[26:22]
- ==========================*/
-
- // save the previous tune result
- sdr_get_field(host->base + MSDC_IOCON, MSDC_IOCON_RSPL, &orig_rsmpl);
- sdr_get_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY,
- &orig_rrdly);
-
- rrdly = 0;
- do {
- for (rsmpl = 0; rsmpl < 2; rsmpl++) {
- /* Lv1: R_SMPL[1] */
- cur_rsmpl = (orig_rsmpl + rsmpl) % 2;
- if (skip == 1) {
- skip = 0;
- continue;
- }
- sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_RSPL,
- cur_rsmpl);
-
- if (host->app_cmd) {
- result = msdc_app_cmd(host->mmc, host);
- if (result) {
- ERR_MSG("TUNE_CMD app_cmd<%d> failed: RESP_RXDLY<%d>,R_SMPL<%d>",
- host->mrq->cmd->opcode, cur_rrdly, cur_rsmpl);
- continue;
- }
- }
- result = msdc_do_command(host, cmd, 0, CMD_TIMEOUT); // not tune.
- ERR_MSG("TUNE_CMD<%d> %s PAD_CMD_RESP_RXDLY[26:22]<%d> R_SMPL[1]<%d>", cmd->opcode,
- (result == 0) ? "PASS" : "FAIL", cur_rrdly, cur_rsmpl);
-
- if (result == 0)
- return 0;
- if (result != -EIO) {
- ERR_MSG("TUNE_CMD<%d> Error<%d> not -EIO", cmd->opcode, result);
- return result;
- }
-
- /* should be EIO */
- /* check if has data phase */
- if (readl(host->base + SDC_CMD) & 0x1800)
- msdc_abort_data(host);
- }
-
- /* Lv2: PAD_CMD_RESP_RXDLY[26:22] */
- cur_rrdly = (orig_rrdly + rrdly + 1) % 32;
- sdr_set_field(host->base + MSDC_PAD_TUNE,
- MSDC_PAD_TUNE_CMDRRDLY, cur_rrdly);
- } while (++rrdly < 32);
-
- return result;
-}
-
-/* Support SD2.0 Only */
-static int msdc_tune_bread(struct mmc_host *mmc, struct mmc_request *mrq)
-{
- struct msdc_host *host = mmc_priv(mmc);
- u32 ddr = 0;
- u32 dcrc = 0;
- u32 rxdly, cur_rxdly0, cur_rxdly1;
- u32 dsmpl, cur_dsmpl, orig_dsmpl;
- u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3;
- u32 cur_dat4, cur_dat5, cur_dat6, cur_dat7;
- u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3;
- u32 orig_dat4, orig_dat5, orig_dat6, orig_dat7;
- int result = -1;
- u32 skip = 1;
-
- sdr_get_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL, &orig_dsmpl);
-
- /* Tune Method 2. */
- sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
-
- rxdly = 0;
- do {
- for (dsmpl = 0; dsmpl < 2; dsmpl++) {
- cur_dsmpl = (orig_dsmpl + dsmpl) % 2;
- if (skip == 1) {
- skip = 0;
- continue;
- }
- sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL,
- cur_dsmpl);
-
- if (host->app_cmd) {
- result = msdc_app_cmd(host->mmc, host);
- if (result) {
- ERR_MSG("TUNE_BREAD app_cmd<%d> failed", host->mrq->cmd->opcode);
- continue;
- }
- }
- result = msdc_do_request(mmc, mrq);
-
- sdr_get_field(host->base + SDC_DCRC_STS,
- SDC_DCRC_STS_POS | SDC_DCRC_STS_NEG,
- &dcrc); /* RO */
- if (!ddr)
- dcrc &= ~SDC_DCRC_STS_NEG;
- ERR_MSG("TUNE_BREAD<%s> dcrc<0x%x> DATRDDLY0/1<0x%x><0x%x> dsmpl<0x%x>",
- (result == 0 && dcrc == 0) ? "PASS" : "FAIL", dcrc,
- readl(host->base + MSDC_DAT_RDDLY0),
- readl(host->base + MSDC_DAT_RDDLY1), cur_dsmpl);
-
- /* FIXME: result is 0, but dcrc errors still exist */
- if (result == 0 && dcrc == 0) {
- goto done;
- } else {
- /* there is a case where the command times out and the data phase is not processed */
- if (mrq->data->error != 0 &&
- mrq->data->error != -EIO) {
- ERR_MSG("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>",
- result, mrq->cmd->error, mrq->data->error);
- goto done;
- }
- }
- }
-
- cur_rxdly0 = readl(host->base + MSDC_DAT_RDDLY0);
- cur_rxdly1 = readl(host->base + MSDC_DAT_RDDLY1);
-
- /* E1 ECO. YD: Reverse */
- if (readl(host->base + MSDC_ECO_VER) >= 4) {
- orig_dat0 = (cur_rxdly0 >> 24) & 0x1F;
- orig_dat1 = (cur_rxdly0 >> 16) & 0x1F;
- orig_dat2 = (cur_rxdly0 >> 8) & 0x1F;
- orig_dat3 = (cur_rxdly0 >> 0) & 0x1F;
- orig_dat4 = (cur_rxdly1 >> 24) & 0x1F;
- orig_dat5 = (cur_rxdly1 >> 16) & 0x1F;
- orig_dat6 = (cur_rxdly1 >> 8) & 0x1F;
- orig_dat7 = (cur_rxdly1 >> 0) & 0x1F;
- } else {
- orig_dat0 = (cur_rxdly0 >> 0) & 0x1F;
- orig_dat1 = (cur_rxdly0 >> 8) & 0x1F;
- orig_dat2 = (cur_rxdly0 >> 16) & 0x1F;
- orig_dat3 = (cur_rxdly0 >> 24) & 0x1F;
- orig_dat4 = (cur_rxdly1 >> 0) & 0x1F;
- orig_dat5 = (cur_rxdly1 >> 8) & 0x1F;
- orig_dat6 = (cur_rxdly1 >> 16) & 0x1F;
- orig_dat7 = (cur_rxdly1 >> 24) & 0x1F;
- }
-
- if (ddr) {
- cur_dat0 = (dcrc & (1 << 0) || dcrc & (1 << 8)) ? ((orig_dat0 + 1) % 32) : orig_dat0;
- cur_dat1 = (dcrc & (1 << 1) || dcrc & (1 << 9)) ? ((orig_dat1 + 1) % 32) : orig_dat1;
- cur_dat2 = (dcrc & (1 << 2) || dcrc & (1 << 10)) ? ((orig_dat2 + 1) % 32) : orig_dat2;
- cur_dat3 = (dcrc & (1 << 3) || dcrc & (1 << 11)) ? ((orig_dat3 + 1) % 32) : orig_dat3;
- } else {
- cur_dat0 = (dcrc & (1 << 0)) ? ((orig_dat0 + 1) % 32) : orig_dat0;
- cur_dat1 = (dcrc & (1 << 1)) ? ((orig_dat1 + 1) % 32) : orig_dat1;
- cur_dat2 = (dcrc & (1 << 2)) ? ((orig_dat2 + 1) % 32) : orig_dat2;
- cur_dat3 = (dcrc & (1 << 3)) ? ((orig_dat3 + 1) % 32) : orig_dat3;
- }
- cur_dat4 = (dcrc & (1 << 4)) ? ((orig_dat4 + 1) % 32) : orig_dat4;
- cur_dat5 = (dcrc & (1 << 5)) ? ((orig_dat5 + 1) % 32) : orig_dat5;
- cur_dat6 = (dcrc & (1 << 6)) ? ((orig_dat6 + 1) % 32) : orig_dat6;
- cur_dat7 = (dcrc & (1 << 7)) ? ((orig_dat7 + 1) % 32) : orig_dat7;
-
- cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0);
- cur_rxdly1 = (cur_dat4 << 24) | (cur_dat5 << 16) | (cur_dat6 << 8) | (cur_dat7 << 0);
-
- writel(cur_rxdly0, host->base + MSDC_DAT_RDDLY0);
- writel(cur_rxdly1, host->base + MSDC_DAT_RDDLY1);
-
- } while (++rxdly < 32);
-
-done:
- return result;
-}
-
-static int msdc_tune_bwrite(struct mmc_host *mmc, struct mmc_request *mrq)
-{
- struct msdc_host *host = mmc_priv(mmc);
-
- u32 wrrdly, cur_wrrdly = 0xffffffff, orig_wrrdly;
- u32 dsmpl, cur_dsmpl, orig_dsmpl;
- u32 rxdly, cur_rxdly0;
- u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3;
- u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3;
- int result = -1;
- u32 skip = 1;
-
- // MSDC_IOCON_DDR50CKD needs to be checked. [FIXME]
-
- sdr_get_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY,
- &orig_wrrdly);
- sdr_get_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL, &orig_dsmpl);
-
- /* Tune Method 2. just DAT0 */
- sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
- cur_rxdly0 = readl(host->base + MSDC_DAT_RDDLY0);
-
- /* E1 ECO. YD: Reverse */
- if (readl(host->base + MSDC_ECO_VER) >= 4) {
- orig_dat0 = (cur_rxdly0 >> 24) & 0x1F;
- orig_dat1 = (cur_rxdly0 >> 16) & 0x1F;
- orig_dat2 = (cur_rxdly0 >> 8) & 0x1F;
- orig_dat3 = (cur_rxdly0 >> 0) & 0x1F;
- } else {
- orig_dat0 = (cur_rxdly0 >> 0) & 0x1F;
- orig_dat1 = (cur_rxdly0 >> 8) & 0x1F;
- orig_dat2 = (cur_rxdly0 >> 16) & 0x1F;
- orig_dat3 = (cur_rxdly0 >> 24) & 0x1F;
- }
-
- rxdly = 0;
- do {
- wrrdly = 0;
- do {
- for (dsmpl = 0; dsmpl < 2; dsmpl++) {
- cur_dsmpl = (orig_dsmpl + dsmpl) % 2;
- if (skip == 1) {
- skip = 0;
- continue;
- }
- sdr_set_field(host->base + MSDC_IOCON,
- MSDC_IOCON_DSPL, cur_dsmpl);
-
- if (host->app_cmd) {
- result = msdc_app_cmd(host->mmc, host);
- if (result) {
- ERR_MSG("TUNE_BWRITE app_cmd<%d> failed", host->mrq->cmd->opcode);
- continue;
- }
- }
- result = msdc_do_request(mmc, mrq);
-
- ERR_MSG("TUNE_BWRITE<%s> DSPL<%d> DATWRDLY<%d> MSDC_DAT_RDDLY0<0x%x>",
- result == 0 ? "PASS" : "FAIL",
- cur_dsmpl, cur_wrrdly, cur_rxdly0);
-
- if (result == 0) {
- goto done;
- } else {
- /* there is a case where the command times out and the data phase is not processed */
- if (mrq->data->error != -EIO) {
- ERR_MSG("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>",
- result, mrq->cmd->error, mrq->data->error);
- goto done;
- }
- }
- }
- cur_wrrdly = (orig_wrrdly + wrrdly + 1) % 32;
- sdr_set_field(host->base + MSDC_PAD_TUNE,
- MSDC_PAD_TUNE_DATWRDLY, cur_wrrdly);
- } while (++wrrdly < 32);
-
- cur_dat0 = (orig_dat0 + rxdly) % 32; /* only adjust bit-1 for crc */
- cur_dat1 = orig_dat1;
- cur_dat2 = orig_dat2;
- cur_dat3 = orig_dat3;
-
- cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0);
- writel(cur_rxdly0, host->base + MSDC_DAT_RDDLY0);
- } while (++rxdly < 32);
-
-done:
- return result;
-}
-
-static int msdc_get_card_status(struct mmc_host *mmc, struct msdc_host *host, u32 *status)
-{
- struct mmc_command cmd;
- struct mmc_request mrq;
- u32 err;
-
- memset(&cmd, 0, sizeof(struct mmc_command));
- cmd.opcode = MMC_SEND_STATUS;
- if (mmc->card) {
- cmd.arg = mmc->card->rca << 16;
- } else {
- ERR_MSG("cmd13 mmc card is null");
- cmd.arg = host->app_cmd_arg;
- }
- cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
-
- memset(&mrq, 0, sizeof(struct mmc_request));
- mrq.cmd = &cmd;
- cmd.mrq = &mrq;
- cmd.data = NULL;
-
- err = msdc_do_command(host, &cmd, 1, CMD_TIMEOUT);
-
- if (status)
- *status = cmd.resp[0];
-
- return err;
-}
-
-static int msdc_check_busy(struct mmc_host *mmc, struct msdc_host *host)
-{
- u32 err = 0;
- u32 status = 0;
-
- do {
- err = msdc_get_card_status(mmc, host, &status);
- if (err)
- return err;
- /* need cmd12? */
- ERR_MSG("cmd<13> resp<0x%x>", status);
- } while (R1_CURRENT_STATE(status) == 7);
-
- return err;
-}
-
-/* failed when msdc_do_request */
-static int msdc_tune_request(struct mmc_host *mmc, struct mmc_request *mrq)
-{
- struct msdc_host *host = mmc_priv(mmc);
- struct mmc_data *data;
- //u32 base = host->base;
- int ret = 0, read;
-
- data = mrq->cmd->data;
-
- read = data->flags & MMC_DATA_READ ? 1 : 0;
-
- if (read) {
- if (data->error == -EIO)
- ret = msdc_tune_bread(mmc, mrq);
- } else {
- ret = msdc_check_busy(mmc, host);
- if (ret) {
- ERR_MSG("XXX cmd13 wait program done failed");
- return ret;
- }
- /* CRC and TO */
- /* Fix me: don't care card status? */
- ret = msdc_tune_bwrite(mmc, mrq);
- }
-
- return ret;
-}
-
-/* ops.request */
-static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
-{
- struct msdc_host *host = mmc_priv(mmc);
-
- //=== for sdio profile ===
-#if 0 /* --- by chhung */
- u32 old_H32, old_L32, new_H32, new_L32;
- u32 ticks = 0, opcode = 0, sizes = 0, bRx = 0;
-#endif /* end of --- */
-
- WARN_ON(host->mrq);
-
- /* start to process */
- spin_lock(&host->lock);
-#if 0 /* --- by chhung */
- if (sdio_pro_enable) { //=== for sdio profile ===
- if (mrq->cmd->opcode == 52 || mrq->cmd->opcode == 53)
- GPT_GetCounter64(&old_L32, &old_H32);
- }
-#endif /* end of --- */
-
- host->mrq = mrq;
-
- if (msdc_do_request(mmc, mrq)) {
- if (host->hw->flags & MSDC_REMOVABLE && ralink_soc == MT762X_SOC_MT7621AT && mrq->data && mrq->data->error)
- msdc_tune_request(mmc, mrq);
- }
-
- /* ==== when request done, check if app_cmd ==== */
- if (mrq->cmd->opcode == MMC_APP_CMD) {
- host->app_cmd = 1;
- host->app_cmd_arg = mrq->cmd->arg; /* save the RCA */
- } else {
- host->app_cmd = 0;
- //host->app_cmd_arg = 0;
- }
-
- host->mrq = NULL;
-
-#if 0 /* --- by chhung */
- //=== for sdio profile ===
- if (sdio_pro_enable) {
- if (mrq->cmd->opcode == 52 || mrq->cmd->opcode == 53) {
- GPT_GetCounter64(&new_L32, &new_H32);
- ticks = msdc_time_calc(old_L32, old_H32, new_L32, new_H32);
-
- opcode = mrq->cmd->opcode;
- if (mrq->cmd->data) {
- sizes = mrq->cmd->data->blocks * mrq->cmd->data->blksz;
- bRx = mrq->cmd->data->flags & MMC_DATA_READ ? 1 : 0;
- } else {
- bRx = mrq->cmd->arg & 0x80000000 ? 1 : 0;
- }
-
- if (!mrq->cmd->error)
- msdc_performance(opcode, sizes, bRx, ticks);
- }
- }
-#endif /* end of --- */
- spin_unlock(&host->lock);
-
- mmc_request_done(mmc, mrq);
-
- return;
-}
-
-/* called by ops.set_ios */
-static void msdc_set_buswidth(struct msdc_host *host, u32 width)
-{
- u32 val = readl(host->base + SDC_CFG);
-
- val &= ~SDC_CFG_BUSWIDTH;
-
- switch (width) {
- default:
- case MMC_BUS_WIDTH_1:
- width = 1;
- val |= (MSDC_BUS_1BITS << 16);
- break;
- case MMC_BUS_WIDTH_4:
- val |= (MSDC_BUS_4BITS << 16);
- break;
- case MMC_BUS_WIDTH_8:
- val |= (MSDC_BUS_8BITS << 16);
- break;
- }
-
- writel(val, host->base + SDC_CFG);
-
- N_MSG(CFG, "Bus Width = %d", width);
-}
-
-/* ops.set_ios */
-static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct msdc_host *host = mmc_priv(mmc);
- u32 ddr = 0;
-
-#ifdef MT6575_SD_DEBUG
- static char *vdd[] = {
- "1.50v", "1.55v", "1.60v", "1.65v", "1.70v", "1.80v", "1.90v",
- "2.00v", "2.10v", "2.20v", "2.30v", "2.40v", "2.50v", "2.60v",
- "2.70v", "2.80v", "2.90v", "3.00v", "3.10v", "3.20v", "3.30v",
- "3.40v", "3.50v", "3.60v"
- };
- static char *power_mode[] = {
- "OFF", "UP", "ON"
- };
- static char *bus_mode[] = {
- "UNKNOWN", "OPENDRAIN", "PUSHPULL"
- };
- static char *timing[] = {
- "LEGACY", "MMC_HS", "SD_HS"
- };
-
- printk("SET_IOS: CLK(%dkHz), BUS(%s), BW(%u), PWR(%s), VDD(%s), TIMING(%s)",
- ios->clock / 1000, bus_mode[ios->bus_mode],
- (ios->bus_width == MMC_BUS_WIDTH_4) ? 4 : 1,
- power_mode[ios->power_mode], vdd[ios->vdd], timing[ios->timing]);
-#endif
-
- msdc_set_buswidth(host, ios->bus_width);
-
- /* Power control ??? */
- switch (ios->power_mode) {
- case MMC_POWER_OFF:
- case MMC_POWER_UP:
- // msdc_set_power_mode(host, ios->power_mode); /* --- by chhung */
- break;
- case MMC_POWER_ON:
- host->power_mode = MMC_POWER_ON;
- break;
- default:
- break;
- }
-
- /* Clock control */
- if (host->mclk != ios->clock) {
- if (ios->clock > 25000000) {
- //if (!(host->hw->flags & MSDC_REMOVABLE)) {
- INIT_MSG("SD data latch edge<%d>", MSDC_SMPL_FALLING);
- sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_RSPL,
- MSDC_SMPL_FALLING);
- sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL,
- MSDC_SMPL_FALLING);
- //} /* for tuning debug */
- } else { /* default value */
- writel(0x00000000, host->base + MSDC_IOCON);
- // writel(0x00000000, host->base + MSDC_DAT_RDDLY0);
-
- // for MT7620 E2 and afterward
- writel(0x10101010, host->base + MSDC_DAT_RDDLY0);
-
- writel(0x00000000, host->base + MSDC_DAT_RDDLY1);
- // writel(0x00000000, host->base + MSDC_PAD_TUNE);
-
- // for MT7620 E2 and afterward
- writel(0x84101010, host->base + MSDC_PAD_TUNE);
- }
- msdc_set_mclk(host, ddr, ios->clock);
- }
-}
-
-/* ops.get_ro */
-static int msdc_ops_get_ro(struct mmc_host *mmc)
-{
- struct msdc_host *host = mmc_priv(mmc);
- unsigned long flags;
- int ro = 0;
-
- if (host->hw->flags & MSDC_WP_PIN_EN) { /* set for card */
- spin_lock_irqsave(&host->lock, flags);
- ro = (readl(host->base + MSDC_PS) >> 31);
- spin_unlock_irqrestore(&host->lock, flags);
- }
- return ro;
-}
-
-/* ops.get_cd */
-static int msdc_ops_get_cd(struct mmc_host *mmc)
-{
- struct msdc_host *host = mmc_priv(mmc);
- unsigned long flags;
- int present = 1;
-
- /* For SDIO, MSDC_REMOVABLE is not set, so always return 1 */
- if (!(host->hw->flags & MSDC_REMOVABLE)) {
- /* For SDIO, reading the H/W always gives <1>, but it may sometimes time out */
-#if 1
- host->card_inserted = 1;
- return 1;
-#else
- host->card_inserted = (host->pm_state.event == PM_EVENT_USER_RESUME) ? 1 : 0;
- INIT_MSG("sdio ops_get_cd<%d>", host->card_inserted);
- return host->card_inserted;
-#endif
- }
-
- /* MSDC_CD_PIN_EN set for card */
- if (host->hw->flags & MSDC_CD_PIN_EN) {
- spin_lock_irqsave(&host->lock, flags);
-#if 0
- present = host->card_inserted; /* why not read from H/W: Fix me*/
-#else
- // CD
- present = readl(host->base + MSDC_PS) & MSDC_PS_CDSTS;
- if (cd_active_low)
- present = present ? 0 : 1;
- else
- present = present ? 1 : 0;
- host->card_inserted = present;
-#endif
- spin_unlock_irqrestore(&host->lock, flags);
- } else {
- present = 0; /* TODO? Check DAT3 pins for card detection */
- }
-
- INIT_MSG("ops_get_cd return<%d>", present);
- return present;
-}
-
-static struct mmc_host_ops mt_msdc_ops = {
- .request = msdc_ops_request,
- .set_ios = msdc_ops_set_ios,
- .get_ro = msdc_ops_get_ro,
- .get_cd = msdc_ops_get_cd,
-};
-
-/*--------------------------------------------------------------------------*/
-/* interrupt handler */
-/*--------------------------------------------------------------------------*/
-static irqreturn_t msdc_irq(int irq, void *dev_id)
-{
- struct msdc_host *host = (struct msdc_host *)dev_id;
- struct mmc_data *data = host->data;
- struct mmc_command *cmd = host->cmd;
-
- u32 cmdsts = MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | MSDC_INT_CMDRDY |
- MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | MSDC_INT_ACMDRDY |
- MSDC_INT_ACMD19_DONE;
- u32 datsts = MSDC_INT_DATCRCERR | MSDC_INT_DATTMO;
-
- u32 intsts = readl(host->base + MSDC_INT);
- u32 inten = readl(host->base + MSDC_INTEN) & intsts;
-
- writel(intsts, host->base + MSDC_INT); /* clear interrupts */
- /* printing messages here would cause a fatal error */
-
- /* card change interrupt */
- if (intsts & MSDC_INT_CDSC) {
- if (host->mmc->caps & MMC_CAP_NEEDS_POLL)
- return IRQ_HANDLED;
- IRQ_MSG("MSDC_INT_CDSC irq<0x%.8x>", intsts);
- schedule_delayed_work(&host->card_delaywork, HZ);
- /* tuning when plug card ? */
- }
-
- /* sdio interrupt */
- if (intsts & MSDC_INT_SDIOIRQ) {
- IRQ_MSG("XXX MSDC_INT_SDIOIRQ"); /* seems not sdio irq */
- //mmc_signal_sdio_irq(host->mmc);
- }
-
- /* transfer complete interrupt */
- if (data != NULL) {
- if (inten & MSDC_INT_XFER_COMPL) {
- data->bytes_xfered = host->xfer_size;
- complete(&host->xfer_done);
- }
-
- if (intsts & datsts) {
- /* do a basic reset, or the stop command will hit sdc_busy */
- msdc_reset_hw(host);
- msdc_clr_fifo(host);
- msdc_clr_int();
-
- if (intsts & MSDC_INT_DATTMO) {
- IRQ_MSG("XXX CMD<%d> MSDC_INT_DATTMO", host->mrq->cmd->opcode);
- data->error = -ETIMEDOUT;
- } else if (intsts & MSDC_INT_DATCRCERR) {
- IRQ_MSG("XXX CMD<%d> MSDC_INT_DATCRCERR, SDC_DCRC_STS<0x%x>", host->mrq->cmd->opcode, readl(host->base + SDC_DCRC_STS));
- data->error = -EIO;
- }
-
- //if(readl(MSDC_INTEN) & MSDC_INT_XFER_COMPL) {
- complete(&host->xfer_done); /* Read CRC errors come fast; XFER_COMPL is not enabled yet */
- }
- }
-
- /* command interrupts */
- if ((cmd != NULL) && (intsts & cmdsts)) {
- if ((intsts & MSDC_INT_CMDRDY) || (intsts & MSDC_INT_ACMDRDY) ||
- (intsts & MSDC_INT_ACMD19_DONE)) {
- u32 *rsp = &cmd->resp[0];
-
- switch (host->cmd_rsp) {
- case RESP_NONE:
- break;
- case RESP_R2:
- *rsp++ = readl(host->base + SDC_RESP3);
- *rsp++ = readl(host->base + SDC_RESP2);
- *rsp++ = readl(host->base + SDC_RESP1);
- *rsp++ = readl(host->base + SDC_RESP0);
- break;
- default: /* Response types 1, 3, 4, 5, 6, 7(1b) */
- if ((intsts & MSDC_INT_ACMDRDY) || (intsts & MSDC_INT_ACMD19_DONE))
- *rsp = readl(host->base + SDC_ACMD_RESP);
- else
- *rsp = readl(host->base + SDC_RESP0);
- break;
- }
- } else if ((intsts & MSDC_INT_RSPCRCERR) || (intsts & MSDC_INT_ACMDCRCERR)) {
- if (intsts & MSDC_INT_ACMDCRCERR)
- IRQ_MSG("XXX CMD<%d> MSDC_INT_ACMDCRCERR", cmd->opcode);
- else
- IRQ_MSG("XXX CMD<%d> MSDC_INT_RSPCRCERR", cmd->opcode);
- cmd->error = -EIO;
- } else if ((intsts & MSDC_INT_CMDTMO) || (intsts & MSDC_INT_ACMDTMO)) {
- if (intsts & MSDC_INT_ACMDTMO)
- IRQ_MSG("XXX CMD<%d> MSDC_INT_ACMDTMO", cmd->opcode);
- else
- IRQ_MSG("XXX CMD<%d> MSDC_INT_CMDTMO", cmd->opcode);
- cmd->error = -ETIMEDOUT;
- msdc_reset_hw(host);
- msdc_clr_fifo(host);
- msdc_clr_int();
- }
- complete(&host->cmd_done);
- }
-
- /* mmc irq interrupts */
- if (intsts & MSDC_INT_MMCIRQ)
- printk(KERN_INFO "msdc[%d] MMCIRQ: SDC_CSTS=0x%.8x\r\n",
- host->id, readl(host->base + SDC_CSTS));
-
-#ifdef MT6575_SD_DEBUG
- {
-/* msdc_int_reg *int_reg = (msdc_int_reg*)&intsts;*/
- N_MSG(INT, "IRQ_EVT(0x%x): MMCIRQ(%d) CDSC(%d), ACRDY(%d), ACTMO(%d), ACCRE(%d) AC19DN(%d)",
- intsts,
- int_reg->mmcirq,
- int_reg->cdsc,
- int_reg->atocmdrdy,
- int_reg->atocmdtmo,
- int_reg->atocmdcrc,
- int_reg->atocmd19done);
- N_MSG(INT, "IRQ_EVT(0x%x): SDIO(%d) CMDRDY(%d), CMDTMO(%d), RSPCRC(%d), CSTA(%d)",
- intsts,
- int_reg->sdioirq,
- int_reg->cmdrdy,
- int_reg->cmdtmo,
- int_reg->rspcrc,
- int_reg->csta);
- N_MSG(INT, "IRQ_EVT(0x%x): XFCMP(%d) DXDONE(%d), DATTMO(%d), DATCRC(%d), DMAEMP(%d)",
- intsts,
- int_reg->xfercomp,
- int_reg->dxferdone,
- int_reg->dattmo,
- int_reg->datcrc,
- int_reg->dmaqempty);
- }
-#endif
-
- return IRQ_HANDLED;
-}
-
-/*--------------------------------------------------------------------------*/
-/* platform_driver members */
-/*--------------------------------------------------------------------------*/
-/* called by msdc_drv_probe/remove */
-static void msdc_enable_cd_irq(struct msdc_host *host, int enable)
-{
- struct msdc_hw *hw = host->hw;
-
- /* for sdio, not set */
- if ((hw->flags & MSDC_CD_PIN_EN) == 0) {
- /* Pull down card detection pin since it is not available */
- /*
- if (hw->config_gpio_pin)
- hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN);
- */
- sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
- sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
- sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
- return;
- }
-
- N_MSG(CFG, "CD IRQ Enable(%d)", enable);
-
- if (enable) {
- /* The card detection circuit relies on the core power, so the core
- * power must not be turned off. Add a reference count here to keep
- * the core power alive.
- */
- //msdc_vcore_on(host); //done in msdc_init_hw()
-
- if (hw->config_gpio_pin) /* NULL */
- hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_UP);
-
- sdr_set_field(host->base + MSDC_PS, MSDC_PS_CDDEBOUNCE,
- DEFAULT_DEBOUNCE);
- sdr_set_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
- sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
-
- /* not documented! Fix me */
- sdr_set_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
- } else {
- if (hw->config_gpio_pin) /* NULL */
- hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN);
-
- sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
- sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
- sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
-
- /* Drop the reference count on core power here since the card
- * detection circuit is shut down.
- */
- //msdc_vcore_off(host);
- }
-}
-
-/* called by msdc_drv_probe */
-static void msdc_init_hw(struct msdc_host *host)
-{
-
- /* Power on */
-#if 0 /* --- by chhung */
- msdc_vcore_on(host);
- msdc_pin_reset(host, MSDC_PIN_PULL_UP);
- msdc_select_clksrc(host, hw->clk_src);
- enable_clock(PERI_MSDC0_PDN + host->id, "SD");
- msdc_vdd_on(host);
-#endif /* end of --- */
- /* Configure to MMC/SD mode */
- sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_MODE, MSDC_SDMMC);
-
- /* Reset */
- msdc_reset_hw(host);
- msdc_clr_fifo(host);
-
- /* Disable card detection */
- sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
-
- /* Disable and clear all interrupts */
- sdr_clr_bits(host->base + MSDC_INTEN, readl(host->base + MSDC_INTEN));
- writel(readl(host->base + MSDC_INT), host->base + MSDC_INT);
-
-#if 1
- /* reset tuning parameter */
- writel(0x00090000, host->base + MSDC_PAD_CTL0);
- writel(0x000A0000, host->base + MSDC_PAD_CTL1);
- writel(0x000A0000, host->base + MSDC_PAD_CTL2);
- // writel( 0x00000000, host->base + MSDC_PAD_TUNE);
-
- // for MT7620 E2 and afterward
- writel(0x84101010, host->base + MSDC_PAD_TUNE);
-
- // writel(0x00000000, host->base + MSDC_DAT_RDDLY0);
-
- // for MT7620 E2 and afterward
- writel(0x10101010, host->base + MSDC_DAT_RDDLY0);
-
- writel(0x00000000, host->base + MSDC_DAT_RDDLY1);
- writel(0x00000000, host->base + MSDC_IOCON);
-#if 0 // use MT7620 default value: 0x403c004f
- /* bit0 modified: Rx Data Clock Source: 1 -> 2.0 */
- writel(0x003C000F, host->base + MSDC_PATCH_BIT0);
-#endif
-
- if (readl(host->base + MSDC_ECO_VER) >= 4) {
- if (host->id == 1) {
- sdr_set_field(host->base + MSDC_PATCH_BIT1,
- MSDC_PATCH_BIT1_WRDAT_CRCS, 1);
- sdr_set_field(host->base + MSDC_PATCH_BIT1,
- MSDC_PATCH_BIT1_CMD_RSP, 1);
-
- /* internal clock: latch read data */
- sdr_set_bits(host->base + MSDC_PATCH_BIT0,
- MSDC_PATCH_BIT_CKGEN_CK);
- }
- }
-#endif
-
- /* For safety, SDC_CFG.SDIO_INT_DET_EN should be cleared and SDC_CFG.SDIO
- set in the pre-loader, U-Boot and kernel drivers; SDC_CFG.SDIO_INT_DET_EN
- is only set when the kernel driver wants to use the SDIO bus interrupt */
- /* Configure to enable SDIO mode. This is mandatory, otherwise SDIO CMD5 fails */
- sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
-
- /* disable SDIO device interrupt detection */
- sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
-
- /* enable SMT for glitch filter */
- sdr_set_bits(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKSMT);
- sdr_set_bits(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDSMT);
- sdr_set_bits(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATSMT);
-
-#if 1
- /* set clk, cmd, dat pad driving */
- sdr_set_field(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 4);
- sdr_set_field(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 4);
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 4);
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 4);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 4);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 4);
-#else
- sdr_set_field(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 0);
- sdr_set_field(host->base + MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 0);
-#endif
-
- /* set sampling edge */
-
- /* write crc timeout detection */
- sdr_set_field(host->base + MSDC_PATCH_BIT0, 1 << 30, 1);
-
- /* Configure to default data timeout */
- sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, DEFAULT_DTOC);
-
- msdc_set_buswidth(host, MMC_BUS_WIDTH_1);
-
- N_MSG(FUC, "init hardware done!");
-}
-
-/* called by msdc_drv_remove */
-static void msdc_deinit_hw(struct msdc_host *host)
-{
- /* Disable and clear all interrupts */
- sdr_clr_bits(host->base + MSDC_INTEN, readl(host->base + MSDC_INTEN));
- writel(readl(host->base + MSDC_INT), host->base + MSDC_INT);
-
- /* Disable card detection */
- msdc_enable_cd_irq(host, 0);
- // msdc_set_power_mode(host, MMC_POWER_OFF); /* make sure power down */ /* --- by chhung */
-}
-
-/* init gpd and bd list in msdc_drv_probe */
-static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
-{
- struct gpd *gpd = dma->gpd;
- struct bd *bd = dma->bd;
- int i;
-
- /* We only support one gpd, but gpd->next must be set for descriptor
- * DMA. That's why we allocate two gpd structures.
- */
-
- memset(gpd, 0, sizeof(struct gpd) * 2);
-
- gpd->bdp = 1; /* hwo, cs, bd pointer */
- gpd->ptr = (void *)dma->bd_addr; /* physical address */
- gpd->next = (void *)((u32)dma->gpd_addr + sizeof(struct gpd));
-
- memset(bd, 0, sizeof(struct bd) * MAX_BD_NUM);
- for (i = 0; i < (MAX_BD_NUM - 1); i++)
- bd[i].next = (void *)(dma->bd_addr + sizeof(*bd) * (i + 1));
-}
-
-static int msdc_drv_probe(struct platform_device *pdev)
-{
- struct resource *res;
- __iomem void *base;
- struct mmc_host *mmc;
- struct msdc_host *host;
- struct msdc_hw *hw;
- int ret;
-
- hw = &msdc0_hw;
-
- if (of_property_read_bool(pdev->dev.of_node, "mtk,wp-en"))
- msdc0_hw.flags |= MSDC_WP_PIN_EN;
-
- /* Allocate MMC host for this device */
- mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
- if (!mmc)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto host_free;
- }
-
- /* Set host parameters to mmc */
- mmc->ops = &mt_msdc_ops;
- mmc->f_min = HOST_MIN_MCLK;
- mmc->f_max = HOST_MAX_MCLK;
- mmc->ocr_avail = MSDC_OCR_AVAIL;
-
- mmc->caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
-
- //TODO: read this as bus-width from dt (via mmc_of_parse)
- mmc->caps |= MMC_CAP_4_BIT_DATA;
-
- cd_active_low = !of_property_read_bool(pdev->dev.of_node, "mediatek,cd-high");
-
- if (of_property_read_bool(pdev->dev.of_node, "mediatek,cd-poll"))
- mmc->caps |= MMC_CAP_NEEDS_POLL;
-
- /* Tunable parameters for MMC core transfer sizes */
- mmc->max_segs = MAX_HW_SGMTS;
-
- mmc->max_seg_size = MAX_SGMT_SZ;
- mmc->max_blk_size = HOST_MAX_BLKSZ;
- mmc->max_req_size = MAX_REQ_SZ;
- mmc->max_blk_count = mmc->max_req_size;
-
- host = mmc_priv(mmc);
- host->hw = hw;
- host->mmc = mmc;
- host->id = pdev->id;
- if (host->id < 0 || host->id >= 4)
- host->id = 0;
- host->error = 0;
-
- host->irq = platform_get_irq(pdev, 0);
- if (host->irq < 0) {
- ret = -EINVAL;
- goto host_free;
- }
-
- host->base = base;
- host->mclk = 0; /* mclk: the clock requested by the mmc sub-system */
- host->hclk = hclks[hw->clk_src]; /* hclk: clock of the clock source feeding the msdc controller */
- host->sclk = 0; /* sclk: the real clock after division */
- host->pm_state = PMSG_RESUME;
- host->suspend = 0;
- host->core_clkon = 0;
- host->card_clkon = 0;
- host->core_power = 0;
- host->power_mode = MMC_POWER_OFF;
-// host->card_inserted = hw->flags & MSDC_REMOVABLE ? 0 : 1;
- host->timeout_ns = 0;
- host->timeout_clks = DEFAULT_DTOC * 65536;
-
- host->mrq = NULL;
- //init_MUTEX(&host->sem); /* we don't need to support multiple threads access */
-
- mmc_dev(mmc)->dma_mask = NULL;
-
- /* using dma_alloc_coherent */ /* todo: use one allocation for all 4 slots */
- host->dma.gpd = dma_alloc_coherent(&pdev->dev,
- MAX_GPD_NUM * sizeof(struct gpd),
- &host->dma.gpd_addr, GFP_KERNEL);
- host->dma.bd = dma_alloc_coherent(&pdev->dev,
- MAX_BD_NUM * sizeof(struct bd),
- &host->dma.bd_addr, GFP_KERNEL);
- if (!host->dma.gpd || !host->dma.bd) {
- ret = -ENOMEM;
- goto release_mem;
- }
- msdc_init_gpd_bd(host, &host->dma);
-
- INIT_DELAYED_WORK(&host->card_delaywork, msdc_tasklet_card);
- spin_lock_init(&host->lock);
- msdc_init_hw(host);
-
- /* TODO: check whether flags == 0 is correct; the mtk-sd driver uses
- * IRQF_TRIGGER_LOW | IRQF_ONESHOT for flags.
- *
- * With flags == 0 the trigger polarity is determined by the device
- * tree, but the oneshot flag is not set; that may be acceptable if
- * the SoC is oneshot safe.
- */
- ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq, 0, pdev->name,
- host);
- if (ret)
- goto release;
-
- platform_set_drvdata(pdev, mmc);
-
- ret = mmc_add_host(mmc);
- if (ret)
- goto release;
-
- /* Config card detection pin and enable interrupts */
- if (hw->flags & MSDC_CD_PIN_EN) { /* set for card */
- msdc_enable_cd_irq(host, 1);
- } else {
- msdc_enable_cd_irq(host, 0);
- }
-
- return 0;
-
-release:
- platform_set_drvdata(pdev, NULL);
- msdc_deinit_hw(host);
- cancel_delayed_work_sync(&host->card_delaywork);
-
-release_mem:
- if (host->dma.gpd)
- dma_free_coherent(&pdev->dev, MAX_GPD_NUM * sizeof(struct gpd),
- host->dma.gpd, host->dma.gpd_addr);
- if (host->dma.bd)
- dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct bd),
- host->dma.bd, host->dma.bd_addr);
-host_free:
- mmc_free_host(mmc);
-
- return ret;
-}
-
- /* 4 devices share one driver; "drvdata" tells them apart */
-static int msdc_drv_remove(struct platform_device *pdev)
-{
- struct mmc_host *mmc;
- struct msdc_host *host;
-
- mmc = platform_get_drvdata(pdev);
- BUG_ON(!mmc);
-
- host = mmc_priv(mmc);
- BUG_ON(!host);
-
- ERR_MSG("removed !!!");
-
- platform_set_drvdata(pdev, NULL);
- mmc_remove_host(host->mmc);
- msdc_deinit_hw(host);
-
- cancel_delayed_work_sync(&host->card_delaywork);
-
- dma_free_coherent(&pdev->dev, MAX_GPD_NUM * sizeof(struct gpd),
- host->dma.gpd, host->dma.gpd_addr);
- dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct bd),
- host->dma.bd, host->dma.bd_addr);
-
- mmc_free_host(host->mmc);
-
- return 0;
-}
-
-/* Fix me: Power Flow */
-#ifdef CONFIG_PM
-
-static void msdc_drv_pm(struct platform_device *pdev, pm_message_t state)
-{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- if (mmc) {
- struct msdc_host *host = mmc_priv(mmc);
- msdc_pm(state, (void *)host);
- }
-}
-
-static int msdc_drv_suspend(struct platform_device *pdev, pm_message_t state)
-{
- if (state.event == PM_EVENT_SUSPEND)
- msdc_drv_pm(pdev, state);
- return 0;
-}
-
-static int msdc_drv_resume(struct platform_device *pdev)
-{
- struct pm_message state;
-
- state.event = PM_EVENT_RESUME;
- msdc_drv_pm(pdev, state);
- return 0;
-}
-#endif
-
-static const struct of_device_id mt7620_sdhci_match[] = {
- { .compatible = "ralink,mt7620-sdhci" },
- {},
-};
-MODULE_DEVICE_TABLE(of, mt7620_sdhci_match);
-
-static struct platform_driver mt_msdc_driver = {
- .probe = msdc_drv_probe,
- .remove = msdc_drv_remove,
-#ifdef CONFIG_PM
- .suspend = msdc_drv_suspend,
- .resume = msdc_drv_resume,
-#endif
- .driver = {
- .name = DRV_NAME,
- .of_match_table = mt7620_sdhci_match,
- },
-};
-
-/*--------------------------------------------------------------------------*/
-/* module init/exit */
-/*--------------------------------------------------------------------------*/
-static int __init mt_msdc_init(void)
-{
- int ret;
- u32 reg;
-
- // Set the pins for sdxc to sdxc mode
- // FIXME: this should be done by pinctrl and not by the SD driver
- reg = readl((void __iomem *)(RALINK_SYSCTL_BASE + 0x60)) & ~(0x3 << 18);
- writel(reg, (void __iomem *)(RALINK_SYSCTL_BASE + 0x60));
-
- ret = platform_driver_register(&mt_msdc_driver);
- if (ret) {
- printk(KERN_ERR DRV_NAME ": Can't register driver\n");
- return ret;
- }
-
-#if defined(MT6575_SD_DEBUG)
- msdc_debug_proc_init();
-#endif
- return 0;
-}
-
-static void __exit mt_msdc_exit(void)
-{
- platform_driver_unregister(&mt_msdc_driver);
-}
-
-module_init(mt_msdc_init);
-module_exit(mt_msdc_exit);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MediaTek MT6575 SD/MMC Card Driver");
-MODULE_AUTHOR("Infinity Chen <infinity.chen@mediatek.com>");
diff --git a/drivers/staging/mt7621-spi/spi-mt7621.c b/drivers/staging/mt7621-spi/spi-mt7621.c
index 578aa6824ad3..75ed48f60c8c 100644
--- a/drivers/staging/mt7621-spi/spi-mt7621.c
+++ b/drivers/staging/mt7621-spi/spi-mt7621.c
@@ -452,9 +452,10 @@ static int mt7621_spi_probe(struct platform_device *pdev)
if (status)
return status;
- master = spi_alloc_master(&pdev->dev, sizeof(*rs));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*rs));
if (master == NULL) {
dev_info(&pdev->dev, "master allocation failed\n");
+ clk_disable_unprepare(clk);
return -ENOMEM;
}
@@ -480,12 +481,17 @@ static int mt7621_spi_probe(struct platform_device *pdev)
ret = device_reset(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "SPI reset failed!\n");
+ clk_disable_unprepare(clk);
return ret;
}
mt7621_spi_reset(rs, 0);
- return spi_register_master(master);
+ ret = spi_register_master(master);
+ if (ret)
+ clk_disable_unprepare(clk);
+
+ return ret;
}
static int mt7621_spi_remove(struct platform_device *pdev)
@@ -496,8 +502,8 @@ static int mt7621_spi_remove(struct platform_device *pdev)
master = dev_get_drvdata(&pdev->dev);
rs = spi_master_get_devdata(master);
- clk_disable(rs->clk);
spi_unregister_master(master);
+ clk_disable_unprepare(rs->clk);
return 0;
}
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index f67f95043887..5761a31e2318 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -152,12 +152,6 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
if (!phy_node && of_phy_is_fixed_link(priv->of_node)) {
- int rc;
-
- rc = of_phy_register_fixed_link(priv->of_node);
- if (rc)
- return rc;
-
phy_node = of_node_get(priv->of_node);
}
if (!phy_node)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 5e271245273c..6c644ef6f3ff 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -80,15 +80,17 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
else
port = work->word1.cn38xx.ipprt;
- if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
+ if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))
/*
* Ignore length errors on min size packets. Some
* equipment incorrectly pads packets to 64+4FCS
* instead of 60+4FCS. Note these packets still get
* counted as frame errors.
*/
- } else if (work->word2.snoip.err_code == 5 ||
- work->word2.snoip.err_code == 7) {
+ return 0;
+
+ if (work->word2.snoip.err_code == 5 ||
+ work->word2.snoip.err_code == 7) {
/*
* We received a packet with either an alignment error
* or a FCS error. This may be signalling that we are
@@ -119,7 +121,10 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
/* Port received 0xd5 preamble */
work->packet_ptr.s.addr += i + 1;
work->word1.len -= i + 5;
- } else if ((*ptr & 0xf) == 0xd) {
+ return 0;
+ }
+
+ if ((*ptr & 0xf) == 0xd) {
/* Port received 0xd preamble */
work->packet_ptr.s.addr += i;
work->word1.len -= i + 4;
@@ -129,21 +134,20 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
((*(ptr + 1) & 0xf) << 4);
ptr++;
}
- } else {
- printk_ratelimited("Port %d unknown preamble, packet dropped\n",
- port);
- cvm_oct_free_work(work);
- return 1;
+ return 0;
}
+
+ printk_ratelimited("Port %d unknown preamble, packet dropped\n",
+ port);
+ cvm_oct_free_work(work);
+ return 1;
}
- } else {
- printk_ratelimited("Port %d receive error code %d, packet dropped\n",
- port, work->word2.snoip.err_code);
- cvm_oct_free_work(work);
- return 1;
}
- return 0;
+ printk_ratelimited("Port %d receive error code %d, packet dropped\n",
+ port, work->word2.snoip.err_code);
+ cvm_oct_free_work(work);
+ return 1;
}
static void copy_segments_to_skb(cvmx_wqe_t *work, struct sk_buff *skb)
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 9b15c9ed844b..b680e5785ae3 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -13,6 +13,7 @@
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
@@ -875,6 +876,14 @@ static int cvm_oct_probe(struct platform_device *pdev)
break;
}
+ if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) {
+ if (of_phy_register_fixed_link(priv->of_node)) {
+ netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n",
+ interface, priv->port);
+ dev->netdev_ops = NULL;
+ }
+ }
+
if (!dev->netdev_ops) {
free_netdev(dev);
} else if (register_netdev(dev) < 0) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 676d549ef786..a8365e23157b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -794,6 +794,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
/* SSID */
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
if (p && ie_len > 0) {
+ ie_len = min_t(int, ie_len, sizeof(pbss_network->Ssid.Ssid));
memset(&pbss_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(pbss_network->Ssid.Ssid, (p + 2), ie_len);
pbss_network->Ssid.SsidLength = ie_len;
@@ -812,6 +813,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
/* get supported rates */
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
if (p) {
+ ie_len = min_t(int, ie_len, NDIS_802_11_LENGTH_RATES_EX);
memcpy(supportRate, p + 2, ie_len);
supportRateNum = ie_len;
}
@@ -819,6 +821,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
/* get ext_supported rates */
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p) {
+ ie_len = min_t(int, ie_len,
+ NDIS_802_11_LENGTH_RATES_EX - supportRateNum);
memcpy(supportRate + supportRateNum, p + 2, ie_len);
supportRateNum += ie_len;
}
@@ -932,6 +936,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pht_cap->mcs.rx_mask[0] = 0xff;
pht_cap->mcs.rx_mask[1] = 0x0;
+ ie_len = min_t(int, ie_len, sizeof(pmlmepriv->htpriv.ht_cap));
memcpy(&pmlmepriv->htpriv.ht_cap, p+2, ie_len);
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 17b4b9257b49..0ddf41b5a734 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -1535,21 +1535,14 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
/* Allocate new skb for releasing to upper layer */
sub_skb = dev_alloc_skb(nSubframe_Length + 12);
- if (sub_skb) {
- skb_reserve(sub_skb, 12);
- skb_put_data(sub_skb, pdata, nSubframe_Length);
- } else {
- sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
- if (sub_skb) {
- sub_skb->data = pdata;
- sub_skb->len = nSubframe_Length;
- skb_set_tail_pointer(sub_skb, nSubframe_Length);
- } else {
- DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes);
- break;
- }
+ if (!sub_skb) {
+ DBG_88E("dev_alloc_skb() Fail!!! , nr_subframes=%d\n", nr_subframes);
+ break;
}
+ skb_reserve(sub_skb, 12);
+ skb_put_data(sub_skb, pdata, nSubframe_Length);
+
subframes[nr_subframes++] = sub_skb;
if (nr_subframes >= MAX_SUBFRAME_COUNT) {
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 0003f0c38038..2e1f31b08e8b 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -1161,9 +1161,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
break;
}
sec_len = *(pos++); len -= 1;
- if (sec_len > 0 && sec_len <= len) {
+ if (sec_len > 0 &&
+ sec_len <= len &&
+ sec_len <= 32) {
ssid[ssid_index].SsidLength = sec_len;
- memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
+ memcpy(ssid[ssid_index].Ssid, pos, sec_len);
ssid_index++;
}
pos += sec_len;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 276c965afc11..5c6630255ac2 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -41,6 +41,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
{USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+ {USB_DEVICE(0x7392, 0xb811)}, /* Edimax EW-7811UN V2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */
};
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 843e874b8a06..c5d67525b030 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -415,9 +415,10 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
struct iw_scan_req *req = (struct iw_scan_req *)b;
if (req->essid_len) {
- ieee->current_network.ssid_len = req->essid_len;
- memcpy(ieee->current_network.ssid, req->essid,
- req->essid_len);
+ int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);
+
+ ieee->current_network.ssid_len = len;
+ memcpy(ieee->current_network.ssid, req->essid, len);
}
}
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index c01474a6db1e..2574275475d1 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -1110,7 +1110,7 @@ struct rtllib_network {
bool bWithAironetIE;
bool bCkipSupported;
bool bCcxRmEnable;
- u16 CcxRmState[2];
+ u8 CcxRmState[2];
bool bMBssidValid;
u8 MBssidMask;
u8 MBssid[ETH_ALEN];
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index fa580ce1cf43..3bfd63fe4ac1 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1978,7 +1978,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
info_element->data[2] == 0x96 &&
info_element->data[3] == 0x01) {
if (info_element->len == 6) {
- memcpy(network->CcxRmState, &info_element[4], 2);
+ memcpy(network->CcxRmState, &info_element->data[4], 2);
if (network->CcxRmState[0] != 0)
network->bCcxRmEnable = true;
else
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 28cae82d795c..fb824c517449 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -599,7 +599,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE,
sizeof(struct ieee80211_rxb *),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!prxbIndicateArray)
return;
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 2066a1d9bc84..cc12e6c36fed 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -2484,7 +2484,7 @@ static int rtl8192_read_eeprom_info(struct net_device *dev)
ret = eprom_read(dev, (EEPROM_TxPwIndex_CCK >> 1));
if (ret < 0)
return ret;
- priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff) >> 8;
+ priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff00) >> 8;
} else
priv->EEPROMTxPowerLevelCCK = 0x10;
RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK);
@@ -3379,7 +3379,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
u32 *TotalRxDataNum)
{
u16 SlotIndex;
- u8 i;
+ u16 i;
*TotalRxBcnNum = 0;
*TotalRxDataNum = 0;
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index e4e6c979bedf..aa2206ccecee 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -333,8 +333,10 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
struct iw_scan_req *req = (struct iw_scan_req *)b;
if (req->essid_len) {
- ieee->current_network.ssid_len = req->essid_len;
- memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
+ int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);
+
+ ieee->current_network.ssid_len = len;
+ memcpy(ieee->current_network.ssid, req->essid, len);
}
}
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 620cee8b8514..db7bfdf9b4b2 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -242,8 +242,10 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
psurveyPara->ss_ssidlen = 0;
memset(psurveyPara->ss_ssid, 0, IW_ESSID_MAX_SIZE + 1);
if ((pssid != NULL) && (pssid->SsidLength)) {
- memcpy(psurveyPara->ss_ssid, pssid->Ssid, pssid->SsidLength);
- psurveyPara->ss_ssidlen = cpu_to_le32(pssid->SsidLength);
+ int len = min_t(int, pssid->SsidLength, IW_ESSID_MAX_SIZE);
+
+ memcpy(psurveyPara->ss_ssid, pssid->Ssid, len);
+ psurveyPara->ss_ssidlen = cpu_to_le32(len);
}
set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
r8712_enqueue_cmd(pcmdpriv, ph2c);
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 2f490a4bf60a..4472dc76276a 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -946,7 +946,7 @@ static int r871x_wx_set_priv(struct net_device *dev,
struct iw_point *dwrq = (struct iw_point *)awrq;
len = dwrq->length;
- ext = memdup_user(dwrq->pointer, len);
+ ext = strndup_user(dwrq->pointer, len);
if (IS_ERR(ext))
return PTR_ERR(ext);
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index 00a4302e9983..0ea21f889be6 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -468,7 +468,7 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
/* block-ack parameters */
#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
+#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
@@ -562,13 +562,6 @@ struct ieee80211_ht_addt_info {
#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
-/* block-ack parameters */
-#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
-#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
-#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
-#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
-
/*
 * A-MPDU buffer sizes
* According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index 2c65af319a60..6c6bf03ac38a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -1856,12 +1856,14 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
if (!pIE)
return _FAIL;
+ if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates))
+ return _FAIL;
memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
supportRateNum = ie_len;
pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
- if (pIE)
+ if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum))
memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
return _SUCCESS;
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 49ea780f9f42..dc69c41f3e38 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -20,6 +20,7 @@ static const struct sdio_device_id sdio_ids[] =
{ SDIO_DEVICE(0x024c, 0x0525), },
{ SDIO_DEVICE(0x024c, 0x0623), },
{ SDIO_DEVICE(0x024c, 0x0626), },
+ { SDIO_DEVICE(0x024c, 0x0627), },
{ SDIO_DEVICE(0x024c, 0xb723), },
{ /* end: all zeroes */ },
};
diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
index aa2f62acc994..4dd6f3fb5906 100644
--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
@@ -39,7 +39,7 @@
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
static const struct ieee80211_regdomain rtw_regdom_rd = {
- .n_reg_rules = 3,
+ .n_reg_rules = 2,
.alpha2 = "99",
.reg_rules = {
RTW_2GHZ_CH01_11,
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 846d7d243994..3972e215128e 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -897,6 +897,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
fix->visual = FB_VISUAL_PSEUDOCOLOR;
break;
case 16:
+ case 24:
case 32:
fix->visual = FB_VISUAL_TRUECOLOR;
break;
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index a144f28ee1a8..04de81559c6e 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -37,7 +37,7 @@ static unsigned char get_index(struct spk_synth *synth);
static int in_escape;
static int is_flushing;
-static spinlock_t flush_lock;
+static DEFINE_SPINLOCK(flush_lock);
static DECLARE_WAIT_QUEUE_HEAD(flush);
static struct var_t vars[] = {
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index 93742dbdee77..5c282e8522fb 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -47,9 +47,12 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
{
struct spk_ldisc_data *ldisc_data;
+ if (tty != speakup_tty)
+ /* Somebody tried to use this line discipline outside speakup */
+ return -ENODEV;
+
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
- speakup_tty = tty;
ldisc_data = kmalloc(sizeof(struct spk_ldisc_data), GFP_KERNEL);
if (!ldisc_data)
@@ -57,7 +60,7 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
sema_init(&ldisc_data->sem, 0);
ldisc_data->buf_free = true;
- speakup_tty->disc_data = ldisc_data;
+ tty->disc_data = ldisc_data;
return 0;
}
@@ -177,9 +180,25 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
tty_unlock(tty);
+ mutex_lock(&speakup_tty_mutex);
+ speakup_tty = tty;
ret = tty_set_ldisc(tty, N_SPEAKUP);
if (ret)
- pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
+ speakup_tty = NULL;
+ mutex_unlock(&speakup_tty_mutex);
+
+ if (!ret)
+ /* Success */
+ return 0;
+
+ pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
+
+ tty_lock(tty);
+ if (tty->ops->close)
+ tty->ops->close(tty, NULL);
+ tty_unlock(tty);
+
+ tty_kclose(tty);
return ret;
}
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index af0060c74530..43f461f75b97 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -143,7 +143,8 @@ void vnt_int_process_data(struct vnt_private *priv)
priv->wake_up_count =
priv->hw->conf.listen_interval;
- --priv->wake_up_count;
+ if (priv->wake_up_count)
+ --priv->wake_up_count;
/* Turn on wake up to listen next beacon */
if (priv->wake_up_count == 1)
diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
index 91dede54cc1f..be8dbf6c2c2f 100644
--- a/drivers/staging/vt6656/key.c
+++ b/drivers/staging/vt6656/key.c
@@ -81,9 +81,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
case VNT_KEY_PAIRWISE:
key_mode |= mode;
key_inx = 4;
- /* Don't save entry for pairwise key for station mode */
- if (priv->op_mode == NL80211_IFTYPE_STATION)
- clear_bit(entry, &priv->key_entry_inuse);
break;
default:
return -EINVAL;
@@ -107,7 +104,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, struct ieee80211_key_conf *key)
{
- struct ieee80211_bss_conf *conf = &vif->bss_conf;
struct vnt_private *priv = hw->priv;
u8 *mac_addr = NULL;
u8 key_dec_mode = 0;
@@ -149,16 +145,12 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
}
- if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE,
key_dec_mode, true);
- } else {
- vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY,
+ else
+ vnt_set_keymode(hw, mac_addr, key, VNT_KEY_GROUP_ADDRESS,
key_dec_mode, true);
- vnt_set_keymode(hw, (u8 *)conf->bssid, key,
- VNT_KEY_GROUP_ADDRESS, key_dec_mode, true);
- }
-
return 0;
}
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 36562ac94c1f..586a3d331511 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -595,8 +595,6 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
priv->op_mode = vif->type;
- vnt_set_bss_mode(priv);
-
/* LED blink on TX */
vnt_mac_set_led(priv, LEDSTS_STS, LEDSTS_INTER);
@@ -683,7 +681,6 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->basic_rates = conf->basic_rates;
vnt_update_top_rates(priv);
- vnt_set_bss_mode(priv);
dev_dbg(&priv->usb->dev, "basic rates %x\n", conf->basic_rates);
}
@@ -712,11 +709,14 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->short_slot_time = false;
vnt_set_short_slot_time(priv);
- vnt_update_ifs(priv);
vnt_set_vga_gain_offset(priv, priv->bb_vga[0]);
vnt_update_pre_ed_threshold(priv, false);
}
+ if (changed & (BSS_CHANGED_BASIC_RATES | BSS_CHANGED_ERP_PREAMBLE |
+ BSS_CHANGED_ERP_SLOT))
+ vnt_set_bss_mode(priv);
+
if (changed & BSS_CHANGED_TXPOWER)
vnt_rf_setpower(priv, priv->current_rate,
conf->chandef.chan->hw_value);
@@ -740,12 +740,15 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL,
TFTCTL_TSFCNTREN);
- vnt_adjust_tsf(priv, conf->beacon_rate->hw_value,
- conf->sync_tsf, priv->current_tsf);
-
vnt_mac_set_beacon_interval(priv, conf->beacon_int);
vnt_reset_next_tbtt(priv, conf->beacon_int);
+
+ vnt_adjust_tsf(priv, conf->beacon_rate->hw_value,
+ conf->sync_tsf, priv->current_tsf);
+
+ vnt_update_next_tbtt(priv,
+ conf->sync_tsf, conf->beacon_int);
} else {
vnt_clear_current_tsf(priv);
@@ -780,15 +783,11 @@ static void vnt_configure(struct ieee80211_hw *hw,
{
struct vnt_private *priv = hw->priv;
u8 rx_mode = 0;
- int rc;
*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
- rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
- MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
-
- if (!rc)
- rx_mode = RCR_MULTICAST | RCR_BROADCAST;
+ vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
+ MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode);
@@ -829,8 +828,12 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
break;
case DISABLE_KEY:
- if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
+ if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) {
clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
+
+ vnt_mac_disable_keyentry(priv, key->hw_key_idx);
+ }
+
default:
break;
}
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 65ad9773018e..7686805dfe0f 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -532,13 +532,8 @@ static void hfa384x_usb_defer(struct work_struct *data)
*/
void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
{
- memset(hw, 0, sizeof(*hw));
hw->usb = usb;
- /* set up the endpoints */
- hw->endp_in = usb_rcvbulkpipe(usb, 1);
- hw->endp_out = usb_sndbulkpipe(usb, 2);
-
/* Set up the waitq */
init_waitqueue_head(&hw->cmdq);
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index d8d86761b790..9eee72aff723 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -61,11 +61,16 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *dev;
-
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+ struct usb_host_interface *iface_desc = interface->cur_altsetting;
struct wlandevice *wlandev = NULL;
struct hfa384x *hw = NULL;
int result = 0;
+ result = usb_find_common_endpoints(iface_desc, &bulk_in, &bulk_out, NULL, NULL);
+ if (result)
+ goto failed;
+
dev = interface_to_usbdev(interface);
wlandev = create_wlan();
if (!wlandev) {
@@ -82,6 +87,8 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
}
/* Initialize the hw data */
+ hw->endp_in = usb_rcvbulkpipe(dev, bulk_in->bEndpointAddress);
+ hw->endp_out = usb_sndbulkpipe(dev, bulk_out->bEndpointAddress);
hfa384x_create(hw, dev);
hw->wlandev = wlandev;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index 25eb3891e34b..56bfb30b0ef5 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -89,8 +89,7 @@ static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
length += sizeof(struct cpl_tx_data_iso);
-#define MAX_IMM_TX_PKT_LEN 256
- return length <= MAX_IMM_TX_PKT_LEN;
+ return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
}
/*
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 03e9cb156df9..58ccded1be85 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -492,8 +492,7 @@ EXPORT_SYMBOL(iscsit_queue_rsp);
void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
spin_lock_bh(&conn->cmd_lock);
- if (!list_empty(&cmd->i_conn_node) &&
- !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
+ if (!list_empty(&cmd->i_conn_node))
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
@@ -1381,14 +1380,27 @@ static u32 iscsit_do_crypto_hash_sg(
sg = cmd->first_data_sg;
page_off = cmd->first_data_sg_off;
+ if (data_length && page_off) {
+ struct scatterlist first_sg;
+ u32 len = min_t(u32, data_length, sg->length - page_off);
+
+ sg_init_table(&first_sg, 1);
+ sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);
+
+ ahash_request_set_crypt(hash, &first_sg, NULL, len);
+ crypto_ahash_update(hash);
+
+ data_length -= len;
+ sg = sg_next(sg);
+ }
+
while (data_length) {
- u32 cur_len = min_t(u32, data_length, (sg->length - page_off));
+ u32 cur_len = min_t(u32, data_length, sg->length);
ahash_request_set_crypt(hash, sg, NULL, cur_len);
crypto_ahash_update(hash);
data_length -= cur_len;
- page_off = 0;
/* iscsit_map_iovec has already checked for invalid sg pointers */
sg = sg_next(sg);
}
@@ -4041,12 +4053,22 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
spin_lock_bh(&conn->cmd_lock);
list_splice_init(&conn->conn_cmd_list, &tmp_list);
- list_for_each_entry(cmd, &tmp_list, i_conn_node) {
+ list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
struct se_cmd *se_cmd = &cmd->se_cmd;
if (se_cmd->se_tfo != NULL) {
spin_lock_irq(&se_cmd->t_state_lock);
- se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+ if (se_cmd->transport_state & CMD_T_ABORTED) {
+ /*
+ * LIO's abort path owns the cleanup for this,
+ * so put it back on the list and let
+ * aborted_task handle it.
+ */
+ list_move_tail(&cmd->i_conn_node,
+ &conn->conn_cmd_list);
+ } else {
+ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+ }
spin_unlock_irq(&se_cmd->t_state_lock);
}
}
@@ -4275,30 +4297,37 @@ int iscsit_close_connection(
if (!atomic_read(&sess->session_reinstatement) &&
atomic_read(&sess->session_fall_back_to_erl0)) {
spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
iscsit_close_session(sess);
return 0;
} else if (atomic_read(&sess->session_logout)) {
pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
sess->session_state = TARG_SESS_STATE_FREE;
- spin_unlock_bh(&sess->conn_lock);
- if (atomic_read(&sess->sleep_on_sess_wait_comp))
- complete(&sess->session_wait_comp);
+ if (atomic_read(&sess->session_close)) {
+ spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
+ iscsit_close_session(sess);
+ } else {
+ spin_unlock_bh(&sess->conn_lock);
+ }
return 0;
} else {
pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
sess->session_state = TARG_SESS_STATE_FAILED;
- if (!atomic_read(&sess->session_continuation)) {
- spin_unlock_bh(&sess->conn_lock);
+ if (!atomic_read(&sess->session_continuation))
iscsit_start_time2retain_handler(sess);
- } else
- spin_unlock_bh(&sess->conn_lock);
- if (atomic_read(&sess->sleep_on_sess_wait_comp))
- complete(&sess->session_wait_comp);
+ if (atomic_read(&sess->session_close)) {
+ spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
+ iscsit_close_session(sess);
+ } else {
+ spin_unlock_bh(&sess->conn_lock);
+ }
return 0;
}
@@ -4404,9 +4433,9 @@ static void iscsit_logout_post_handler_closesession(
complete(&conn->conn_logout_comp);
iscsit_dec_conn_usage_count(conn);
+ atomic_set(&sess->session_close, 1);
iscsit_stop_session(sess, sleep, sleep);
iscsit_dec_session_usage_count(sess);
- iscsit_close_session(sess);
}
static void iscsit_logout_post_handler_samecid(
@@ -4541,49 +4570,6 @@ void iscsit_fail_session(struct iscsi_session *sess)
sess->session_state = TARG_SESS_STATE_FAILED;
}
-int iscsit_free_session(struct iscsi_session *sess)
-{
- u16 conn_count = atomic_read(&sess->nconn);
- struct iscsi_conn *conn, *conn_tmp = NULL;
- int is_last;
-
- spin_lock_bh(&sess->conn_lock);
- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
-
- list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
- conn_list) {
- if (conn_count == 0)
- break;
-
- if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
- is_last = 1;
- } else {
- iscsit_inc_conn_usage_count(conn_tmp);
- is_last = 0;
- }
- iscsit_inc_conn_usage_count(conn);
-
- spin_unlock_bh(&sess->conn_lock);
- iscsit_cause_connection_reinstatement(conn, 1);
- spin_lock_bh(&sess->conn_lock);
-
- iscsit_dec_conn_usage_count(conn);
- if (is_last == 0)
- iscsit_dec_conn_usage_count(conn_tmp);
-
- conn_count--;
- }
-
- if (atomic_read(&sess->nconn)) {
- spin_unlock_bh(&sess->conn_lock);
- wait_for_completion(&sess->session_wait_comp);
- } else
- spin_unlock_bh(&sess->conn_lock);
-
- iscsit_close_session(sess);
- return 0;
-}
-
void iscsit_stop_session(
struct iscsi_session *sess,
int session_sleep,
@@ -4594,8 +4580,6 @@ void iscsit_stop_session(
int is_last;
spin_lock_bh(&sess->conn_lock);
- if (session_sleep)
- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
if (connection_sleep) {
list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
@@ -4653,12 +4637,15 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
+ atomic_read(&sess->session_close) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
continue;
}
+ iscsit_inc_session_usage_count(sess);
atomic_set(&sess->session_reinstatement, 1);
atomic_set(&sess->session_fall_back_to_erl0, 1);
+ atomic_set(&sess->session_close, 1);
spin_unlock(&sess->conn_lock);
list_move_tail(&se_sess->sess_list, &free_list);
@@ -4668,7 +4655,9 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
- iscsit_free_session(sess);
+ list_del_init(&se_sess->sess_list);
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
session_count++;
}
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 48bac0acf8c7..11a481cf6ead 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -43,7 +43,6 @@ extern int iscsi_target_rx_thread(void *);
extern int iscsit_close_connection(struct iscsi_conn *);
extern int iscsit_close_session(struct iscsi_session *);
extern void iscsit_fail_session(struct iscsi_session *);
-extern int iscsit_free_session(struct iscsi_session *);
extern void iscsit_stop_session(struct iscsi_session *, int, int);
extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 95d0a22b2ad6..d25cadc4f4f1 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1501,20 +1501,23 @@ static void lio_tpg_close_session(struct se_session *se_sess)
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
+ atomic_read(&sess->session_close) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
spin_unlock_bh(&se_tpg->session_lock);
return;
}
+ iscsit_inc_session_usage_count(sess);
atomic_set(&sess->session_reinstatement, 1);
atomic_set(&sess->session_fall_back_to_erl0, 1);
+ atomic_set(&sess->session_close, 1);
spin_unlock(&sess->conn_lock);
iscsit_stop_time2retain_timer(sess);
spin_unlock_bh(&se_tpg->session_lock);
iscsit_stop_session(sess, 1, 1);
- iscsit_close_session(sess);
+ iscsit_dec_session_usage_count(sess);
}
static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bb90c80ff388..db93bd0a9b88 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -164,6 +164,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
spin_lock(&sess_p->conn_lock);
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
+ atomic_read(&sess_p->session_close) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess_p->conn_lock);
continue;
@@ -174,6 +175,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
(sess_p->sess_ops->SessionType == sessiontype))) {
atomic_set(&sess_p->session_reinstatement, 1);
atomic_set(&sess_p->session_fall_back_to_erl0, 1);
+ atomic_set(&sess_p->session_close, 1);
spin_unlock(&sess_p->conn_lock);
iscsit_inc_session_usage_count(sess_p);
iscsit_stop_time2retain_timer(sess_p);
@@ -198,7 +200,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
if (sess->session_state == TARG_SESS_STATE_FAILED) {
spin_unlock_bh(&sess->conn_lock);
iscsit_dec_session_usage_count(sess);
- iscsit_close_session(sess);
return 0;
}
spin_unlock_bh(&sess->conn_lock);
@@ -206,7 +207,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
iscsit_stop_session(sess, 1, 1);
iscsit_dec_session_usage_count(sess);
- iscsit_close_session(sess);
return 0;
}
@@ -494,6 +494,7 @@ static int iscsi_login_non_zero_tsih_s2(
sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
+ atomic_read(&sess_p->session_close) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
continue;
if (!memcmp(sess_p->isid, pdu->isid, 6) &&
@@ -1182,7 +1183,7 @@ void iscsit_free_conn(struct iscsi_conn *conn)
}
void iscsi_target_login_sess_out(struct iscsi_conn *conn,
- struct iscsi_np *np, bool zero_tsih, bool new_sess)
+ bool zero_tsih, bool new_sess)
{
if (!new_sess)
goto old_sess_out;
@@ -1200,7 +1201,6 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
conn->sess = NULL;
old_sess_out:
- iscsi_stop_login_thread_timer(np);
/*
* If login negotiation fails check if the Time2Retain timer
* needs to be restarted.
@@ -1440,8 +1440,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
new_sess_out:
new_sess = true;
old_sess_out:
+ iscsi_stop_login_thread_timer(np);
tpg_np = conn->tpg_np;
- iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess);
+ iscsi_target_login_sess_out(conn, zero_tsih, new_sess);
new_sess = false;
if (tpg) {
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 3b8e3639ff5d..fc95e6150253 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -22,8 +22,7 @@ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
extern void iscsit_free_conn(struct iscsi_conn *);
extern int iscsit_start_kthreads(struct iscsi_conn *);
extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
-extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
- bool, bool);
+extern void iscsi_target_login_sess_out(struct iscsi_conn *, bool, bool);
extern int iscsi_target_login_thread(void *);
extern void iscsi_handle_login_thread_timeout(struct timer_list *t);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 8a5e8d17a942..5db8842a8026 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -554,12 +554,11 @@ static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned in
static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
{
- struct iscsi_np *np = login->np;
bool zero_tsih = login->zero_tsih;
iscsi_remove_failed_auth_entry(conn);
iscsi_target_nego_release(conn);
- iscsi_target_login_sess_out(conn, np, zero_tsih, true);
+ iscsi_target_login_sess_out(conn, zero_tsih, true);
}
struct conn_timeout {
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 10fae26b44ad..939c6212d2ac 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -76,7 +76,7 @@ static int fc_get_pr_transport_id(
* encoded TransportID.
*/
ptr = &se_nacl->initiatorname[0];
- for (i = 0; i < 24; ) {
+ for (i = 0; i < 23; ) {
if (!strncmp(&ptr[i], ":", 1)) {
i++;
continue;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 854b2bcca7c1..5668d02de10b 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -445,7 +445,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
target_to_linux_sector(dev, cmd->t_task_lba),
target_to_linux_sector(dev,
sbc_get_write_same_sectors(cmd)),
- GFP_KERNEL, false);
+ GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
if (ret)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 10db5656fd5d..949879f2f1d1 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -3742,6 +3742,7 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
spin_unlock(&dev->t10_pr.registration_lock);
put_unaligned_be32(add_len, &buf[4]);
+ target_set_cmd_data_length(cmd, 8 + add_len);
transport_kunmap_data_sg(cmd);
@@ -3760,7 +3761,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
struct t10_pr_registration *pr_reg;
unsigned char *buf;
u64 pr_res_key;
- u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
+ u32 add_len = 0;
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
@@ -3778,8 +3779,9 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
pr_reg = dev->dev_pr_res_holder;
if (pr_reg) {
/*
- * Set the hardcoded Additional Length
+ * Set the Additional Length to 16 when a reservation is held
*/
+ add_len = 16;
put_unaligned_be32(add_len, &buf[4]);
if (cmd->data_length < 22)
@@ -3815,6 +3817,8 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
(pr_reg->pr_res_type & 0x0f);
}
+ target_set_cmd_data_length(cmd, 8 + add_len);
+
err:
spin_unlock(&dev->dev_reservation_lock);
transport_kunmap_data_sg(cmd);
@@ -3833,7 +3837,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
struct se_device *dev = cmd->se_dev;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
- u16 add_len = 8; /* Hardcoded to 8. */
+ u16 len = 8; /* Hardcoded to 8. */
if (cmd->data_length < 6) {
pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
@@ -3845,7 +3849,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- put_unaligned_be16(add_len, &buf[0]);
+ put_unaligned_be16(len, &buf[0]);
 buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
@@ -3874,6 +3878,8 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+ target_set_cmd_data_length(cmd, len);
+
transport_kunmap_data_sg(cmd);
return 0;
@@ -4034,6 +4040,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* Set ADDITIONAL_LENGTH
*/
put_unaligned_be32(add_len, &buf[4]);
+ target_set_cmd_data_length(cmd, 8 + add_len);
transport_kunmap_data_sg(cmd);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 47d76c862014..1b52cd4d793f 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -633,8 +633,9 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
unsigned char *buf;
buf = transport_kmap_data_sg(cmd);
- if (!buf)
+ if (!buf) {
; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
+ }
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@@ -970,6 +971,14 @@ new_bio:
return 0;
fail:
+ if (bio)
+ bio_put(bio);
+ while (req->bio) {
+ bio = req->bio;
+ req->bio = bio->bi_next;
+ bio_put(bio);
+ }
+ req->biotail = NULL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index f1b730b77a31..bdada97cd4fe 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -841,11 +841,9 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
}
EXPORT_SYMBOL(target_complete_cmd);
-void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
+void target_set_cmd_data_length(struct se_cmd *cmd, int length)
{
- if ((scsi_status == SAM_STAT_GOOD ||
- cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
- length < cmd->data_length) {
+ if (length < cmd->data_length) {
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
cmd->residual_count += cmd->data_length - length;
} else {
@@ -855,6 +853,15 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
cmd->data_length = length;
}
+}
+EXPORT_SYMBOL(target_set_cmd_data_length);
+
+void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
+{
+ if (scsi_status == SAM_STAT_GOOD ||
+ cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) {
+ target_set_cmd_data_length(cmd, length);
+ }
target_complete_cmd(cmd, scsi_status);
}
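
The hunk above makes target_set_cmd_data_length() only ever shrink cmd->data_length, accumulating the difference in residual_count. Below is a minimal userspace sketch of that bookkeeping; the struct and flag names are stand-ins for illustration, not the kernel definitions.

#include <stdio.h>

#define SCF_UNDERFLOW_BIT 0x02

struct fake_cmd {
	unsigned int se_cmd_flags;
	unsigned int data_length;     /* length the initiator asked for */
	unsigned int residual_count;  /* shortfall reported back */
};

/* Mirror of the logic above: only ever shrink, never grow, the length. */
static void set_cmd_data_length(struct fake_cmd *cmd, unsigned int length)
{
	if (length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}
		cmd->data_length = length;
	}
}

int main(void)
{
	struct fake_cmd cmd = { .data_length = 256 };

	/* e.g. READ RESERVATION returning only 8 + 16 = 24 valid bytes */
	set_cmd_data_length(&cmd, 24);
	printf("data_length=%u residual=%u underflow=%d\n",
	       cmd.data_length, cmd.residual_count,
	       !!(cmd.se_cmd_flags & SCF_UNDERFLOW_BIT));
	return 0;
}
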
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 7ee0a75ce452..dd7307375504 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -612,7 +612,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
size = round_up(size+offset, PAGE_SIZE);
while (size) {
- flush_dcache_page(virt_to_page(start));
+ flush_dcache_page(vmalloc_to_page(start));
start += PAGE_SIZE;
size -= PAGE_SIZE;
}
@@ -680,15 +680,17 @@ static void scatter_data_area(struct tcmu_dev *udev,
void *from, *to = NULL;
size_t copy_bytes, to_offset, offset;
struct scatterlist *sg;
- struct page *page;
+ struct page *page = NULL;
for_each_sg(data_sg, sg, data_nents, i) {
int sg_remaining = sg->length;
from = kmap_atomic(sg_page(sg)) + sg->offset;
while (sg_remaining > 0) {
if (block_remaining == 0) {
- if (to)
+ if (to) {
+ flush_dcache_page(page);
kunmap_atomic(to);
+ }
block_remaining = DATA_BLOCK_SIZE;
dbi = tcmu_cmd_get_dbi(tcmu_cmd);
@@ -733,7 +735,6 @@ static void scatter_data_area(struct tcmu_dev *udev,
memcpy(to + offset,
from + sg->length - sg_remaining,
copy_bytes);
- tcmu_flush_dcache_range(to, copy_bytes);
}
sg_remaining -= copy_bytes;
@@ -742,8 +743,10 @@ static void scatter_data_area(struct tcmu_dev *udev,
kunmap_atomic(from - sg->offset);
}
- if (to)
+ if (to) {
+ flush_dcache_page(page);
kunmap_atomic(to);
+ }
}
static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
@@ -789,13 +792,13 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
dbi = tcmu_cmd_get_dbi(cmd);
page = tcmu_get_block_page(udev, dbi);
from = kmap_atomic(page);
+ flush_dcache_page(page);
}
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
if (read_len < copy_bytes)
copy_bytes = read_len;
offset = DATA_BLOCK_SIZE - block_remaining;
- tcmu_flush_dcache_range(from, copy_bytes);
memcpy(to + sg->length - sg_remaining, from + offset,
copy_bytes);
@@ -893,41 +896,24 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
return command_size;
}
-static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
- struct timer_list *timer)
+static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
+ struct timer_list *timer)
{
- struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
- int cmd_id;
-
- if (tcmu_cmd->cmd_id)
- goto setup_timer;
-
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
- if (cmd_id < 0) {
- pr_err("tcmu: Could not allocate cmd id.\n");
- return cmd_id;
- }
- tcmu_cmd->cmd_id = cmd_id;
-
- pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
- udev->name, tmo / MSEC_PER_SEC);
-
-setup_timer:
if (!tmo)
- return 0;
+ return;
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
if (!timer_pending(timer))
mod_timer(timer, tcmu_cmd->deadline);
- return 0;
+ pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
+ tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
}
static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
unsigned int tmo;
- int ret;
/*
* For backwards compat if qfull_time_out is not set use
@@ -942,13 +928,11 @@ static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
else
tmo = TCMU_TIME_OUT;
- ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
- if (ret)
- return ret;
+ tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
- pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
- tcmu_cmd->cmd_id, udev->name);
+ pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
+ tcmu_cmd, udev->name);
return 0;
}
@@ -970,7 +954,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
struct tcmu_mailbox *mb;
struct tcmu_cmd_entry *entry;
struct iovec *iov;
- int iov_cnt, ret;
+ int iov_cnt, cmd_id;
uint32_t cmd_head;
uint64_t cdb_off;
bool copy_to_data_area;
@@ -1037,7 +1021,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
entry->hdr.cmd_id = 0; /* not used for PAD */
entry->hdr.kflags = 0;
entry->hdr.uflags = 0;
- tcmu_flush_dcache_range(entry, sizeof(*entry));
+ tcmu_flush_dcache_range(entry, sizeof(entry->hdr));
UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
tcmu_flush_dcache_range(mb, sizeof(*mb));
@@ -1071,14 +1055,21 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
}
entry->req.iov_bidi_cnt = iov_cnt;
- ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
- &udev->cmd_timer);
- if (ret) {
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+ if (cmd_id < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
}
+ tcmu_cmd->cmd_id = cmd_id;
+
+ pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
+ tcmu_cmd, udev->name);
+
+ tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
+
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
/*
@@ -1095,7 +1086,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
cdb_off = CMDR_OFF + cmd_head + base_command_size;
memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
entry->req.cdb_off = cdb_off;
- tcmu_flush_dcache_range(entry, sizeof(*entry));
+ tcmu_flush_dcache_range(entry, command_size);
UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
tcmu_flush_dcache_range(mb, sizeof(*mb));
@@ -1225,7 +1216,7 @@ static void tcmu_set_next_deadline(struct list_head *queue,
del_timer(timer);
}
-static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
+static bool tcmu_handle_completions(struct tcmu_dev *udev)
{
struct tcmu_mailbox *mb;
struct tcmu_cmd *cmd;
@@ -1243,7 +1234,14 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
- tcmu_flush_dcache_range(entry, sizeof(*entry));
+ /*
+ * Flush at most up to the end of the cmd ring, since the current
+ * entry might be a padding entry shorter than sizeof(*entry).
+ */
+ size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
+ udev->cmdr_size);
+ tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
+ ring_left : sizeof(*entry));
if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
UPDATE_HEAD(udev->cmdr_last_cleaned,
@@ -1258,7 +1256,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
pr_err("cmd_id %u not found, ring is broken\n",
entry->hdr.cmd_id);
set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
- break;
+ return false;
}
tcmu_handle_completion(cmd, entry);
@@ -1290,50 +1288,39 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
return handled;
}
-static int tcmu_check_expired_cmd(int id, void *p, void *data)
+static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{
- struct tcmu_cmd *cmd = p;
- struct tcmu_dev *udev = cmd->tcmu_dev;
- u8 scsi_status;
struct se_cmd *se_cmd;
- bool is_running;
-
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
- return 0;
if (!time_after(jiffies, cmd->deadline))
- return 0;
+ return;
- is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
+ set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+ list_del_init(&cmd->queue_entry);
se_cmd = cmd->se_cmd;
+ cmd->se_cmd = NULL;
- if (is_running) {
- /*
- * If cmd_time_out is disabled but qfull is set deadline
- * will only reflect the qfull timeout. Ignore it.
- */
- if (!udev->cmd_time_out)
- return 0;
+ pr_debug("Timing out inflight cmd %u on dev %s.\n",
+ cmd->cmd_id, cmd->tcmu_dev->name);
- set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
- /*
- * target_complete_cmd will translate this to LUN COMM FAILURE
- */
- scsi_status = SAM_STAT_CHECK_CONDITION;
- list_del_init(&cmd->queue_entry);
- cmd->se_cmd = NULL;
- } else {
- list_del_init(&cmd->queue_entry);
- idr_remove(&udev->commands, id);
- tcmu_free_cmd(cmd);
- scsi_status = SAM_STAT_TASK_SET_FULL;
- }
+ target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
+}
- pr_debug("Timing out cmd %u on dev %s that is %s.\n",
- id, udev->name, is_running ? "inflight" : "queued");
+static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
+{
+ struct se_cmd *se_cmd;
- target_complete_cmd(se_cmd, scsi_status);
- return 0;
+ if (!time_after(jiffies, cmd->deadline))
+ return;
+
+ pr_debug("Timing out queued cmd %p on dev %s.\n",
+ cmd, cmd->tcmu_dev->name);
+
+ list_del_init(&cmd->queue_entry);
+ se_cmd = cmd->se_cmd;
+ tcmu_free_cmd(cmd);
+
+ target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}
static void tcmu_device_timedout(struct tcmu_dev *udev)
@@ -1418,16 +1405,15 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
return &udev->se_dev;
}
-static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
+static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
LIST_HEAD(cmds);
- bool drained = true;
sense_reason_t scsi_ret;
int ret;
if (list_empty(&udev->qfull_queue))
- return true;
+ return;
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
@@ -1436,11 +1422,10 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
list_del_init(&tcmu_cmd->queue_entry);
- pr_debug("removing cmd %u on dev %s from queue\n",
- tcmu_cmd->cmd_id, udev->name);
+ pr_debug("removing cmd %p on dev %s from queue\n",
+ tcmu_cmd, udev->name);
if (fail) {
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
/*
* We were not able to even start the command, so
* fail with busy to allow a retry in case runner
@@ -1455,10 +1440,8 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
if (ret < 0) {
- pr_debug("cmd %u on dev %s failed with %u\n",
- tcmu_cmd->cmd_id, udev->name, scsi_ret);
-
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
+ pr_debug("cmd %p on dev %s failed with %u\n",
+ tcmu_cmd, udev->name, scsi_ret);
/*
* Ignore scsi_ret for now. target_complete_cmd
* drops it.
@@ -1473,13 +1456,11 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
* the queue
*/
list_splice_tail(&cmds, &udev->qfull_queue);
- drained = false;
break;
}
}
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
- return drained;
}
static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
@@ -1663,6 +1644,8 @@ static void tcmu_dev_kref_release(struct kref *kref)
if (tcmu_check_and_free_pending_cmd(cmd) != 0)
all_expired = false;
}
+ if (!list_empty(&udev->qfull_queue))
+ all_expired = false;
idr_destroy(&udev->commands);
WARN_ON(!all_expired);
@@ -2031,9 +2014,6 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mutex_lock(&udev->cmdr_lock);
idr_for_each_entry(&udev->commands, cmd, i) {
- if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
- continue;
-
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
cmd->cmd_id, udev->name,
test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
@@ -2067,9 +2047,12 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mb->cmd_tail = 0;
mb->cmd_head = 0;
tcmu_flush_dcache_range(mb, sizeof(*mb));
+ clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
del_timer(&udev->cmd_timer);
+ run_qfull_queue(udev, false);
+
mutex_unlock(&udev->cmdr_lock);
}
@@ -2691,6 +2674,7 @@ static void find_free_blocks(void)
static void check_timedout_devices(void)
{
struct tcmu_dev *udev, *tmp_dev;
+ struct tcmu_cmd *cmd, *tmp_cmd;
LIST_HEAD(devs);
spin_lock_bh(&timed_out_udevs_lock);
@@ -2701,9 +2685,24 @@ static void check_timedout_devices(void)
spin_unlock_bh(&timed_out_udevs_lock);
mutex_lock(&udev->cmdr_lock);
- idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
- tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+ /*
+ * If cmd_time_out is disabled but qfull is set deadline
+ * will only reflect the qfull timeout. Ignore it.
+ */
+ if (udev->cmd_time_out) {
+ list_for_each_entry_safe(cmd, tmp_cmd,
+ &udev->inflight_queue,
+ queue_entry) {
+ tcmu_check_expired_ring_cmd(cmd);
+ }
+ tcmu_set_next_deadline(&udev->inflight_queue,
+ &udev->cmd_timer);
+ }
+ list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
+ queue_entry) {
+ tcmu_check_expired_queue_cmd(cmd);
+ }
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
mutex_unlock(&udev->cmdr_lock);
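
The flush-length change above bounds the per-entry cache flush by the distance to the end of the command ring, because a PAD entry at the tail can be shorter than a full entry. A small standalone sketch of that arithmetic, assuming head_to_end() simply returns the bytes left before the ring wraps:

#include <stdio.h>
#include <stddef.h>

/* Bytes from the current ring offset to the physical end of the ring. */
static size_t head_to_end(size_t head, size_t ring_size)
{
	return ring_size - head;
}

/* How many bytes it is safe to flush for an entry starting at 'head':
 * never more than one full entry and never past the end of the ring. */
static size_t flush_len(size_t head, size_t ring_size, size_t entry_size)
{
	size_t ring_left = head_to_end(head, ring_size);

	return ring_left < entry_size ? ring_left : entry_size;
}

int main(void)
{
	const size_t ring_size = 4096, entry_size = 112;

	printf("mid-ring : flush %zu bytes\n", flush_len(1024, ring_size, entry_size));
	printf("near end : flush %zu bytes\n", flush_len(4040, ring_size, entry_size));
	return 0;
}
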
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 7cdb5d7f6538..1709b8a99bd7 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -55,60 +55,83 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
return 0;
}
-struct xcopy_dev_search_info {
- const unsigned char *dev_wwn;
- struct se_device *found_dev;
-};
-
+/**
+ * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
+ *
+ * @se_dev: device being considered for match
+ * @dev_wwn: XCOPY requested NAA dev_wwn
+ * @return: 1 on match, 0 on no-match
+ */
static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
- void *data)
+ const unsigned char *dev_wwn)
{
- struct xcopy_dev_search_info *info = data;
unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
int rc;
- if (!se_dev->dev_attrib.emulate_3pc)
+ if (!se_dev->dev_attrib.emulate_3pc) {
+ pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
return 0;
+ }
memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
- rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
- if (rc != 0)
- return 0;
-
- info->found_dev = se_dev;
- pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
-
- rc = target_depend_item(&se_dev->dev_group.cg_item);
+ rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
if (rc != 0) {
- pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
- rc, se_dev);
- return rc;
+ pr_debug("XCOPY: skip non-matching: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
+ return 0;
}
+ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
- pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
- se_dev, &se_dev->dev_group);
return 1;
}
-static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
- struct se_device **found_dev)
+static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
+ const unsigned char *dev_wwn,
+ struct se_device **_found_dev,
+ struct percpu_ref **_found_lun_ref)
{
- struct xcopy_dev_search_info info;
- int ret;
-
- memset(&info, 0, sizeof(info));
- info.dev_wwn = dev_wwn;
-
- ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
- if (ret == 1) {
- *found_dev = info.found_dev;
- return 0;
- } else {
- pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
- return -EINVAL;
+ struct se_dev_entry *deve;
+ struct se_node_acl *nacl;
+ struct se_lun *this_lun = NULL;
+ struct se_device *found_dev = NULL;
+
+ /* cmd with NULL sess indicates no associated $FABRIC_MOD */
+ if (!sess)
+ goto err_out;
+
+ pr_debug("XCOPY 0xe4: searching for: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
+
+ nacl = sess->se_node_acl;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+ struct se_device *this_dev;
+ int rc;
+
+ this_lun = rcu_dereference(deve->se_lun);
+ this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
+
+ rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
+ if (rc) {
+ if (percpu_ref_tryget_live(&this_lun->lun_ref))
+ found_dev = this_dev;
+ break;
+ }
}
+ rcu_read_unlock();
+ if (found_dev == NULL)
+ goto err_out;
+
+ pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
+ found_dev, &found_dev->dev_group);
+ *_found_dev = found_dev;
+ *_found_lun_ref = &this_lun->lun_ref;
+ return 0;
+err_out:
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ return -EINVAL;
}
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
@@ -255,12 +278,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
switch (xop->op_origin) {
case XCOL_SOURCE_RECV_OP:
- rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
- &xop->dst_dev);
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->dst_tid_wwn,
+ &xop->dst_dev,
+ &xop->remote_lun_ref);
break;
case XCOL_DEST_RECV_OP:
- rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
- &xop->src_dev);
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->src_tid_wwn,
+ &xop->src_dev,
+ &xop->remote_lun_ref);
break;
default:
pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
@@ -412,18 +439,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
- struct se_device *remote_dev;
-
if (xop->op_origin == XCOL_SOURCE_RECV_OP)
- remote_dev = xop->dst_dev;
+ pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
else
- remote_dev = xop->src_dev;
-
- pr_debug("Calling configfs_undepend_item for"
- " remote_dev: %p remote_dev->dev_group: %p\n",
- remote_dev, &remote_dev->dev_group.cg_item);
+ pr_debug("putting src lun_ref for %p\n", xop->src_dev);
- target_undepend_item(&remote_dev->dev_group.cg_item);
+ percpu_ref_put(xop->remote_lun_ref);
}
static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index 26ba4c3c9cff..974bc1e19ff2 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -29,6 +29,7 @@ struct xcopy_op {
struct se_device *dst_dev;
unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ struct percpu_ref *remote_lun_ref;
sector_t src_lba;
sector_t dst_lba;
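
The XCOPY rework above pins the remote LUN via its percpu_ref for the duration of the copy instead of taking a configfs dependency. The following is only a rough userspace model of that hold/put pattern using a plain atomic counter; the real percpu_ref_tryget_live() performs the liveness check and increment atomically, which this toy check-then-add does not.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a LUN with a liveness-aware reference count. */
struct toy_lun {
	atomic_int refs;
	atomic_bool dying;
};

/* Simplified model of "take a reference only while the object is live". */
static bool lun_tryget_live(struct toy_lun *lun)
{
	if (atomic_load(&lun->dying))
		return false;		/* teardown started, refuse new users */
	atomic_fetch_add(&lun->refs, 1);
	return true;
}

static void lun_put(struct toy_lun *lun)
{
	atomic_fetch_sub(&lun->refs, 1);
}

int main(void)
{
	struct toy_lun lun = { .refs = 1 };

	/* XCOPY side: pin the remote LUN before issuing the copy I/O ... */
	if (lun_tryget_live(&lun)) {
		printf("copy runs with refs=%d\n", atomic_load(&lun.refs));
		/* ... and drop the reference once the copy is finished. */
		lun_put(&lun);
	}
	printf("after put refs=%d\n", atomic_load(&lun.refs));
	return 0;
}
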
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index a5afbe6dee68..7cb7efe62b01 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -538,7 +538,8 @@ void optee_free_pages_list(void *list, size_t num_entries)
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
- return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
+ return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
+ ((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
#elif defined(CONFIG_ARM64)
return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 2f254f957b0a..1d71fcb13dba 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -87,16 +87,6 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
return rc;
p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
p->u.memref.shm = shm;
-
- /* Check that the memref is covered by the shm object */
- if (p->u.memref.size) {
- size_t o = p->u.memref.shm_offs +
- p->u.memref.size - 1;
-
- rc = tee_shm_get_pa(shm, o, NULL);
- if (rc)
- return rc;
- }
break;
case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 9df6b7260466..8592ef694a5a 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -278,11 +278,11 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
int i;
struct freq_table *freq_table = cpufreq_cdev->freq_table;
- for (i = 1; i <= cpufreq_cdev->max_level; i++)
- if (power > freq_table[i].power)
+ for (i = 0; i < cpufreq_cdev->max_level; i++)
+ if (power >= freq_table[i].power)
break;
- return freq_table[i - 1].frequency;
+ return freq_table[i].frequency;
}
/**
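
The loop-bounds fix in cpu_power_to_freq() above walks the descending OPP table from index 0 and stops at the first entry whose power fits the budget, falling back to the lowest OPP instead of reading past the table. A standalone sketch with made-up table values:

#include <stdio.h>

struct freq_entry {
	unsigned int frequency;	/* kHz */
	unsigned int power;	/* mW  */
};

/* Sorted from highest to lowest OPP, as the cooling device keeps it. */
static const struct freq_entry table[] = {
	{ 1800000, 900 },
	{ 1400000, 600 },
	{ 1000000, 350 },
	{  600000, 150 },
};
#define MAX_LEVEL (sizeof(table) / sizeof(table[0]) - 1)

/* Highest frequency whose power fits 'power'; if even the lowest OPP is
 * over budget, return the last valid entry rather than table[-1]. */
static unsigned int power_to_freq(unsigned int power)
{
	unsigned int i;

	for (i = 0; i < MAX_LEVEL; i++)
		if (power >= table[i].power)
			break;
	return table[i].frequency;
}

int main(void)
{
	printf("%u kHz for 700 mW\n", power_to_freq(700));
	printf("%u kHz for 100 mW\n", power_to_freq(100));
	return 0;
}
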
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index d3469fbc5207..26d7387f5834 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -94,6 +94,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
int total_instance = 0;
int cur_trip_level = get_trip_level(tz);
+ mutex_lock(&tz->lock);
+
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if (instance->trip != trip)
continue;
@@ -122,6 +124,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
mutex_unlock(&instance->cdev->lock);
thermal_cdev_update(cdev);
}
+
+ mutex_unlock(&tz->lock);
return 0;
}
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index f64643629d8b..0691f260f6ea 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -431,8 +431,7 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank)
u32 raw;
for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) {
- raw = readl(mt->thermal_base +
- conf->msr[conf->bank_data[bank->id].sensors[i]]);
+ raw = readl(mt->thermal_base + conf->msr[i]);
temp = raw_to_mcelsius(mt,
conf->bank_data[bank->id].sensors[i],
@@ -569,8 +568,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num,
for (i = 0; i < conf->bank_data[num].num_sensors; i++)
writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]],
- mt->thermal_base +
- conf->adcpnp[conf->bank_data[num].sensors[i]]);
+ mt->thermal_base + conf->adcpnp[i]);
writel((1 << conf->bank_data[num].num_sensors) - 1,
mt->thermal_base + TEMP_MONCTL0);
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 4dc30e7890f6..140386d7c75a 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -505,8 +505,10 @@ static int rcar_thermal_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM,
mres++);
common->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(common->base))
- return PTR_ERR(common->base);
+ if (IS_ERR(common->base)) {
+ ret = PTR_ERR(common->base);
+ goto error_unregister;
+ }
idle = 0; /* polling delay is not needed */
}
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index aa99edb4dff7..4dce4a8f71ed 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -770,6 +770,9 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
{
struct cooling_dev_stats *stats = cdev->stats;
+ if (!stats)
+ return;
+
spin_lock(&stats->lock);
if (stats->state == new_state)
diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
index c12211eaaac4..0b9f835d931f 100644
--- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
@@ -46,20 +46,21 @@ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = {
/*
* Temperature values in milli degree celsius
- * ADC code values from 530 to 923
+ * ADC code values from 13 to 107, see TRM
+ * "18.4.10.2.3 ADC Codes Versus Temperature".
*/
static const int
omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = {
- -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000,
- -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000,
- -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000,
- 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000,
- 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000,
- 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000,
- 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000,
- 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000,
- 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000,
- 117000, 118000, 120000, 122000, 123000,
+ -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000,
+ -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000,
+ -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000,
+ 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500,
+ 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000,
+ 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000,
+ 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000,
+ 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000,
+ 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000,
+ 115000, 117000, 118500, 120000, 122000, 123500, 125000,
};
/* OMAP4430 data */
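
The table above maps ADC codes 13..107 to millidegrees Celsius; codes outside that range are now rejected rather than looked up. A small sketch of the clamped lookup, using only an excerpt of the table and stand-in function names:

#include <stdio.h>

#define ADC_START_VALUE 13
#define ADC_END_VALUE   107

/* Excerpt of the table above, indexed from ADC_START_VALUE. */
static const int adc_to_temp[] = {
	-40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000,
};

/* Reject codes the TRM does not document instead of indexing past the
 * table, which is what produced the bogus thermal shutdowns. */
static int raw_to_mcelsius(int raw, int *temp)
{
	if (raw < ADC_START_VALUE || raw > ADC_END_VALUE)
		return -1;	/* outside the documented range */
	if (raw - ADC_START_VALUE >= (int)(sizeof(adc_to_temp) / sizeof(adc_to_temp[0])))
		return -1;	/* outside this excerpt */
	*temp = adc_to_temp[raw - ADC_START_VALUE];
	return 0;
}

int main(void)
{
	int t;

	if (!raw_to_mcelsius(15, &t))
		printf("code 15 -> %d millidegrees C\n", t);
	if (raw_to_mcelsius(5, &t))
		printf("code 5 rejected (below ADC_START_VALUE)\n");
	return 0;
}
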
diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
index b87c8659ec60..8a081abce4b5 100644
--- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
@@ -67,9 +67,13 @@
* and thresholds for OMAP4430.
*/
-/* ADC conversion table limits */
-#define OMAP4430_ADC_START_VALUE 0
-#define OMAP4430_ADC_END_VALUE 127
+/*
+ * ADC conversion table limits. Ignore values outside the TRM listed
+ * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter
+ * "18.4.10.2.3 ADC Codes Versus Temperature".
+ */
+#define OMAP4430_ADC_START_VALUE 13
+#define OMAP4430_ADC_END_VALUE 107
/* bandgap clock limits (no control on 4430) */
#define OMAP4430_MAX_FREQ 32768
#define OMAP4430_MIN_FREQ 32768
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index b4f981daeaf2..343da0031299 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -183,7 +183,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
data = ti_bandgap_get_sensor_data(bgp, id);
- if (!data || IS_ERR(data))
+ if (IS_ERR_OR_NULL(data))
data = ti_thermal_build_data(bgp, id);
if (!data)
@@ -210,7 +210,7 @@ int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id)
data = ti_bandgap_get_sensor_data(bgp, id);
- if (data && data->ti_thermal) {
+ if (!IS_ERR_OR_NULL(data) && data->ti_thermal) {
if (data->our_zone)
thermal_zone_device_unregister(data->ti_thermal);
}
@@ -276,7 +276,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
data = ti_bandgap_get_sensor_data(bgp, id);
- if (data) {
+ if (!IS_ERR_OR_NULL(data)) {
cpufreq_cooling_unregister(data->cool_dev);
if (data->policy)
cpufreq_cpu_put(data->policy);
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
index f2701194f810..994ac759d364 100644
--- a/drivers/thunderbolt/dma_port.c
+++ b/drivers/thunderbolt/dma_port.c
@@ -367,15 +367,15 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
void *buf, size_t size)
{
unsigned int retries = DMA_PORT_RETRIES;
- unsigned int offset;
-
- offset = address & 3;
- address = address & ~3;
do {
- u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
+ unsigned int offset;
+ size_t nbytes;
int ret;
+ offset = address & 3;
+ nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);
+
ret = dma_port_flash_read_block(dma, address, dma->buf,
ALIGN(nbytes, 4));
if (ret) {
@@ -387,6 +387,7 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
return ret;
}
+ nbytes -= offset;
memcpy(buf, dma->buf + offset, nbytes);
size -= nbytes;
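
The reworked loop above recomputes the dword alignment for every chunk: it requests ALIGN(size + offset, 4) bytes from the block read and then skips the leading offset bytes when copying to the caller. A standalone sketch of that per-chunk arithmetic, with BLOCK_MAX standing in for MAIL_DATA_DWORDS * 4:

#include <stdio.h>
#include <stddef.h>

#define BLOCK_MAX 64
#define ALIGN4(x) (((x) + 3) & ~(size_t)3)

/* For one iteration of an unaligned flash read: how many bytes to ask
 * the hardware for, how many to skip, and how many reach the caller. */
static void plan_chunk(unsigned int address, size_t size,
		       size_t *request, size_t *usable, unsigned int *skip)
{
	unsigned int offset = address & 3;
	size_t nbytes = size + offset;

	if (nbytes > BLOCK_MAX)
		nbytes = BLOCK_MAX;
	*request = ALIGN4(nbytes);	/* covered by the aligned block read */
	*skip = offset;			/* leading alignment bytes to drop   */
	*usable = nbytes - offset;	/* what lands in the caller's buffer */
}

int main(void)
{
	size_t request, usable;
	unsigned int skip;

	plan_chunk(0x1003, 10, &request, &usable, &skip);
	printf("request %zu bytes, skip %u, copy %zu\n", request, skip, usable);
	return 0;
}
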
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 8490a1b6b615..2b83d8b02f81 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -801,9 +801,11 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
* connected another host to the same port, remove the switch
* first.
*/
- sw = get_switch_at_route(tb->root_switch, route);
- if (sw)
+ sw = tb_switch_find_by_route(tb, route);
+ if (sw) {
remove_switch(sw);
+ tb_switch_put(sw);
+ }
sw = tb_switch_find_by_link_depth(tb, link, depth);
if (!sw) {
@@ -1146,9 +1148,11 @@ icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
* connected another host to the same port, remove the switch
* first.
*/
- sw = get_switch_at_route(tb->root_switch, route);
- if (sw)
+ sw = tb_switch_find_by_route(tb, route);
+ if (sw) {
remove_switch(sw);
+ tb_switch_put(sw);
+ }
sw = tb_switch_find_by_route(tb, get_parent_route(route));
if (!sw) {
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index d436a1534fc2..384623c49cfe 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -408,12 +408,23 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
ring->vector = ret;
- ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
- if (ring->irq < 0)
- return ring->irq;
+ ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
+ if (ret < 0)
+ goto err_ida_remove;
+
+ ring->irq = ret;
irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
- return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
+ ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
+ if (ret)
+ goto err_ida_remove;
+
+ return 0;
+
+err_ida_remove:
+ ida_simple_remove(&nhi->msix_ida, ring->vector);
+
+ return ret;
}
static void ring_release_msix(struct tb_ring *ring)
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 42d90ceec279..010a50ac4881 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -664,24 +664,6 @@ int tb_switch_reset(struct tb *tb, u64 route)
return res.err;
}
-struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
-{
- u8 next_port = route; /*
- * Routes use a stride of 8 bits,
- * eventhough a port index has 6 bits at most.
- * */
- if (route == 0)
- return sw;
- if (next_port > sw->config.max_port_number)
- return NULL;
- if (tb_is_upstream_port(&sw->ports[next_port]))
- return NULL;
- if (!sw->ports[next_port].remote)
- return NULL;
- return get_switch_at_route(sw->ports[next_port].remote->sw,
- route >> TB_ROUTE_SHIFT);
-}
-
/**
* tb_plug_events_active() - enable/disable plug events on a switch
*
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 1424581fd9af..146f261bf2c3 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -258,7 +258,7 @@ static void tb_handle_hotplug(struct work_struct *work)
if (!tcm->hotplug_active)
goto out; /* during init, suspend or shutdown */
- sw = get_switch_at_route(tb->root_switch, ev->route);
+ sw = tb_switch_find_by_route(tb, ev->route);
if (!sw) {
tb_warn(tb,
"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
@@ -269,14 +269,14 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_warn(tb,
"hotplug event from non existent port %llx:%x (unplug: %d)\n",
ev->route, ev->port, ev->unplug);
- goto out;
+ goto put_sw;
}
port = &sw->ports[ev->port];
if (tb_is_upstream_port(port)) {
tb_warn(tb,
"hotplug event for upstream port %llx:%x (unplug: %d)\n",
ev->route, ev->port, ev->unplug);
- goto out;
+ goto put_sw;
}
if (ev->unplug) {
if (port->remote) {
@@ -306,6 +306,9 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_activate_pcie_devices(tb);
}
}
+
+put_sw:
+ tb_switch_put(sw);
out:
mutex_unlock(&tb->lock);
kfree(ev);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 7a0ee9836a8a..d927cf7b14d2 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -397,7 +397,6 @@ void tb_switch_suspend(struct tb_switch *sw);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb *tb, u64 route);
void tb_sw_set_unplugged(struct tb_switch *sw);
-struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index befe75490697..4eb51a123a6f 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -774,6 +774,7 @@ static void enumerate_services(struct tb_xdomain *xd)
id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
if (id < 0) {
+ kfree(svc->key);
kfree(svc);
break;
}
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index eea4049b5dcc..ca5004ae3024 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -136,6 +136,21 @@ static int find_console_handle(void)
return 1;
}
+static unsigned int local_ev_byte_channel_send(unsigned int handle,
+ unsigned int *count,
+ const char *p)
+{
+ char buffer[EV_BYTE_CHANNEL_MAX_BYTES];
+ unsigned int c = *count;
+
+ if (c < sizeof(buffer)) {
+ memcpy(buffer, p, c);
+ memset(&buffer[c], 0, sizeof(buffer) - c);
+ p = buffer;
+ }
+ return ev_byte_channel_send(handle, count, p);
+}
+
/*************************** EARLY CONSOLE DRIVER ***************************/
#ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC
@@ -154,7 +169,7 @@ static void byte_channel_spin_send(const char data)
do {
count = 1;
- ret = ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
+ ret = local_ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
&count, &data);
} while (ret == EV_EAGAIN);
}
@@ -221,7 +236,7 @@ static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s,
while (count) {
len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES);
do {
- ret = ev_byte_channel_send(handle, &len, s);
+ ret = local_ev_byte_channel_send(handle, &len, s);
} while (ret == EV_EAGAIN);
count -= len;
s += len;
@@ -401,7 +416,7 @@ static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc)
CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE),
EV_BYTE_CHANNEL_MAX_BYTES);
- ret = ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail);
+ ret = local_ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail);
/* 'len' is valid only if the return code is 0 or EV_EAGAIN */
if (!ret || (ret == EV_EAGAIN))
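
local_ev_byte_channel_send() above copies short payloads into a zeroed, fixed-size buffer because the hypervisor interface always reads EV_BYTE_CHANNEL_MAX_BYTES from the source pointer. A userspace sketch of the same padding, with fake_send() standing in for the real hypercall:

#include <stdio.h>
#include <string.h>

#define MAX_BYTES 16	/* plays the role of EV_BYTE_CHANNEL_MAX_BYTES */

/* Stand-in for the hypercall: it always reads MAX_BYTES from 'p',
 * no matter how small *count is, hence the padding below. */
static unsigned int fake_send(unsigned int *count, const char *p)
{
	(void)p;	/* a real hypercall would consume MAX_BYTES here */
	printf("sending %u byte(s) from a %d-byte window\n", *count, MAX_BYTES);
	return 0;
}

static unsigned int padded_send(unsigned int *count, const char *p)
{
	char buffer[MAX_BYTES];
	unsigned int c = *count;

	if (c < sizeof(buffer)) {
		memcpy(buffer, p, c);			   /* caller's short payload */
		memset(&buffer[c], 0, sizeof(buffer) - c); /* zero the tail */
		p = buffer;
	}
	return fake_send(count, p);
}

int main(void)
{
	unsigned int count = 5;

	padded_send(&count, "hello");	/* safe: reads from the padded copy */
	return 0;
}
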
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 27284a2dcd2b..cdcc64ea2554 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -302,10 +302,6 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
vtermnos[index] = vtermno;
cons_ops[index] = ops;
- /* reserve all indices up to and including this index */
- if (last_hvc < index)
- last_hvc = index;
-
/* check if we need to re-register the kernel console */
hvc_check_console(index);
@@ -375,15 +371,14 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
* tty fields and return the kref reference.
*/
if (rc) {
- tty_port_tty_set(&hp->port, NULL);
- tty->driver_data = NULL;
- tty_port_put(&hp->port);
printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
- } else
+ } else {
/* We are ready... raise DTR/RTS */
if (C_BAUD(tty))
if (hp->ops->dtr_rts)
hp->ops->dtr_rts(hp, 1);
+ tty_port_set_initialized(&hp->port, true);
+ }
/* Force wakeup of the polling thread */
hvc_kick();
@@ -393,22 +388,12 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
static void hvc_close(struct tty_struct *tty, struct file * filp)
{
- struct hvc_struct *hp;
+ struct hvc_struct *hp = tty->driver_data;
unsigned long flags;
if (tty_hung_up_p(filp))
return;
- /*
- * No driver_data means that this close was issued after a failed
- * hvc_open by the tty layer's release_dev() function and we can just
- * exit cleanly because the kref reference wasn't made.
- */
- if (!tty->driver_data)
- return;
-
- hp = tty->driver_data;
-
spin_lock_irqsave(&hp->port.lock, flags);
if (--hp->port.count == 0) {
@@ -416,6 +401,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
/* We are done with the tty pointer now. */
tty_port_tty_set(&hp->port, NULL);
+ if (!tty_port_initialized(&hp->port))
+ return;
+
if (C_HUPCL(tty))
if (hp->ops->dtr_rts)
hp->ops->dtr_rts(hp, 0);
@@ -432,6 +420,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
* waking periodically to check chars_in_buffer().
*/
tty_wait_until_sent(tty, HVC_CLOSE_WAIT);
+ tty_port_set_initialized(&hp->port, false);
} else {
if (hp->port.count < 0)
printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
@@ -960,13 +949,22 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
cons_ops[i] == hp->ops)
break;
- /* no matching slot, just use a counter */
- if (i >= MAX_NR_HVC_CONSOLES)
- i = ++last_hvc;
+ if (i >= MAX_NR_HVC_CONSOLES) {
+
+ /* find 'empty' slot for console */
+ for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++) {
+ }
+
+ /* no matching slot, just use a counter */
+ if (i == MAX_NR_HVC_CONSOLES)
+ i = ++last_hvc + MAX_NR_HVC_CONSOLES;
+ }
hp->index = i;
- cons_ops[i] = ops;
- vtermnos[i] = vtermno;
+ if (i < MAX_NR_HVC_CONSOLES) {
+ cons_ops[i] = ops;
+ vtermnos[i] = vtermno;
+ }
list_add_tail(&(hp->next), &hvc_structs);
mutex_unlock(&hvc_structs_mutex);
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index cb4db1b3ca3c..7853c6375325 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -1218,13 +1218,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);
- /*
- * This line is important because it tells hvcs_open that this
- * device needs to be re-configured the next time hvcs_open is
- * called.
- */
- tty->driver_data = NULL;
-
free_irq(irq, hvcsd);
return;
} else if (hvcsd->port.count < 0) {
@@ -1239,6 +1232,13 @@ static void hvcs_cleanup(struct tty_struct * tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
+ /*
+ * This line is important because it tells hvcs_open that this
+ * device needs to be re-configured the next time hvcs_open is
+ * called.
+ */
+ tty->driver_data = NULL;
+
tty_port_put(&hvcsd->port);
}
diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c
index cf20616340a1..fe569f6294a2 100644
--- a/drivers/tty/ipwireless/network.c
+++ b/drivers/tty/ipwireless/network.c
@@ -117,7 +117,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
skb->len,
notify_packet_sent,
network);
- if (ret == -1) {
+ if (ret < 0) {
skb_pull(skb, 2);
return 0;
}
@@ -134,7 +134,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
notify_packet_sent,
network);
kfree(buf);
- if (ret == -1)
+ if (ret < 0)
return 0;
}
kfree_skb(skb);
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 1ef751c27ac6..cb0497184330 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -218,7 +218,7 @@ static int ipw_write(struct tty_struct *linux_tty,
ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS,
buf, count,
ipw_write_packet_sent_callback, tty);
- if (ret == -1) {
+ if (ret < 0) {
mutex_unlock(&tty->ipw_tty_mutex);
return 0;
}
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 86b7e20ffd7f..5e9457d19927 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -665,11 +665,10 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
* FIXME: lock against link layer control transmissions
*/
-static void gsm_data_kick(struct gsm_mux *gsm)
+static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci)
{
struct gsm_msg *msg, *nmsg;
int len;
- int skip_sof = 0;
list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) {
if (gsm->constipated && msg->addr)
@@ -691,18 +690,23 @@ static void gsm_data_kick(struct gsm_mux *gsm)
print_hex_dump_bytes("gsm_data_kick: ",
DUMP_PREFIX_OFFSET,
gsm->txframe, len);
-
- if (gsm->output(gsm, gsm->txframe + skip_sof,
- len - skip_sof) < 0)
+ if (gsm->output(gsm, gsm->txframe, len) < 0)
break;
/* FIXME: Can eliminate one SOF in many more cases */
gsm->tx_bytes -= msg->len;
- /* For a burst of frames skip the extra SOF within the
- burst */
- skip_sof = 1;
list_del(&msg->list);
kfree(msg);
+
+ if (dlci) {
+ tty_port_tty_wakeup(&dlci->port);
+ } else {
+ int i = 0;
+
+ for (i = 0; i < NUM_DLCI; i++)
+ if (gsm->dlci[i])
+ tty_port_tty_wakeup(&gsm->dlci[i]->port);
+ }
}
}
@@ -754,7 +758,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
/* Add to the actual output queue */
list_add_tail(&msg->list, &gsm->tx_list);
gsm->tx_bytes += msg->len;
- gsm_data_kick(gsm);
+ gsm_data_kick(gsm, dlci);
}
/**
@@ -1215,7 +1219,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
gsm_control_reply(gsm, CMD_FCON, NULL, 0);
/* Kick the link in case it is idling */
spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm);
+ gsm_data_kick(gsm, NULL);
spin_unlock_irqrestore(&gsm->tx_lock, flags);
break;
case CMD_FCOFF:
@@ -2412,7 +2416,7 @@ static void gsmld_write_wakeup(struct tty_struct *tty)
/* Queue poll */
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm);
+ gsm_data_kick(gsm, NULL);
if (gsm->tx_bytes < TX_THRESH_LO) {
gsm_dlci_data_sweep(gsm);
}
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 00099a8439d2..c6a1d8c4e689 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -120,10 +120,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
spin_lock_irqsave(&to->port->lock, flags);
/* Stuff the data into the input queue of the other end */
c = tty_insert_flip_string(to->port, buf, c);
+ spin_unlock_irqrestore(&to->port->lock, flags);
/* And shovel */
if (c)
tty_flip_buffer_push(to->port);
- spin_unlock_irqrestore(&to->port->lock, flags);
}
return c;
}
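
The pty_write() hunk above moves tty_flip_buffer_push() outside the destination port's lock: data is queued under the lock, the lock is dropped, and only then is the reader kicked. A toy pthread model of that ordering; all names here are invented for the example:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct toy_port {
	pthread_mutex_t lock;
	char buf[256];
	size_t len;
};

static void notify_reader(size_t n)
{
	printf("reader woken for %zu byte(s)\n", n);
}

/* Queue into the peer's buffer under its lock, but do the wakeup only
 * after dropping the lock, mirroring the reordering in the hunk above. */
static size_t toy_write(struct toy_port *to, const char *data, size_t n)
{
	size_t copied;

	pthread_mutex_lock(&to->lock);
	copied = n < sizeof(to->buf) - to->len ? n : sizeof(to->buf) - to->len;
	memcpy(to->buf + to->len, data, copied);
	to->len += copied;
	pthread_mutex_unlock(&to->lock);	/* drop the lock first ... */

	if (copied)
		notify_reader(copied);		/* ... then kick the reader */
	return copied;
}

int main(void)
{
	struct toy_port port = { .lock = PTHREAD_MUTEX_INITIALIZER };

	toy_write(&port, "ping", 4);
	return 0;
}
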
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 27aeca30eeae..6133830f52a3 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -632,18 +632,21 @@ init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
tty_port_init(&info->port);
info->port.ops = &rocket_port_ops;
info->flags &= ~ROCKET_MODE_MASK;
- switch (pc104[board][line]) {
- case 422:
- info->flags |= ROCKET_MODE_RS422;
- break;
- case 485:
- info->flags |= ROCKET_MODE_RS485;
- break;
- case 232:
- default:
+ if (board < ARRAY_SIZE(pc104) && line < ARRAY_SIZE(pc104_1))
+ switch (pc104[board][line]) {
+ case 422:
+ info->flags |= ROCKET_MODE_RS422;
+ break;
+ case 485:
+ info->flags |= ROCKET_MODE_RS485;
+ break;
+ case 232:
+ default:
+ info->flags |= ROCKET_MODE_RS232;
+ break;
+ }
+ else
info->flags |= ROCKET_MODE_RS232;
- break;
- }
info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR;
if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) {
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index b9567ef843fc..d2df7d71d666 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -527,6 +527,7 @@ static void __init serial8250_isa_init_ports(void)
*/
up->mcr_mask = ~ALPHA_KLUDGE_MCR;
up->mcr_force = ALPHA_KLUDGE_MCR;
+ serial8250_set_defaults(up);
}
/* chain base port ops to support Remote Supervisor Adapter */
@@ -550,7 +551,6 @@ static void __init serial8250_isa_init_ports(void)
port->membase = old_serial_port[i].iomem_base;
port->iotype = old_serial_port[i].io_type;
port->regshift = old_serial_port[i].iomem_reg_shift;
- serial8250_set_defaults(up);
port->irqflags |= irqflag;
if (serial8250_isa_config != NULL)
@@ -1062,8 +1062,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
serial8250_apply_quirks(uart);
ret = uart_add_one_port(&serial8250_reg,
&uart->port);
- if (ret == 0)
- ret = uart->port.line;
+ if (ret)
+ goto err;
+
+ ret = uart->port.line;
} else {
dev_info(uart->port.dev,
"skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
@@ -1088,6 +1090,11 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
mutex_unlock(&serial_mutex);
return ret;
+
+err:
+ uart->port.dev = NULL;
+ mutex_unlock(&serial_mutex);
+ return ret;
}
EXPORT_SYMBOL(serial8250_register_8250_port);
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 870735776437..195f58c5b477 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -227,7 +227,17 @@ static void setup_gpio(struct pci_dev *pcidev, u8 __iomem *p)
* devices will export them as GPIOs, so we pre-configure them safely
* as inputs.
*/
- u8 dir = pcidev->vendor == PCI_VENDOR_ID_EXAR ? 0xff : 0x00;
+
+ u8 dir = 0x00;
+
+ if ((pcidev->vendor == PCI_VENDOR_ID_EXAR) &&
+ (pcidev->subsystem_vendor != PCI_VENDOR_ID_SEALEVEL)) {
+ // Configure GPIO as inputs for Commtech adapters
+ dir = 0xff;
+ } else {
+ // Configure GPIO as outputs for SeaLevel adapters
+ dir = 0x00;
+ }
writeb(0x00, p + UART_EXAR_MPIOINT_7_0);
writeb(0x00, p + UART_EXAR_MPIOLVL_7_0);
@@ -628,6 +638,24 @@ static const struct exar8250_board pbn_exar_XR17V35x = {
.exit = pci_xr17v35x_exit,
};
+static const struct exar8250_board pbn_fastcom35x_2 = {
+ .num_ports = 2,
+ .setup = pci_xr17v35x_setup,
+ .exit = pci_xr17v35x_exit,
+};
+
+static const struct exar8250_board pbn_fastcom35x_4 = {
+ .num_ports = 4,
+ .setup = pci_xr17v35x_setup,
+ .exit = pci_xr17v35x_exit,
+};
+
+static const struct exar8250_board pbn_fastcom35x_8 = {
+ .num_ports = 8,
+ .setup = pci_xr17v35x_setup,
+ .exit = pci_xr17v35x_exit,
+};
+
static const struct exar8250_board pbn_exar_XR17V4358 = {
.num_ports = 12,
.setup = pci_xr17v35x_setup,
@@ -698,9 +726,9 @@ static const struct pci_device_id exar_pci_tbl[] = {
EXAR_DEVICE(EXAR, EXAR_XR17V358, pbn_exar_XR17V35x),
EXAR_DEVICE(EXAR, EXAR_XR17V4358, pbn_exar_XR17V4358),
EXAR_DEVICE(EXAR, EXAR_XR17V8358, pbn_exar_XR17V8358),
- EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_exar_XR17V35x),
- EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_exar_XR17V35x),
- EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_exar_XR17V35x),
+ EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_fastcom35x_2),
+ EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_fastcom35x_4),
+ EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_fastcom35x_8),
EXAR_DEVICE(COMMTECH, COMMTECH_4222PCI335, pbn_fastcom335_2),
EXAR_DEVICE(COMMTECH, COMMTECH_4224PCI335, pbn_fastcom335_4),
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index c3f933d10295..cc4f0c5e5ddf 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -36,7 +36,20 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned long flags;
unsigned int baud, quot;
- serial8250_do_set_termios(port, termios, old);
+ /*
+ * Store the requested baud rate before calling the generic 8250
+ * set_termios method. The standard 8250 port expects baud rates
+ * no higher than (uartclk / 16), so the requested baud is clamped
+ * if it exceeds that limit. The Mediatek 8250 port supports
+ * speeds higher than that, so restore the original baud rate
+ * after calling the generic set_termios method and recalculate
+ * the speed later in this method.
+ */
+ baud = tty_termios_baud_rate(termios);
+
+ serial8250_do_set_termios(port, termios, NULL);
+
+ tty_termios_encode_baud_rate(termios, baud, baud);
/*
* Mediatek UARTs use an extra highspeed register (UART_MTK_HIGHS)
@@ -76,6 +89,11 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
*/
spin_lock_irqsave(&port->lock, flags);
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
/* set DLAB we have cval saved in up->lcr from the call to the core */
serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB);
serial_dl_write(up, quot);
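
The comment above describes the trick: remember the requested baud rate, let the generic 8250 set_termios clamp it to uartclk / 16, then restore the request so the Mediatek highspeed path can program the real rate. A minimal userspace model of that save/restore, with simplified stand-in structs:

#include <stdio.h>

struct toy_termios { unsigned int baud; };
struct toy_port    { unsigned int uartclk; };

/* Models the standard 8250 path: it cannot represent anything faster
 * than uartclk / 16 and silently clamps the request. */
static void generic_set_termios(struct toy_port *port, struct toy_termios *t)
{
	unsigned int max = port->uartclk / 16;

	if (t->baud > max)
		t->baud = max;
	/* ... a standard divisor would be programmed here ... */
}

/* Models the wrapper: save the request, run the generic code, then put
 * the requested rate back and program the highspeed divisor from it. */
static void mtk_set_termios(struct toy_port *port, struct toy_termios *t)
{
	unsigned int requested = t->baud;

	generic_set_termios(port, t);
	t->baud = requested;	/* undo the clamp */
	printf("programming highspeed divisor for %u baud\n", t->baud);
}

int main(void)
{
	struct toy_port port = { .uartclk = 26000000 };
	struct toy_termios t = { .baud = 3000000 };

	mtk_set_termios(&port, &t);	/* 3 Mbaud > 26 MHz / 16 */
	return 0;
}
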
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index a019286f8bb6..c1166b45c288 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -163,11 +163,6 @@ static void omap_8250_mdr1_errataset(struct uart_8250_port *up,
struct omap8250_priv *priv)
{
u8 timeout = 255;
- u8 old_mdr1;
-
- old_mdr1 = serial_in(up, UART_OMAP_MDR1);
- if (old_mdr1 == priv->mdr1)
- return;
serial_out(up, UART_OMAP_MDR1, priv->mdr1);
udelay(2);
@@ -781,7 +776,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
count = dma->rx_size - state.residue;
-
+ if (count < dma->rx_size)
+ dmaengine_terminate_async(dma->rxchan);
+ if (!count)
+ goto unlock;
ret = tty_insert_flip_string(tty_port, dma->rx_buf, count);
p->port.icount.rx += ret;
@@ -843,7 +841,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
__dma_rx_do_complete(p);
- dmaengine_terminate_all(dma->rxchan);
}
static int omap_8250_rx_dma(struct uart_8250_port *p)
@@ -1227,11 +1224,11 @@ static int omap8250_probe(struct platform_device *pdev)
spin_lock_init(&priv->rx_dma_lock);
device_init_wakeup(&pdev->dev, true);
+ pm_runtime_enable(&pdev->dev);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
pm_runtime_irq_safe(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index bbe5cba21522..725e5842b8ac 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1690,12 +1690,6 @@ pci_wch_ch38x_setup(struct serial_private *priv,
#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
-#define PCI_VENDOR_ID_PERICOM 0x12D8
-#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
-#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952
-#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
-#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
-
#define PCI_VENDOR_ID_ACCESIO 0x494f
#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051
#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053
@@ -5242,6 +5236,17 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch384_4 },
+ /*
+ * Realtek RealManage
+ */
+ { PCI_VENDOR_ID_REALTEK, 0x816a,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_REALTEK, 0x816b,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_b0_1_115200 },
+
/* Fintek PCI serial cards */
{ PCI_DEVICE(0x1c29, 0x1104), .driver_data = pbn_fintek_4 },
{ PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 },
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 5a04d4ddca73..60ca19eca1f6 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1861,6 +1861,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
unsigned char status;
unsigned long flags;
struct uart_8250_port *up = up_to_u8250p(port);
+ bool skip_rx = false;
if (iir & UART_IIR_NO_INT)
return 0;
@@ -1869,7 +1870,20 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
status = serial_port_in(port, UART_LSR);
- if (status & (UART_LSR_DR | UART_LSR_BI)) {
+ /*
+ * If port is stopped and there are no error conditions in the
+ * FIFO, then don't drain the FIFO, as this may lead to TTY buffer
+ * overflow. Not servicing the RX FIFO triggers auto HW flow
+ * control when FIFO occupancy reaches the preset threshold,
+ * thus halting RX. This only works when auto HW flow control is
+ * available.
+ */
+ if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) &&
+ (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) &&
+ !(port->read_status_mask & UART_LSR_DR))
+ skip_rx = true;
+
+ if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);
}
@@ -2259,6 +2273,10 @@ int serial8250_do_startup(struct uart_port *port)
if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
unsigned char iir1;
+
+ if (port->irqflags & IRQF_SHARED)
+ disable_irq_nosync(port->irq);
+
/*
* Test for UARTs that do not reassert THRE when the
* transmitter is idle and the interrupt has already
@@ -2268,8 +2286,6 @@ int serial8250_do_startup(struct uart_port *port)
* allow register changes to become visible.
*/
spin_lock_irqsave(&port->lock, flags);
- if (up->port.irqflags & IRQF_SHARED)
- disable_irq_nosync(port->irq);
wait_for_xmitr(up, UART_LSR_THRE);
serial_port_out_sync(port, UART_IER, UART_IER_THRI);
@@ -2281,9 +2297,10 @@ int serial8250_do_startup(struct uart_port *port)
iir = serial_port_in(port, UART_IIR);
serial_port_out(port, UART_IER, 0);
+ spin_unlock_irqrestore(&port->lock, flags);
+
if (port->irqflags & IRQF_SHARED)
enable_irq(port->irq);
- spin_unlock_irqrestore(&port->lock, flags);
/*
* If the interrupt is not reasserted, or we otherwise
@@ -2628,6 +2645,8 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
{
+ unsigned int tolerance = port->uartclk / 100;
+
/*
* Ask the core to calculate the divisor for us.
* Allow 1% tolerance at the upper limit so uart clks marginally
@@ -2636,7 +2655,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
*/
return uart_get_baud_rate(port, termios, old,
port->uartclk / 16 / UART_DIV_MAX,
- port->uartclk);
+ (port->uartclk + tolerance) / 16);
}
void
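
The new upper limit above is (uartclk + uartclk / 100) / 16, i.e. uartclk / 16 with 1% slack, so a marginally slow clock still accepts the nearest standard rate. A tiny sketch of the arithmetic:

#include <stdio.h>

/* Highest acceptable baud rate: uartclk / 16 plus 1% tolerance. */
static unsigned int max_baud(unsigned int uartclk)
{
	unsigned int tolerance = uartclk / 100;

	return (uartclk + tolerance) / 16;
}

int main(void)
{
	unsigned int clk = 1843200;	/* classic 16550 crystal */

	printf("limit without tolerance: %u\n", clk / 16);
	printf("limit with 1%% slack   : %u\n", max_baud(clk));
	return 0;
}
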
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index df8bd0c7b97d..cd13065095bc 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -9,6 +9,7 @@ menu "Serial drivers"
config SERIAL_EARLYCON
bool
+ depends on SERIAL_CORE
help
Support for early consoles with the earlycon parameter. This enables
the console before standard serial driver is probed. The console is
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index af21122dfade..1306ce5c5d9b 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -313,8 +313,9 @@ static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
*/
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
- u16 status;
unsigned int ch, flag, fifotaken;
+ int sysrq;
+ u16 status;
for (fifotaken = 0; fifotaken != 256; fifotaken++) {
status = pl011_read(uap, REG_FR);
@@ -349,10 +350,12 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
flag = TTY_FRAME;
}
- if (uart_handle_sysrq_char(&uap->port, ch & 255))
- continue;
+ spin_unlock(&uap->port.lock);
+ sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
+ spin_lock(&uap->port.lock);
- uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+ if (!sysrq)
+ uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
}
return fifotaken;
@@ -2252,9 +2255,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
clk_disable(uap->clk);
}
-static void __init
-pl011_console_get_options(struct uart_amba_port *uap, int *baud,
- int *parity, int *bits)
+static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
+ int *parity, int *bits)
{
if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
unsigned int lcr_h, ibrd, fbrd;
@@ -2287,7 +2289,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud,
}
}
-static int __init pl011_console_setup(struct console *co, char *options)
+static int pl011_console_setup(struct console *co, char *options)
{
struct uart_amba_port *uap;
int baud = 38400;
@@ -2355,8 +2357,8 @@ static int __init pl011_console_setup(struct console *co, char *options)
*
* Returns 0 if console matches; otherwise non-zero to use default matching
*/
-static int __init pl011_console_match(struct console *co, char *name, int idx,
- char *options)
+static int pl011_console_match(struct console *co, char *name, int idx,
+ char *options)
{
unsigned char iotype;
resource_size_t addr;
@@ -2585,6 +2587,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
uap->port.fifosize = uap->fifosize;
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = index;
+ spin_lock_init(&uap->port.lock);
amba_ports[index] = uap;
@@ -2593,7 +2596,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
static int pl011_register_port(struct uart_amba_port *uap)
{
- int ret;
+ int ret, i;
/* Ensure interrupts from this UART are masked and cleared */
pl011_write(0, uap, REG_IMSC);
@@ -2604,6 +2607,9 @@ static int pl011_register_port(struct uart_amba_port *uap)
if (ret < 0) {
dev_err(uap->port.dev,
"Failed to register AMBA-PL011 driver\n");
+ for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
+ if (amba_ports[i] == uap)
+ amba_ports[i] = NULL;
return ret;
}
}
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 2daccb10ae2f..4b9f42269477 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -563,7 +563,7 @@ static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c)
static int lpuart32_poll_get_char(struct uart_port *port)
{
- if (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF))
+ if (!(lpuart32_read(port, UARTWATER) >> UARTWATER_RXCNT_OFF))
return NO_POLL_CHAR;
return lpuart32_read(port, UARTDATA);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 4066cb2b79cb..7a6e26b12bf6 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1915,16 +1915,6 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
unsigned int ucr1;
unsigned long flags = 0;
int locked = 1;
- int retval;
-
- retval = clk_enable(sport->clk_per);
- if (retval)
- return;
- retval = clk_enable(sport->clk_ipg);
- if (retval) {
- clk_disable(sport->clk_per);
- return;
- }
if (sport->port.sysrq)
locked = 0;
@@ -1960,9 +1950,6 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
if (locked)
spin_unlock_irqrestore(&sport->port.lock, flags);
-
- clk_disable(sport->clk_ipg);
- clk_disable(sport->clk_per);
}
/*
@@ -2063,15 +2050,14 @@ imx_uart_console_setup(struct console *co, char *options)
retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
- clk_disable(sport->clk_ipg);
if (retval) {
- clk_unprepare(sport->clk_ipg);
+ clk_disable_unprepare(sport->clk_ipg);
goto error_console;
}
- retval = clk_prepare(sport->clk_per);
+ retval = clk_prepare_enable(sport->clk_per);
if (retval)
- clk_unprepare(sport->clk_ipg);
+ clk_disable_unprepare(sport->clk_ipg);
error_console:
return retval;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 0c35c3c5e373..c1ab0dbda8a9 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1480,10 +1480,12 @@ static int __init max310x_uart_init(void)
return ret;
#ifdef CONFIG_SPI_MASTER
- spi_register_driver(&max310x_spi_driver);
+ ret = spi_register_driver(&max310x_spi_driver);
+ if (ret)
+ uart_unregister_driver(&max310x_uart);
#endif
- return 0;
+ return ret;
}
module_init(max310x_uart_init);
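The max310x_uart_init() hunk above (like the serial_txx9_init() change later in this diff) adds the usual unwind-on-failure step: if the second registration fails, the first one is undone before the error is propagated. A small self-contained sketch of the pattern, with register_a()/register_b() as stand-ins for uart_register_driver() and spi_register_driver():

#include <stdio.h>

static int register_a(void)    { return 0; }
static void unregister_a(void) { puts("unregistered a"); }
static int register_b(void)    { return -1; }   /* pretend this one fails */

static int module_init_example(void)
{
        int ret = register_a();

        if (ret)
                return ret;

        ret = register_b();
        if (ret)
                unregister_a();         /* don't leave a half-registered module */

        return ret;
}

int main(void)
{
        printf("init returned %d\n", module_init_example());
        return 0;
}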
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index fb9d369e0f50..0515b5e6326d 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -637,6 +637,14 @@ static void wait_for_xmitr(struct uart_port *port)
(val & STAT_TX_RDY(port)), 1, 10000);
}
+static void wait_for_xmite(struct uart_port *port)
+{
+ u32 val;
+
+ readl_poll_timeout_atomic(port->membase + UART_STAT, val,
+ (val & STAT_TX_EMP), 1, 10000);
+}
+
static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
{
wait_for_xmitr(port);
@@ -664,7 +672,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
uart_console_write(port, s, count, mvebu_uart_console_putchar);
- wait_for_xmitr(port);
+ wait_for_xmite(port);
if (ier)
writel(ier, port->membase + UART_CTRL(port));
@@ -799,9 +807,6 @@ static int mvebu_uart_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (!match)
- return -ENODEV;
-
/* Assume that all UART ports have a DT alias or none has */
id = of_alias_get_id(pdev->dev.of_node, "serial");
if (!pdev->dev.of_node || id < 0)
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 4c188f4079b3..63810eefa44b 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1703,21 +1703,21 @@ static int mxs_auart_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- goto out_disable_clks;
+ goto out_iounmap;
}
s->port.irq = irq;
ret = devm_request_irq(&pdev->dev, irq, mxs_auart_irq_handle, 0,
dev_name(&pdev->dev), s);
if (ret)
- goto out_disable_clks;
+ goto out_iounmap;
platform_set_drvdata(pdev, s);
ret = mxs_auart_init_gpios(s, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize GPIOs.\n");
- goto out_disable_clks;
+ goto out_iounmap;
}
/*
@@ -1725,7 +1725,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
*/
ret = mxs_auart_request_gpio_irq(s);
if (ret)
- goto out_disable_clks;
+ goto out_iounmap;
auart_port[s->port.line] = s;
@@ -1751,6 +1751,9 @@ out_free_qpio_irq:
mxs_auart_free_gpio_irq(s);
auart_port[pdev->id] = NULL;
+out_iounmap:
+ iounmap(s->port.membase);
+
out_disable_clks:
if (is_asm9260_auart(s)) {
clk_disable_unprepare(s->clk);
@@ -1766,6 +1769,7 @@ static int mxs_auart_remove(struct platform_device *pdev)
uart_remove_one_port(&auart_driver, &s->port);
auart_port[pdev->id] = NULL;
mxs_auart_free_gpio_irq(s);
+ iounmap(s->port.membase);
if (is_asm9260_auart(s)) {
clk_disable_unprepare(s->clk);
clk_disable_unprepare(s->clk_ahb);
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 3245cdbf9116..e5ff30544bd0 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -192,8 +192,6 @@ enum {
#define PCH_UART_HAL_LOOP (PCH_UART_MCR_LOOP)
#define PCH_UART_HAL_AFE (PCH_UART_MCR_AFE)
-#define PCI_VENDOR_ID_ROHM 0x10DB
-
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
#define DEFAULT_UARTCLK 1843200 /* 1.8432 MHz */
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 4458419f053b..cd0768c3e773 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -705,7 +705,7 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
avail *= port->tx_bytes_pw;
tail = xmit->tail;
- chunk = min3(avail, pending, (size_t)(UART_XMIT_SIZE - tail));
+ chunk = min(avail, pending);
if (!chunk)
goto out_write_wakeup;
@@ -727,19 +727,21 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
memset(buf, 0, ARRAY_SIZE(buf));
tx_bytes = min_t(size_t, remaining, port->tx_bytes_pw);
- for (c = 0; c < tx_bytes ; c++)
- buf[c] = xmit->buf[tail + c];
+
+ for (c = 0; c < tx_bytes ; c++) {
+ buf[c] = xmit->buf[tail++];
+ tail &= UART_XMIT_SIZE - 1;
+ }
iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1);
i += tx_bytes;
- tail += tx_bytes;
uport->icount.tx += tx_bytes;
remaining -= tx_bytes;
port->tx_remaining -= tx_bytes;
}
- xmit->tail = tail & (UART_XMIT_SIZE - 1);
+ xmit->tail = tail;
/*
* The tx fifo watermark is level triggered and latched. Though we had
@@ -1048,7 +1050,7 @@ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport)
}
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
-static int __init qcom_geni_console_setup(struct console *co, char *options)
+static int qcom_geni_console_setup(struct console *co, char *options)
{
struct uart_port *uport;
struct qcom_geni_serial_port *port;
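The qcom_geni_serial_handle_tx() hunks above switch to masking the tail index after every byte, so a TX chunk may wrap across the end of the power-of-two circular transmit buffer instead of being cut short at it. A minimal sketch of that copy, with an 8-byte buffer and invented names:

#include <stdio.h>
#include <string.h>

#define XMIT_SIZE 8             /* must be a power of two */

int main(void)
{
        char xmit[XMIT_SIZE];
        char out[6];
        unsigned int tail = 6;  /* copy will wrap from index 7 back to 0 */

        memcpy(xmit, "ABCDEFGH", XMIT_SIZE);

        for (unsigned int c = 0; c < sizeof(out); c++) {
                out[c] = xmit[tail++];
                tail &= XMIT_SIZE - 1;  /* wrap without a division */
        }

        printf("copied \"%.*s\", tail is now %u\n",
               (int)sizeof(out), out, tail);    /* "GHABCD", tail 4 */
        return 0;
}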
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index 5690c09cc041..944a4c010579 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -195,7 +195,6 @@ struct rp2_card {
void __iomem *bar0;
void __iomem *bar1;
spinlock_t card_lock;
- struct completion fw_loaded;
};
#define RP_ID(prod) PCI_VDEVICE(RP, (prod))
@@ -664,17 +663,10 @@ static void rp2_remove_ports(struct rp2_card *card)
card->initialized_ports = 0;
}
-static void rp2_fw_cb(const struct firmware *fw, void *context)
+static int rp2_load_firmware(struct rp2_card *card, const struct firmware *fw)
{
- struct rp2_card *card = context;
resource_size_t phys_base;
- int i, rc = -ENOENT;
-
- if (!fw) {
- dev_err(&card->pdev->dev, "cannot find '%s' firmware image\n",
- RP2_FW_NAME);
- goto no_fw;
- }
+ int i, rc = 0;
phys_base = pci_resource_start(card->pdev, 1);
@@ -720,23 +712,13 @@ static void rp2_fw_cb(const struct firmware *fw, void *context)
card->initialized_ports++;
}
- release_firmware(fw);
-no_fw:
- /*
- * rp2_fw_cb() is called from a workqueue long after rp2_probe()
- * has already returned success. So if something failed here,
- * we'll just leave the now-dormant device in place until somebody
- * unbinds it.
- */
- if (rc)
- dev_warn(&card->pdev->dev, "driver initialization failed\n");
-
- complete(&card->fw_loaded);
+ return rc;
}
static int rp2_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ const struct firmware *fw;
struct rp2_card *card;
struct rp2_uart_port *ports;
void __iomem * const *bars;
@@ -747,7 +729,6 @@ static int rp2_probe(struct pci_dev *pdev,
return -ENOMEM;
pci_set_drvdata(pdev, card);
spin_lock_init(&card->card_lock);
- init_completion(&card->fw_loaded);
rc = pcim_enable_device(pdev);
if (rc)
@@ -780,21 +761,23 @@ static int rp2_probe(struct pci_dev *pdev,
return -ENOMEM;
card->ports = ports;
- rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
- IRQF_SHARED, DRV_NAME, card);
- if (rc)
+ rc = request_firmware(&fw, RP2_FW_NAME, &pdev->dev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "cannot find '%s' firmware image\n",
+ RP2_FW_NAME);
return rc;
+ }
- /*
- * Only catastrophic errors (e.g. ENOMEM) are reported here.
- * If the FW image is missing, we'll find out in rp2_fw_cb()
- * and print an error message.
- */
- rc = request_firmware_nowait(THIS_MODULE, 1, RP2_FW_NAME, &pdev->dev,
- GFP_KERNEL, card, rp2_fw_cb);
+ rc = rp2_load_firmware(card, fw);
+
+ release_firmware(fw);
+ if (rc < 0)
+ return rc;
+
+ rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
+ IRQF_SHARED, DRV_NAME, card);
if (rc)
return rc;
- dev_dbg(&pdev->dev, "waiting for firmware blob...\n");
return 0;
}
@@ -803,7 +786,6 @@ static void rp2_remove(struct pci_dev *pdev)
{
struct rp2_card *card = pci_get_drvdata(pdev);
- wait_for_completion(&card->fw_loaded);
rp2_remove_ports(card);
}
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 2a49b6d876b8..1528a7ba2bf4 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1187,14 +1187,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
struct s3c24xx_uart_info *info = ourport->info;
struct clk *clk;
unsigned long rate;
- unsigned int cnt, baud, quot, clk_sel, best_quot = 0;
+ unsigned int cnt, baud, quot, best_quot = 0;
char clkname[MAX_CLK_NAME_LENGTH];
int calc_deviation, deviation = (1 << 30) - 1;
- clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel :
- ourport->info->def_clk_sel;
for (cnt = 0; cnt < info->num_clks; cnt++) {
- if (!(clk_sel & (1 << cnt)))
+ /* Keep selected clock if provided */
+ if (ourport->cfg->clk_sel &&
+ !(ourport->cfg->clk_sel & (1 << cnt)))
continue;
sprintf(clkname, "clk_uart_baud%d", cnt);
@@ -1755,9 +1755,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
ourport->tx_irq = ret + 1;
}
- ret = platform_get_irq(platdev, 1);
- if (ret > 0)
- ourport->tx_irq = ret;
+ if (!s3c24xx_serial_has_interrupt_mask(port)) {
+ ret = platform_get_irq(platdev, 1);
+ if (ret > 0)
+ ourport->tx_irq = ret;
+ }
/*
* DMA is currently supported only on DT platforms, if DMA properties
* are specified.
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 2a5bf4c14fb8..80fa06b16d9d 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1421,6 +1421,10 @@ static void uart_set_ldisc(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *uport;
+ struct tty_port *port = &state->port;
+
+ if (!tty_port_initialized(port))
+ return;
mutex_lock(&state->port.mutex);
uport = uart_port_check(state);
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 1b4008d022bf..2f7ef64536a0 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -1284,6 +1284,9 @@ static int __init serial_txx9_init(void)
#ifdef ENABLE_SERIAL_TXX9_PCI
ret = pci_register_driver(&serial_txx9_pci_driver);
+ if (ret) {
+ platform_driver_unregister(&serial_txx9_plat_driver);
+ }
#endif
if (ret == 0)
goto out;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 9e1a6af23ca2..bfbfe0d68d82 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -873,9 +873,16 @@ static void sci_receive_chars(struct uart_port *port)
tty_insert_flip_char(tport, c, TTY_NORMAL);
} else {
for (i = 0; i < count; i++) {
- char c = serial_port_in(port, SCxRDR);
-
- status = serial_port_in(port, SCxSR);
+ char c;
+
+ if (port->type == PORT_SCIF ||
+ port->type == PORT_HSCIF) {
+ status = serial_port_in(port, SCxSR);
+ c = serial_port_in(port, SCxRDR);
+ } else {
+ c = serial_port_in(port, SCxRDR);
+ status = serial_port_in(port, SCxSR);
+ }
if (uart_handle_sysrq_char(port, c)) {
count--; i--;
continue;
@@ -1019,10 +1026,10 @@ static int scif_set_rtrg(struct uart_port *port, int rx_trig)
{
unsigned int bits;
+ if (rx_trig >= port->fifosize)
+ rx_trig = port->fifosize - 1;
if (rx_trig < 1)
rx_trig = 1;
- if (rx_trig >= port->fifosize)
- rx_trig = port->fifosize;
/* HSCIF can be set to an arbitrary level. */
if (sci_getreg(port, HSRTRGR)->size) {
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index bce4ac1787ad..50073ead5881 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -472,7 +472,10 @@ static unsigned int stm32_tx_empty(struct uart_port *port)
struct stm32_port *stm32_port = to_stm32_port(port);
struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- return readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE;
+ if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
+ return TIOCSER_TEMT;
+
+ return 0;
}
static void stm32_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -637,8 +640,9 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned int baud, bits;
u32 usartdiv, mantissa, fraction, oversampling;
tcflag_t cflag = termios->c_cflag;
- u32 cr1, cr2, cr3;
+ u32 cr1, cr2, cr3, isr;
unsigned long flags;
+ int ret;
if (!stm32_port->hw_flow_control)
cflag &= ~CRTSCTS;
@@ -647,6 +651,15 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
spin_lock_irqsave(&port->lock, flags);
+ ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
+ isr,
+ (isr & USART_SR_TC),
+ 10, 100000);
+
+ /* Send the TC error message only when ISR_TC is not set. */
+ if (ret)
+ dev_err(port->dev, "Transmission is not complete\n");
+
/* Stop serial port and reset value */
writel_relaxed(0, port->membase + ofs->cr1);
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index 30d2433e27c3..00daee7f83ee 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -123,9 +123,6 @@ struct stm32_usart_info stm32h7_info = {
/* Dummy bits */
#define USART_SR_DUMMY_RX BIT(16)
-/* USART_ICR (F7) */
-#define USART_CR_TC BIT(6)
-
/* USART_DR */
#define USART_DR_MASK GENMASK(8, 0)
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 31950a38f0fb..23f9b0cdff08 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1236,6 +1236,7 @@ static int cdns_uart_console_setup(struct console *co, char *options)
int bits = 8;
int parity = 'n';
int flow = 'n';
+ unsigned long time_out;
if (!port->membase) {
pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n",
@@ -1246,6 +1247,13 @@ static int cdns_uart_console_setup(struct console *co, char *options)
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
+ /* Wait for tx_empty before setting up the console */
+ time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT);
+
+ while (time_before(jiffies, time_out) &&
+ cdns_uart_tx_empty(port) != TIOCSER_TEMT)
+ cpu_relax();
+
return uart_set_options(port, co, baud, parity, bits, flow);
}
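The cdns_uart_console_setup() hunk above adds a bounded busy-wait for the transmitter to drain before the console options are applied. A rough userspace sketch of that deadline loop, with CLOCK_MONOTONIC and a fake tx_empty() standing in for jiffies and cdns_uart_tx_empty():

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool tx_empty(void)
{
        static int polls;
        return ++polls > 1000;          /* pretend the FIFO drains eventually */
}

static double now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

int main(void)
{
        double deadline = now_ms() + 500.0;     /* ~500 ms budget */

        while (now_ms() < deadline && !tx_empty())
                ;                               /* cpu_relax() equivalent */

        printf("proceeding with console setup (tx empty: %d)\n", tx_empty());
        return 0;
}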
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index ac8025cd4a1f..9e9343adc2b4 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2432,14 +2432,14 @@ out:
* @p: pointer to result
*
* Obtain the modem status bits from the tty driver if the feature
- * is supported. Return -EINVAL if it is not available.
+ * is supported. Return -ENOTTY if it is not available.
*
* Locking: none (up to the driver)
*/
static int tty_tiocmget(struct tty_struct *tty, int __user *p)
{
- int retval = -EINVAL;
+ int retval = -ENOTTY;
if (tty->ops->tiocmget) {
retval = tty->ops->tiocmget(tty);
@@ -2457,7 +2457,7 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p)
* @p: pointer to desired bits
*
* Set the modem status bits from the tty driver if the feature
- * is supported. Return -EINVAL if it is not available.
+ * is supported. Return -ENOTTY if it is not available.
*
* Locking: none (up to the driver)
*/
@@ -2469,7 +2469,7 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
unsigned int set, clear, val;
if (tty->ops->tiocmset == NULL)
- return -EINVAL;
+ return -ENOTTY;
retval = get_user(val, p);
if (retval)
@@ -2747,10 +2747,14 @@ void __do_SAK(struct tty_struct *tty)
struct task_struct *g, *p;
struct pid *session;
int i;
+ unsigned long flags;
if (!tty)
return;
- session = tty->session;
+
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ session = get_pid(tty->session);
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
tty_ldisc_flush(tty);
@@ -2782,6 +2786,7 @@ void __do_SAK(struct tty_struct *tty)
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
+ put_pid(session);
#endif
}
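The __do_SAK() hunks above take a reference on tty->session under ctrl_lock and drop it with put_pid() once the task walk is done, rather than using the bare pointer. A compact sketch of that acquire-under-lock, use, then release pattern, with an invented struct session and helpers in place of struct pid, get_pid() and put_pid():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct session {
        int refcount;
        int id;
};

static pthread_mutex_t ctrl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct session *current_session;

static struct session *get_session(struct session *s)
{
        if (s)
                s->refcount++;
        return s;
}

static void put_session(struct session *s)
{
        if (s && --s->refcount == 0)
                free(s);
}

int main(void)
{
        struct session *s;

        current_session = calloc(1, sizeof(*current_session));
        current_session->refcount = 1;
        current_session->id = 42;

        pthread_mutex_lock(&ctrl_lock); /* pointer is only stable under the lock */
        s = get_session(current_session);
        pthread_mutex_unlock(&ctrl_lock);

        printf("killing tasks in session %d\n", s->id); /* safe: we hold a reference */
        put_session(s);

        put_session(current_session);   /* drop the "owner" reference too */
        return 0;
}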
diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
index c4ecd66fafef..ffcab80ba77d 100644
--- a/drivers/tty/tty_jobctrl.c
+++ b/drivers/tty/tty_jobctrl.c
@@ -103,8 +103,8 @@ static void __proc_set_tty(struct tty_struct *tty)
put_pid(tty->session);
put_pid(tty->pgrp);
tty->pgrp = get_pid(task_pgrp(current));
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
tty->session = get_pid(task_session(current));
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
if (current->signal->tty) {
tty_debug(tty, "current tty %s not NULL!!\n",
current->signal->tty->name);
@@ -293,20 +293,23 @@ void disassociate_ctty(int on_exit)
spin_lock_irq(&current->sighand->siglock);
put_pid(current->signal->tty_old_pgrp);
current->signal->tty_old_pgrp = NULL;
-
tty = tty_kref_get(current->signal->tty);
+ spin_unlock_irq(&current->sighand->siglock);
+
if (tty) {
unsigned long flags;
+
+ tty_lock(tty);
spin_lock_irqsave(&tty->ctrl_lock, flags);
put_pid(tty->session);
put_pid(tty->pgrp);
tty->session = NULL;
tty->pgrp = NULL;
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ tty_unlock(tty);
tty_kref_put(tty);
}
- spin_unlock_irq(&current->sighand->siglock);
/* Now clear signal->tty under the lock */
read_lock(&tasklist_lock);
session_clear_tty(task_session(current));
@@ -477,14 +480,19 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
return -ENOTTY;
if (retval)
return retval;
- if (!current->signal->tty ||
- (current->signal->tty != real_tty) ||
- (real_tty->session != task_session(current)))
- return -ENOTTY;
+
if (get_user(pgrp_nr, p))
return -EFAULT;
if (pgrp_nr < 0)
return -EINVAL;
+
+ spin_lock_irq(&real_tty->ctrl_lock);
+ if (!current->signal->tty ||
+ (current->signal->tty != real_tty) ||
+ (real_tty->session != task_session(current))) {
+ retval = -ENOTTY;
+ goto out_unlock_ctrl;
+ }
rcu_read_lock();
pgrp = find_vpid(pgrp_nr);
retval = -ESRCH;
@@ -494,12 +502,12 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
if (session_of_pgrp(pgrp) != task_session(current))
goto out_unlock;
retval = 0;
- spin_lock_irq(&tty->ctrl_lock);
put_pid(real_tty->pgrp);
real_tty->pgrp = get_pid(pgrp);
- spin_unlock_irq(&tty->ctrl_lock);
out_unlock:
rcu_read_unlock();
+out_unlock_ctrl:
+ spin_unlock_irq(&real_tty->ctrl_lock);
return retval;
}
@@ -511,20 +519,30 @@ out_unlock:
*
* Obtain the session id of the tty. If there is no session
* return an error.
- *
- * Locking: none. Reference to current->signal->tty is safe.
*/
static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
{
+ unsigned long flags;
+ pid_t sid;
+
/*
* (tty == real_tty) is a cheap way of
* testing if the tty is NOT a master pty.
*/
if (tty == real_tty && current->signal->tty != real_tty)
return -ENOTTY;
+
+ spin_lock_irqsave(&real_tty->ctrl_lock, flags);
if (!real_tty->session)
- return -ENOTTY;
- return put_user(pid_vnr(real_tty->session), p);
+ goto err;
+ sid = pid_vnr(real_tty->session);
+ spin_unlock_irqrestore(&real_tty->ctrl_lock, flags);
+
+ return put_user(sid, p);
+
+err:
+ spin_unlock_irqrestore(&real_tty->ctrl_lock, flags);
+ return -ENOTTY;
}
/*
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
index 58b454c34560..10a832a2135e 100644
--- a/drivers/tty/vcc.c
+++ b/drivers/tty/vcc.c
@@ -604,6 +604,7 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
port->index = vcc_table_add(port);
if (port->index == -1) {
pr_err("VCC: no more TTY indices left for allocation\n");
+ rv = -ENOMEM;
goto free_ldc;
}
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 7c7ada0b3ea0..90c6e10b1ef9 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -495,7 +495,7 @@ con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
p2[unicode & 0x3f] = fontpos;
- p->sum += (fontpos << 20) + unicode;
+ p->sum += (fontpos << 20U) + unicode;
return 0;
}
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 8d6074f8ba41..94cad9f86ff9 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -127,7 +127,11 @@ static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */
static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
static bool dead_key_next;
-static int npadch = -1; /* -1 or number assembled on pad */
+
+/* Handles a number being assembled on the number pad */
+static bool npadch_active;
+static unsigned int npadch_value;
+
static unsigned int diacr;
static char rep; /* flag telling character repeat */
@@ -738,8 +742,13 @@ static void k_fn(struct vc_data *vc, unsigned char value, char up_flag)
return;
if ((unsigned)value < ARRAY_SIZE(func_table)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&func_buf_lock, flags);
if (func_table[value])
puts_queue(vc, func_table[value]);
+ spin_unlock_irqrestore(&func_buf_lock, flags);
+
} else
pr_err("k_fn called with value=%d\n", value);
}
@@ -845,12 +854,12 @@ static void k_shift(struct vc_data *vc, unsigned char value, char up_flag)
shift_state &= ~(1 << value);
/* kludge */
- if (up_flag && shift_state != old_state && npadch != -1) {
+ if (up_flag && shift_state != old_state && npadch_active) {
if (kbd->kbdmode == VC_UNICODE)
- to_utf8(vc, npadch);
+ to_utf8(vc, npadch_value);
else
- put_queue(vc, npadch & 0xff);
- npadch = -1;
+ put_queue(vc, npadch_value & 0xff);
+ npadch_active = false;
}
}
@@ -868,7 +877,7 @@ static void k_meta(struct vc_data *vc, unsigned char value, char up_flag)
static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag)
{
- int base;
+ unsigned int base;
if (up_flag)
return;
@@ -882,10 +891,12 @@ static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag)
base = 16;
}
- if (npadch == -1)
- npadch = value;
- else
- npadch = npadch * base + value;
+ if (!npadch_active) {
+ npadch_value = 0;
+ npadch_active = true;
+ }
+
+ npadch_value = npadch_value * base + value;
}
static void k_lock(struct vc_data *vc, unsigned char value, char up_flag)
@@ -1984,13 +1995,11 @@ out:
#undef s
#undef v
-/* FIXME: This one needs untangling and locking */
+/* FIXME: This one needs untangling */
int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
{
struct kbsentry *kbs;
- char *p;
u_char *q;
- u_char __user *up;
int sz, fnw_sz;
int delta;
char *first_free, *fj, *fnw;
@@ -2016,23 +2025,19 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
i = kbs->kb_func;
switch (cmd) {
- case KDGKBSENT:
- sz = sizeof(kbs->kb_string) - 1; /* sz should have been
- a struct member */
- up = user_kdgkb->kb_string;
- p = func_table[i];
- if(p)
- for ( ; *p && sz; p++, sz--)
- if (put_user(*p, up++)) {
- ret = -EFAULT;
- goto reterr;
- }
- if (put_user('\0', up)) {
- ret = -EFAULT;
- goto reterr;
- }
- kfree(kbs);
- return ((p && *p) ? -EOVERFLOW : 0);
+ case KDGKBSENT: {
+ /* size should have been a struct member */
+ ssize_t len = sizeof(user_kdgkb->kb_string);
+
+ spin_lock_irqsave(&func_buf_lock, flags);
+ len = strlcpy(kbs->kb_string, func_table[i] ? : "", len);
+ spin_unlock_irqrestore(&func_buf_lock, flags);
+
+ ret = copy_to_user(user_kdgkb->kb_string, kbs->kb_string,
+ len + 1) ? -EFAULT : 0;
+
+ goto reterr;
+ }
case KDSKBSENT:
if (!perm) {
ret = -EPERM;
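The keyboard.c hunks above replace the overloaded "npadch == -1" convention with an explicit active flag and an unsigned accumulator, so keypad digit assembly can no longer overflow signed arithmetic. A small sketch of the resulting logic, with a made-up digit sequence:

#include <stdbool.h>
#include <stdio.h>

static bool npadch_active;
static unsigned int npadch_value;

static void k_ascii(unsigned int value, unsigned int base)
{
        if (!npadch_active) {
                npadch_value = 0;
                npadch_active = true;
        }
        npadch_value = npadch_value * base + value;
}

static void k_shift_release(void)
{
        if (!npadch_active)
                return;
        printf("emit code %u (low byte 0x%02x)\n", npadch_value, npadch_value & 0xff);
        npadch_active = false;
}

int main(void)
{
        unsigned int digits[] = { 2, 5, 1 };    /* Alt+2,5,1 on the keypad */

        for (unsigned int i = 0; i < sizeof(digits) / sizeof(digits[0]); i++)
                k_ascii(digits[i], 10);
        k_shift_release();                      /* prints 251 / 0xfb */
        return 0;
}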
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 36c6f1b98372..b2b5f19fb2fb 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -81,6 +81,7 @@
#include <linux/errno.h>
#include <linux/kd.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/major.h>
#include <linux/mm.h>
#include <linux/console.h>
@@ -350,7 +351,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
/* allocate everything in one go */
memsize = cols * rows * sizeof(char32_t);
memsize += rows * sizeof(char32_t *);
- p = kmalloc(memsize, GFP_KERNEL);
+ p = vmalloc(memsize);
if (!p)
return NULL;
@@ -364,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
return uniscr;
}
+static void vc_uniscr_free(struct uni_screen *uniscr)
+{
+ vfree(uniscr);
+}
+
static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
{
- kfree(vc->vc_uni_screen);
+ vc_uniscr_free(vc->vc_uni_screen);
vc->vc_uni_screen = new_uniscr;
}
@@ -1089,10 +1095,19 @@ static const struct tty_port_operations vc_port_ops = {
.destruct = vc_port_destruct,
};
+/*
+ * Change # of rows and columns (0 means unchanged/the size of fg_console)
+ * [this is to be used together with some user program
+ * like resize that changes the hardware videomode]
+ */
+#define VC_MAXCOL (32767)
+#define VC_MAXROW (32767)
+
int vc_allocate(unsigned int currcons) /* return 0 on success */
{
struct vt_notifier_param param;
struct vc_data *vc;
+ int err;
WARN_CONSOLE_UNLOCKED();
@@ -1122,6 +1137,11 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
if (!*vc->vc_uni_pagedir_loc)
con_set_default_unimap(vc);
+ err = -EINVAL;
+ if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW ||
+ vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size)
+ goto err_free;
+ err = -ENOMEM;
vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
if (!vc->vc_screenbuf)
goto err_free;
@@ -1140,7 +1160,7 @@ err_free:
visual_deinit(vc);
kfree(vc);
vc_cons[currcons].d = NULL;
- return -ENOMEM;
+ return err;
}
static inline int resize_screen(struct vc_data *vc, int width, int height,
@@ -1149,20 +1169,12 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
/* Resizes the resolution of the display adapater */
int err = 0;
- if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize)
+ if (vc->vc_sw->con_resize)
err = vc->vc_sw->con_resize(vc, width, height, user);
return err;
}
-/*
- * Change # of rows and columns (0 means unchanged/the size of fg_console)
- * [this is to be used together with some user program
- * like resize that changes the hardware videomode]
- */
-#define VC_RESIZE_MAXCOL (32767)
-#define VC_RESIZE_MAXROW (32767)
-
/**
* vc_do_resize - resizing method for the tty
* @tty: tty being resized
@@ -1187,7 +1199,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
unsigned int old_rows, old_row_size, first_copied_row;
unsigned int new_cols, new_rows, new_row_size, new_screen_size;
unsigned int user;
- unsigned short *newscreen;
+ unsigned short *oldscreen, *newscreen;
struct uni_screen *new_uniscr = NULL;
WARN_CONSOLE_UNLOCKED();
@@ -1198,7 +1210,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
user = vc->vc_resize_user;
vc->vc_resize_user = 0;
- if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW)
+ if (cols > VC_MAXCOL || lines > VC_MAXROW)
return -EINVAL;
new_cols = (cols ? cols : vc->vc_cols);
@@ -1209,7 +1221,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;
- if (new_screen_size > (4 << 20))
+ if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size)
return -EINVAL;
newscreen = kzalloc(new_screen_size, GFP_USER);
if (!newscreen)
@@ -1232,7 +1244,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
err = resize_screen(vc, new_cols, new_rows, user);
if (err) {
kfree(newscreen);
- kfree(new_uniscr);
+ vc_uniscr_free(new_uniscr);
return err;
}
@@ -1285,10 +1297,11 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_scr_end > new_origin)
scr_memsetw((void *)new_origin, vc->vc_video_erase_char,
new_scr_end - new_origin);
- kfree(vc->vc_screenbuf);
+ oldscreen = vc->vc_screenbuf;
vc->vc_screenbuf = newscreen;
vc->vc_screenbuf_size = new_screen_size;
set_origin(vc);
+ kfree(oldscreen);
/* do part of a reset_terminal() */
vc->vc_top = 0;
@@ -1367,6 +1380,7 @@ struct vc_data *vc_deallocate(unsigned int currcons)
atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
vcs_remove_sysfs(currcons);
visual_deinit(vc);
+ con_free_unimap(vc);
put_pid(vc->vt_pid);
vc_uniscr_set(vc, NULL);
kfree(vc->vc_screenbuf);
@@ -3365,6 +3379,7 @@ static int __init con_init(void)
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
tty_port_init(&vc->port);
visual_init(vc, currcons, 1);
+ /* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */
vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
vc_init(vc, vc->vc_rows, vc->vc_cols,
currcons || !vc->vc_sw->con_save_screen);
@@ -4560,27 +4575,6 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
return rc;
}
-static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
-{
- int con = op->height;
- int rc;
-
-
- console_lock();
- if (vc->vc_mode != KD_TEXT)
- rc = -EINVAL;
- else if (!vc->vc_sw->con_font_copy)
- rc = -ENOSYS;
- else if (con < 0 || !vc_cons_allocated(con))
- rc = -ENOTTY;
- else if (con == vc->vc_num) /* nothing to do */
- rc = 0;
- else
- rc = vc->vc_sw->con_font_copy(vc, con);
- console_unlock();
- return rc;
-}
-
int con_font_op(struct vc_data *vc, struct console_font_op *op)
{
switch (op->op) {
@@ -4591,7 +4585,8 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op)
case KD_FONT_OP_SET_DEFAULT:
return con_font_default(vc, op);
case KD_FONT_OP_COPY:
- return con_font_copy(vc, op);
+ /* was buggy and never really used */
+ return -EINVAL;
}
return -ENOSYS;
}
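The vt.c hunks above bound the console geometry (VC_MAXCOL/VC_MAXROW) and reject screen buffer sizes that are zero or larger than the allocator can serve before anything is allocated. A standalone sketch of that validation, where MAX_ALLOC is an arbitrary stand-in for KMALLOC_MAX_SIZE:

#include <stdint.h>
#include <stdio.h>

#define VC_MAXCOL 32767u
#define VC_MAXROW 32767u
#define MAX_ALLOC (4u << 20)

static int check_geometry(unsigned int cols, unsigned int rows)
{
        uint64_t screenbuf_size;

        if (cols > VC_MAXCOL || rows > VC_MAXROW)
                return -1;

        /* 2 bytes per cell (character + attribute), computed in 64 bits */
        screenbuf_size = (uint64_t)cols * rows * 2;
        if (!screenbuf_size || screenbuf_size > MAX_ALLOC)
                return -1;
        return 0;
}

int main(void)
{
        printf("80x25: %s\n", check_geometry(80, 25) ? "rejected" : "ok");
        printf("32767x32767: %s\n",
               check_geometry(32767, 32767) ? "rejected" : "ok");
        printf("0x25: %s\n", check_geometry(0, 25) ? "rejected" : "ok");
        return 0;
}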
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 5de81431c835..ce6c7dd7bc12 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -244,7 +244,7 @@ int vt_waitactive(int n)
static inline int
-do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struct console_font_op *op)
+do_fontx_ioctl(struct vc_data *vc, int cmd, struct consolefontdesc __user *user_cfd, int perm, struct console_font_op *op)
{
struct consolefontdesc cfdarg;
int i;
@@ -262,15 +262,16 @@ do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struc
op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount;
op->data = cfdarg.chardata;
- return con_font_op(vc_cons[fg_console].d, op);
- case GIO_FONTX: {
+ return con_font_op(vc, op);
+
+ case GIO_FONTX:
op->op = KD_FONT_OP_GET;
op->flags = KD_FONT_FLAG_OLD;
op->width = 8;
op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount;
op->data = cfdarg.chardata;
- i = con_font_op(vc_cons[fg_console].d, op);
+ i = con_font_op(vc, op);
if (i)
return i;
cfdarg.charheight = op->height;
@@ -278,7 +279,6 @@ do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struc
if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc)))
return -EFAULT;
return 0;
- }
}
return -EINVAL;
}
@@ -893,12 +893,22 @@ int vt_ioctl(struct tty_struct *tty,
console_lock();
vcp = vc_cons[i].d;
if (vcp) {
+ int ret;
+ int save_scan_lines = vcp->vc_scan_lines;
+ int save_cell_height = vcp->vc_cell_height;
+
if (v.v_vlin)
vcp->vc_scan_lines = v.v_vlin;
if (v.v_clin)
- vcp->vc_font.height = v.v_clin;
+ vcp->vc_cell_height = v.v_clin;
vcp->vc_resize_user = 1;
- vc_resize(vcp, v.v_cols, v.v_rows);
+ ret = vc_resize(vcp, v.v_cols, v.v_rows);
+ if (ret) {
+ vcp->vc_scan_lines = save_scan_lines;
+ vcp->vc_cell_height = save_cell_height;
+ console_unlock();
+ return ret;
+ }
}
console_unlock();
}
@@ -914,7 +924,7 @@ int vt_ioctl(struct tty_struct *tty,
op.height = 0;
op.charcount = 256;
op.data = up;
- ret = con_font_op(vc_cons[fg_console].d, &op);
+ ret = con_font_op(vc, &op);
break;
}
@@ -925,7 +935,7 @@ int vt_ioctl(struct tty_struct *tty,
op.height = 32;
op.charcount = 256;
op.data = up;
- ret = con_font_op(vc_cons[fg_console].d, &op);
+ ret = con_font_op(vc, &op);
break;
}
@@ -942,7 +952,7 @@ int vt_ioctl(struct tty_struct *tty,
case PIO_FONTX:
case GIO_FONTX:
- ret = do_fontx_ioctl(cmd, up, perm, &op);
+ ret = do_fontx_ioctl(vc, cmd, up, perm, &op);
break;
case PIO_FONTRESET:
@@ -959,11 +969,11 @@ int vt_ioctl(struct tty_struct *tty,
{
op.op = KD_FONT_OP_SET_DEFAULT;
op.data = NULL;
- ret = con_font_op(vc_cons[fg_console].d, &op);
+ ret = con_font_op(vc, &op);
if (ret)
break;
console_lock();
- con_set_default_unimap(vc_cons[fg_console].d);
+ con_set_default_unimap(vc);
console_unlock();
break;
}
@@ -1090,8 +1100,9 @@ struct compat_consolefontdesc {
};
static inline int
-compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
- int perm, struct console_font_op *op)
+compat_fontx_ioctl(struct vc_data *vc, int cmd,
+ struct compat_consolefontdesc __user *user_cfd,
+ int perm, struct console_font_op *op)
{
struct compat_consolefontdesc cfdarg;
int i;
@@ -1109,7 +1120,8 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount;
op->data = compat_ptr(cfdarg.chardata);
- return con_font_op(vc_cons[fg_console].d, op);
+ return con_font_op(vc, op);
+
case GIO_FONTX:
op->op = KD_FONT_OP_GET;
op->flags = KD_FONT_FLAG_OLD;
@@ -1117,7 +1129,7 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount;
op->data = compat_ptr(cfdarg.chardata);
- i = con_font_op(vc_cons[fg_console].d, op);
+ i = con_font_op(vc, op);
if (i)
return i;
cfdarg.charheight = op->height;
@@ -1208,7 +1220,7 @@ long vt_compat_ioctl(struct tty_struct *tty,
*/
case PIO_FONTX:
case GIO_FONTX:
- ret = compat_fontx_ioctl(cmd, up, perm, &op);
+ ret = compat_fontx_ioctl(vc, cmd, up, perm, &op);
break;
case KDFONTOP:
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 9c788748bdc6..0e3e16c51d3a 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -413,10 +413,10 @@ static int uio_get_minor(struct uio_device *idev)
return retval;
}
-static void uio_free_minor(struct uio_device *idev)
+static void uio_free_minor(unsigned long minor)
{
mutex_lock(&minor_lock);
- idr_remove(&uio_idr, idev->minor);
+ idr_remove(&uio_idr, minor);
mutex_unlock(&minor_lock);
}
@@ -988,7 +988,7 @@ err_request_irq:
err_uio_dev_add_attributes:
device_del(&idev->dev);
err_device_create:
- uio_free_minor(idev);
+ uio_free_minor(idev->minor);
put_device(&idev->dev);
return ret;
}
@@ -1002,13 +1002,13 @@ EXPORT_SYMBOL_GPL(__uio_register_device);
void uio_unregister_device(struct uio_info *info)
{
struct uio_device *idev;
+ unsigned long minor;
if (!info || !info->uio_dev)
return;
idev = info->uio_dev;
-
- uio_free_minor(idev);
+ minor = idev->minor;
mutex_lock(&idev->info_lock);
uio_dev_del_attributes(idev);
@@ -1021,6 +1021,8 @@ void uio_unregister_device(struct uio_info *info)
device_unregister(&idev->dev);
+ uio_free_minor(minor);
+
return;
}
EXPORT_SYMBOL_GPL(uio_unregister_device);
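The uio_unregister_device() hunks above copy the minor number out of the device before it is unregistered (and possibly freed) and release the IDR slot only afterwards. A short sketch of that ordering, with a plain array standing in for the IDR:

#include <stdio.h>
#include <stdlib.h>

struct dev {
        unsigned long minor;
};

static struct dev *registry[8];

static void free_minor(unsigned long minor)
{
        registry[minor] = NULL;         /* slot may be reused from here on */
}

static void device_unregister(struct dev *d)
{
        free(d);                        /* after this, d must not be touched */
}

int main(void)
{
        struct dev *d = calloc(1, sizeof(*d));
        unsigned long minor;

        d->minor = 3;
        registry[d->minor] = d;

        minor = d->minor;               /* capture before the object can go away */
        device_unregister(d);
        free_minor(minor);              /* no dereference of the freed object */

        printf("slot %lu is %s\n", minor, registry[minor] ? "busy" : "free");
        return 0;
}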
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index f598ecddc8a7..b58a504240c4 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -148,7 +148,7 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
if (!uioinfo->irq) {
ret = platform_get_irq(pdev, 0);
uioinfo->irq = ret;
- if (ret == -ENXIO && pdev->dev.of_node)
+ if (ret == -ENXIO)
uioinfo->irq = UIO_IRQ_NONE;
else if (ret < 0) {
dev_err(&pdev->dev, "failed to get IRQ\n");
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index 633c52de3bb3..9865750bc31e 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -486,7 +486,7 @@ c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status)
c67x00_release_urb(c67x00, urb);
usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb);
spin_unlock(&c67x00->lock);
- usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status);
+ usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status);
spin_lock(&c67x00->lock);
}
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 19f5f5f2a48a..af420ec2a9f4 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -57,7 +57,8 @@ static const struct ci_hdrc_imx_platform_flag imx6sx_usb_data = {
static const struct ci_hdrc_imx_platform_flag imx6ul_usb_data = {
.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
- CI_HDRC_TURN_VBUS_EARLY_ON,
+ CI_HDRC_TURN_VBUS_EARLY_ON |
+ CI_HDRC_DISABLE_DEVICE_STREAMING,
};
static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = {
@@ -127,9 +128,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
misc_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
- if (!misc_pdev || !platform_get_drvdata(misc_pdev))
+ if (!misc_pdev)
return ERR_PTR(-EPROBE_DEFER);
+ if (!platform_get_drvdata(misc_pdev)) {
+ put_device(&misc_pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
data->dev = &misc_pdev->dev;
if (of_find_property(np, "disable-over-current", NULL))
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 159b897c5e80..befd9235bcad 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1154,6 +1154,29 @@ static void ci_controller_suspend(struct ci_hdrc *ci)
enable_irq(ci->irq);
}
+/*
+ * Handle the wakeup interrupt triggered by extcon connector
+ * We need to call ci_irq again for extcon since the first
+ * interrupt (wakeup int) only brings the controller out of
+ * low power mode but does not handle any interrupts.
+ */
+static void ci_extcon_wakeup_int(struct ci_hdrc *ci)
+{
+ struct ci_hdrc_cable *cable_id, *cable_vbus;
+ u32 otgsc = hw_read_otgsc(ci, ~0);
+
+ cable_id = &ci->platdata->id_extcon;
+ cable_vbus = &ci->platdata->vbus_extcon;
+
+ if (!IS_ERR(cable_id->edev) && ci->is_otg &&
+ (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS))
+ ci_irq(ci->irq, ci);
+
+ if (!IS_ERR(cable_vbus->edev) && ci->is_otg &&
+ (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS))
+ ci_irq(ci->irq, ci);
+}
+
static int ci_controller_resume(struct device *dev)
{
struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -1186,6 +1209,7 @@ static int ci_controller_resume(struct device *dev)
enable_irq(ci->irq);
if (ci_otg_is_fsm_mode(ci))
ci_otg_fsm_wakeup_by_srp(ci);
+ ci_extcon_wakeup_int(ci);
}
return 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 6e0b41861735..738de8c9c354 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -147,17 +147,29 @@ static inline int acm_set_control(struct acm *acm, int control)
#define acm_send_break(acm, ms) \
acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
-static void acm_kill_urbs(struct acm *acm)
+static void acm_poison_urbs(struct acm *acm)
{
int i;
- usb_kill_urb(acm->ctrlurb);
+ usb_poison_urb(acm->ctrlurb);
for (i = 0; i < ACM_NW; i++)
- usb_kill_urb(acm->wb[i].urb);
+ usb_poison_urb(acm->wb[i].urb);
for (i = 0; i < acm->rx_buflimit; i++)
- usb_kill_urb(acm->read_urbs[i]);
+ usb_poison_urb(acm->read_urbs[i]);
+}
+
+static void acm_unpoison_urbs(struct acm *acm)
+{
+ int i;
+
+ for (i = 0; i < acm->rx_buflimit; i++)
+ usb_unpoison_urb(acm->read_urbs[i]);
+ for (i = 0; i < ACM_NW; i++)
+ usb_unpoison_urb(acm->wb[i].urb);
+ usb_unpoison_urb(acm->ctrlurb);
}
+
/*
* Write buffer management.
* All of these assume proper locks taken by the caller.
@@ -225,9 +237,10 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
if (rc < 0) {
- dev_err(&acm->data->dev,
- "%s - usb_submit_urb(write bulk) failed: %d\n",
- __func__, rc);
+ if (rc != -EPERM)
+ dev_err(&acm->data->dev,
+ "%s - usb_submit_urb(write bulk) failed: %d\n",
+ __func__, rc);
acm_write_done(acm, wb);
}
return rc;
@@ -312,8 +325,10 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
acm->iocount.dsr++;
if (difference & ACM_CTRL_DCD)
acm->iocount.dcd++;
- if (newctrl & ACM_CTRL_BRK)
+ if (newctrl & ACM_CTRL_BRK) {
acm->iocount.brk++;
+ tty_insert_flip_char(&acm->port, 0, TTY_BREAK);
+ }
if (newctrl & ACM_CTRL_RI)
acm->iocount.rng++;
if (newctrl & ACM_CTRL_FRAMING)
@@ -378,21 +393,19 @@ static void acm_ctrl_irq(struct urb *urb)
if (current_size < expected_size) {
/* notification is transmitted fragmented, reassemble */
if (acm->nb_size < expected_size) {
- if (acm->nb_size) {
- kfree(acm->notification_buffer);
- acm->nb_size = 0;
- }
+ u8 *new_buffer;
alloc_size = roundup_pow_of_two(expected_size);
- /*
- * kmalloc ensures a valid notification_buffer after a
- * use of kfree in case the previous allocation was too
- * small. Final freeing is done on disconnect.
- */
- acm->notification_buffer =
- kmalloc(alloc_size, GFP_ATOMIC);
- if (!acm->notification_buffer)
+ /* Final freeing is done on disconnect. */
+ new_buffer = krealloc(acm->notification_buffer,
+ alloc_size, GFP_ATOMIC);
+ if (!new_buffer) {
+ acm->nb_index = 0;
goto exit;
+ }
+
+ acm->notification_buffer = new_buffer;
acm->nb_size = alloc_size;
+ dr = (struct usb_cdc_notification *)acm->notification_buffer;
}
copy_size = min(current_size,
@@ -412,9 +425,12 @@ static void acm_ctrl_irq(struct urb *urb)
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval && retval != -EPERM)
+ if (retval && retval != -EPERM && retval != -ENODEV)
dev_err(&acm->control->dev,
"%s - usb_submit_urb failed: %d\n", __func__, retval);
+ else
+ dev_vdbg(&acm->control->dev,
+ "control resubmission terminated %d\n", retval);
}
static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
@@ -430,6 +446,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
dev_err(&acm->data->dev,
"urb %d failed submission with %d\n",
index, res);
+ } else {
+ dev_vdbg(&acm->data->dev, "intended failure %d\n", res);
}
set_bit(index, &acm->read_urbs_free);
return res;
@@ -472,15 +490,11 @@ static void acm_read_bulk_callback(struct urb *urb)
int status = urb->status;
bool stopped = false;
bool stalled = false;
+ bool cooldown = false;
dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
rb->index, urb->actual_length, status);
- if (!acm->dev) {
- dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
- return;
- }
-
switch (status) {
case 0:
usb_mark_last_busy(acm->dev);
@@ -498,6 +512,15 @@ static void acm_read_bulk_callback(struct urb *urb)
__func__, status);
stopped = true;
break;
+ case -EOVERFLOW:
+ case -EPROTO:
+ dev_dbg(&acm->data->dev,
+ "%s - cooling babbling device\n", __func__);
+ usb_mark_last_busy(acm->dev);
+ set_bit(rb->index, &acm->urbs_in_error_delay);
+ set_bit(ACM_ERROR_DELAY, &acm->flags);
+ cooldown = true;
+ break;
default:
dev_dbg(&acm->data->dev,
"%s - nonzero urb status received: %d\n",
@@ -519,9 +542,11 @@ static void acm_read_bulk_callback(struct urb *urb)
*/
smp_mb__after_atomic();
- if (stopped || stalled) {
+ if (stopped || stalled || cooldown) {
if (stalled)
- schedule_work(&acm->work);
+ schedule_delayed_work(&acm->dwork, 0);
+ else if (cooldown)
+ schedule_delayed_work(&acm->dwork, HZ / 2);
return;
}
@@ -554,23 +579,29 @@ static void acm_write_bulk(struct urb *urb)
acm_write_done(acm, wb);
spin_unlock_irqrestore(&acm->write_lock, flags);
set_bit(EVENT_TTY_WAKEUP, &acm->flags);
- schedule_work(&acm->work);
+ schedule_delayed_work(&acm->dwork, 0);
}
static void acm_softint(struct work_struct *work)
{
int i;
- struct acm *acm = container_of(work, struct acm, work);
+ struct acm *acm = container_of(work, struct acm, dwork.work);
if (test_bit(EVENT_RX_STALL, &acm->flags)) {
- if (!(usb_autopm_get_interface(acm->data))) {
+ smp_mb(); /* against acm_suspend() */
+ if (!acm->susp_count) {
for (i = 0; i < acm->rx_buflimit; i++)
usb_kill_urb(acm->read_urbs[i]);
usb_clear_halt(acm->dev, acm->in);
acm_submit_read_urbs(acm, GFP_KERNEL);
- usb_autopm_put_interface(acm->data);
+ clear_bit(EVENT_RX_STALL, &acm->flags);
}
- clear_bit(EVENT_RX_STALL, &acm->flags);
+ }
+
+ if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) {
+ for (i = 0; i < acm->rx_buflimit; i++)
+ if (test_and_clear_bit(i, &acm->urbs_in_error_delay))
+ acm_submit_read_urb(acm, i, GFP_KERNEL);
}
if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
@@ -633,7 +664,8 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise)
res = acm_set_control(acm, val);
if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE))
- dev_err(&acm->control->dev, "failed to set dtr/rts\n");
+ /* This is broken in too many devices to spam the logs */
+ dev_dbg(&acm->control->dev, "failed to set dtr/rts\n");
}
static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
@@ -718,6 +750,7 @@ static void acm_port_shutdown(struct tty_port *port)
* Need to grab write_lock to prevent race with resume, but no need to
* hold it due to the tty-port initialised flag.
*/
+ acm_poison_urbs(acm);
spin_lock_irq(&acm->write_lock);
spin_unlock_irq(&acm->write_lock);
@@ -734,7 +767,8 @@ static void acm_port_shutdown(struct tty_port *port)
usb_autopm_put_interface_async(acm->control);
}
- acm_kill_urbs(acm);
+ acm_unpoison_urbs(acm);
+
}
static void acm_tty_cleanup(struct tty_struct *tty)
@@ -953,8 +987,6 @@ static int set_serial_info(struct acm *acm,
if ((new_serial.close_delay != old_close_delay) ||
(new_serial.closing_wait != old_closing_wait))
retval = -EPERM;
- else
- retval = -EOPNOTSUPP;
} else {
acm->port.close_delay = close_delay;
acm->port.closing_wait = closing_wait;
@@ -1255,9 +1287,21 @@ static int acm_probe(struct usb_interface *intf,
}
}
} else {
+ int class = -1;
+
data_intf_num = union_header->bSlaveInterface0;
control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
+
+ if (control_interface)
+ class = control_interface->cur_altsetting->desc.bInterfaceClass;
+
+ if (class != USB_CLASS_COMM && class != USB_CLASS_CDC_DATA) {
+ dev_dbg(&intf->dev, "Broken union descriptor, assuming single interface\n");
+ combined_interfaces = 1;
+ control_interface = data_interface = intf;
+ goto look_for_collapsed_interface;
+ }
}
if (!control_interface || !data_interface) {
@@ -1364,7 +1408,7 @@ made_compressed_probe:
acm->ctrlsize = ctrlsize;
acm->readsize = readsize;
acm->rx_buflimit = num_rx_buf;
- INIT_WORK(&acm->work, acm_softint);
+ INIT_DELAYED_WORK(&acm->dwork, acm_softint);
init_waitqueue_head(&acm->wioctl);
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
@@ -1515,12 +1559,16 @@ skip_countries:
return 0;
alloc_fail6:
+ if (!acm->combined_interfaces) {
+ /* Clear driver data so that disconnect() returns early. */
+ usb_set_intfdata(data_interface, NULL);
+ usb_driver_release_interface(&acm_driver, data_interface);
+ }
if (acm->country_codes) {
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
device_remove_file(&acm->control->dev,
&dev_attr_iCountryCodeRelDate);
- kfree(acm->country_codes);
}
device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
alloc_fail5:
@@ -1552,8 +1600,14 @@ static void acm_disconnect(struct usb_interface *intf)
if (!acm)
return;
- mutex_lock(&acm->mutex);
acm->disconnected = true;
+ /*
+ * there is a circular dependency. acm_softint() can resubmit
+ * the URBs in error handling so we need to block any
+ * submission right away
+ */
+ acm_poison_urbs(acm);
+ mutex_lock(&acm->mutex);
if (acm->country_codes) {
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
@@ -1572,8 +1626,7 @@ static void acm_disconnect(struct usb_interface *intf)
tty_kref_put(tty);
}
- acm_kill_urbs(acm);
- cancel_work_sync(&acm->work);
+ cancel_delayed_work_sync(&acm->dwork);
tty_unregister_device(acm_tty_driver, acm->minor);
@@ -1614,8 +1667,9 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
if (cnt)
return 0;
- acm_kill_urbs(acm);
- cancel_work_sync(&acm->work);
+ acm_poison_urbs(acm);
+ cancel_delayed_work_sync(&acm->dwork);
+ acm->urbs_in_error_delay = 0;
return 0;
}
@@ -1631,6 +1685,8 @@ static int acm_resume(struct usb_interface *intf)
if (--acm->susp_count)
goto out;
+ acm_unpoison_urbs(acm);
+
if (tty_port_initialized(&acm->port)) {
rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
@@ -1695,6 +1751,8 @@ static int acm_pre_reset(struct usb_interface *intf)
static const struct usb_device_id acm_ids[] = {
/* quirky and broken devices */
+ { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. (formerly SMSC) */
+ .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */
{ USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
.driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
{ USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
@@ -1702,6 +1760,15 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
+ { USB_DEVICE(0x045b, 0x023c), /* Renesas USB Download mode */
+ .driver_info = DISABLE_ECHO, /* Don't echo banner */
+ },
+ { USB_DEVICE(0x045b, 0x0248), /* Renesas USB Download mode */
+ .driver_info = DISABLE_ECHO, /* Don't echo banner */
+ },
+ { USB_DEVICE(0x045b, 0x024D), /* Renesas USB Download mode */
+ .driver_info = DISABLE_ECHO, /* Don't echo banner */
+ },
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
@@ -1894,6 +1961,10 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x04d8, 0x0083), /* Bootloader mode */
.driver_info = IGNORE_DEVICE,
},
+
+ { USB_DEVICE(0x04d8, 0xf58b),
+ .driver_info = IGNORE_DEVICE,
+ },
#endif
/*Samsung phone in firmware update mode */
@@ -1906,6 +1977,17 @@ static const struct usb_device_id acm_ids[] = {
.driver_info = IGNORE_DEVICE,
},
+ /* Exclude ETAS ES58x */
+ { USB_DEVICE(0x108c, 0x0159), /* ES581.4 */
+ .driver_info = IGNORE_DEVICE,
+ },
+ { USB_DEVICE(0x108c, 0x0168), /* ES582.1 */
+ .driver_info = IGNORE_DEVICE,
+ },
+ { USB_DEVICE(0x108c, 0x0169), /* ES584.1 */
+ .driver_info = IGNORE_DEVICE,
+ },
+
{ USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
.driver_info = SEND_ZERO_PACKET,
},
@@ -1913,6 +1995,11 @@ static const struct usb_device_id acm_ids[] = {
.driver_info = SEND_ZERO_PACKET,
},
+ /* Exclude Goodix Fingerprint Reader */
+ { USB_DEVICE(0x27c6, 0x5395),
+ .driver_info = IGNORE_DEVICE,
+ },
+
/* control interfaces without any protocol set */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_PROTO_NONE) },
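Among the cdc-acm changes above, the notification reassembly buffer is now grown with krealloc() into a temporary pointer, so an allocation failure leaves the old buffer and its contents intact. A userspace sketch of that growth strategy using realloc(), with arbitrary sizes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned char *notification_buffer;
static size_t nb_size;

static int ensure_capacity(size_t expected_size)
{
        unsigned char *new_buffer;

        if (nb_size >= expected_size)
                return 0;

        new_buffer = realloc(notification_buffer, expected_size);
        if (!new_buffer)
                return -1;      /* old buffer is still valid and still owned */

        notification_buffer = new_buffer;
        nb_size = expected_size;
        return 0;
}

int main(void)
{
        if (ensure_capacity(64) == 0)
                memcpy(notification_buffer, "fragment", 8);
        if (ensure_capacity(256) == 0)  /* the first 8 bytes are preserved */
                printf("grew to %zu bytes, head still \"%.8s\"\n",
                       nb_size, (char *)notification_buffer);

        free(notification_buffer);
        return 0;
}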
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 515aad0847ee..d8f8651425c4 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -108,8 +108,10 @@ struct acm {
unsigned long flags;
# define EVENT_TTY_WAKEUP 0
# define EVENT_RX_STALL 1
+# define ACM_ERROR_DELAY 3
+ unsigned long urbs_in_error_delay; /* these need to be restarted after a delay */
struct usb_cdc_line_coding line; /* bits, stop, parity */
- struct work_struct work; /* work queue entry for line discipline waking up */
+ struct delayed_work dwork; /* work queue entry for various purposes */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
unsigned int ctrlout; /* output control lines (DTR, RTS) */
struct async_icount iocount; /* counters for control line changes */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 4929c5883068..48e775fff16d 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -58,6 +58,9 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
#define WDM_MAX 16
+/* we cannot wait forever at flush() */
+#define WDM_FLUSH_TIMEOUT (30 * HZ)
+
/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
#define WDM_DEFAULT_BUFSIZE 256
@@ -151,7 +154,7 @@ static void wdm_out_callback(struct urb *urb)
kfree(desc->outbuf);
desc->outbuf = NULL;
clear_bit(WDM_IN_USE, &desc->flags);
- wake_up(&desc->wait);
+ wake_up_all(&desc->wait);
}
static void wdm_in_callback(struct urb *urb)
@@ -318,12 +321,23 @@ exit:
}
-static void kill_urbs(struct wdm_device *desc)
+static void poison_urbs(struct wdm_device *desc)
{
/* the order here is essential */
- usb_kill_urb(desc->command);
- usb_kill_urb(desc->validity);
- usb_kill_urb(desc->response);
+ usb_poison_urb(desc->command);
+ usb_poison_urb(desc->validity);
+ usb_poison_urb(desc->response);
+}
+
+static void unpoison_urbs(struct wdm_device *desc)
+{
+ /*
+ * the order here is not essential
+ * it is symmetrical just to be nice
+ */
+ usb_unpoison_urb(desc->response);
+ usb_unpoison_urb(desc->validity);
+ usb_unpoison_urb(desc->command);
}
static void free_urbs(struct wdm_device *desc)
@@ -393,6 +407,9 @@ static ssize_t wdm_write
if (test_bit(WDM_RESETTING, &desc->flags))
r = -EIO;
+ if (test_bit(WDM_DISCONNECTING, &desc->flags))
+ r = -ENODEV;
+
if (r < 0) {
rv = r;
goto out_free_mem_pm;
@@ -424,6 +441,7 @@ static ssize_t wdm_write
if (rv < 0) {
desc->outbuf = NULL;
clear_bit(WDM_IN_USE, &desc->flags);
+ wake_up_all(&desc->wait); /* for wdm_wait_for_response() */
dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
rv = usb_translate_errors(rv);
goto out_free_mem_pm;
@@ -458,13 +476,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
if (!desc->resp_count || !--desc->resp_count)
goto out;
+ if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+ rv = -ENODEV;
+ goto out;
+ }
+ if (test_bit(WDM_RESETTING, &desc->flags)) {
+ rv = -EIO;
+ goto out;
+ }
+
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
if (rv) {
- dev_err(&desc->intf->dev,
- "usb_submit_urb failed with result %d\n", rv);
+ if (!test_bit(WDM_DISCONNECTING, &desc->flags))
+ dev_err(&desc->intf->dev,
+ "usb_submit_urb failed with result %d\n", rv);
/* make sure the next notification trigger a submit */
clear_bit(WDM_RESPONDING, &desc->flags);
@@ -583,28 +611,58 @@ err:
return rv;
}
-static int wdm_flush(struct file *file, fl_owner_t id)
+static int wdm_wait_for_response(struct file *file, long timeout)
{
struct wdm_device *desc = file->private_data;
+ long rv; /* Use long here because (int) MAX_SCHEDULE_TIMEOUT < 0. */
+
+ /*
+ * Needs both flags. We cannot do with one because resetting it would
+ * cause a race with write() yet we need to signal a disconnect.
+ */
+ rv = wait_event_interruptible_timeout(desc->wait,
+ !test_bit(WDM_IN_USE, &desc->flags) ||
+ test_bit(WDM_DISCONNECTING, &desc->flags),
+ timeout);
- wait_event(desc->wait,
- /*
- * needs both flags. We cannot do with one
- * because resetting it would cause a race
- * with write() yet we need to signal
- * a disconnect
- */
- !test_bit(WDM_IN_USE, &desc->flags) ||
- test_bit(WDM_DISCONNECTING, &desc->flags));
-
- /* cannot dereference desc->intf if WDM_DISCONNECTING */
+ /*
+ * To report the correct error. This is best effort.
+ * We are inevitably racing with the hardware.
+ */
if (test_bit(WDM_DISCONNECTING, &desc->flags))
return -ENODEV;
- if (desc->werr < 0)
- dev_err(&desc->intf->dev, "Error in flush path: %d\n",
- desc->werr);
+ if (!rv)
+ return -EIO;
+ if (rv < 0)
+ return -EINTR;
- return usb_translate_errors(desc->werr);
+ spin_lock_irq(&desc->iuspin);
+ rv = desc->werr;
+ desc->werr = 0;
+ spin_unlock_irq(&desc->iuspin);
+
+ return usb_translate_errors(rv);
+
+}
+
+/*
+ * You need to send a signal when you react to malicious or defective hardware.
+ * Also, don't abort when fsync() returns -EINVAL: older kernels that do
+ * not implement wdm_flush() will return -EINVAL.
+ */
+static int wdm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ return wdm_wait_for_response(file, MAX_SCHEDULE_TIMEOUT);
+}
+
+/*
+ * Same as wdm_fsync(), except it uses a finite timeout in order to react to
+ * malicious or defective hardware which ceased communication after close() was
+ * implicitly called due to process termination.
+ */
+static int wdm_flush(struct file *file, fl_owner_t id)
+{
+ return wdm_wait_for_response(file, WDM_FLUSH_TIMEOUT);
}
static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
@@ -694,11 +752,12 @@ static int wdm_release(struct inode *inode, struct file *file)
if (!desc->count) {
if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
- kill_urbs(desc);
+ poison_urbs(desc);
spin_lock_irq(&desc->iuspin);
desc->resp_count = 0;
spin_unlock_irq(&desc->iuspin);
desc->manage_power(desc->intf, 0);
+ unpoison_urbs(desc);
} else {
/* must avoid dev_printk here as desc->intf is invalid */
pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
@@ -729,6 +788,7 @@ static const struct file_operations wdm_fops = {
.owner = THIS_MODULE,
.read = wdm_read,
.write = wdm_write,
+ .fsync = wdm_fsync,
.open = wdm_open,
.flush = wdm_flush,
.release = wdm_release,
@@ -988,7 +1048,7 @@ static void wdm_disconnect(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
- kill_urbs(desc);
+ poison_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
mutex_unlock(&desc->wlock);
@@ -1031,9 +1091,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
set_bit(WDM_SUSPENDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
/* callback submits work - order is essential */
- kill_urbs(desc);
+ poison_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
+ unpoison_urbs(desc);
}
if (!PMSG_IS_AUTO(message)) {
mutex_unlock(&desc->wlock);
@@ -1091,7 +1152,7 @@ static int wdm_pre_reset(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
- kill_urbs(desc);
+ poison_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
return 0;
@@ -1102,6 +1163,7 @@ static int wdm_post_reset(struct usb_interface *intf)
struct wdm_device *desc = wdm_find_device(intf);
int rv;
+ unpoison_urbs(desc);
clear_bit(WDM_OVERFLOW, &desc->flags);
clear_bit(WDM_RESETTING, &desc->flags);
rv = recover_from_urb_loss(desc);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 4a80103675d5..5edf52a6edfc 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -274,8 +274,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
#define usblp_reset(usblp)\
usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0)
-#define usblp_hp_channel_change_request(usblp, channel, buffer) \
- usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1)
+static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kzalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST,
+ USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE,
+ channel, buf, 1);
+ if (ret == 0)
+ *new_channel = buf[0];
+
+ kfree(buf);
+
+ return ret;
+}
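The macro becomes a real function here because the data stage of a USB control
transfer must live in kmalloc()ed, DMA-able memory; a one-byte buffer on the
caller's stack is not acceptable. As a rough sketch of the general pattern
(helper name and request value are illustrative, not part of usblp):

	/* Bounce a one-byte IN control transfer through heap memory. */
	static int read_one_byte_ctrl(struct usb_device *udev, u8 request, u8 *out)
	{
		u8 *buf;
		int ret;

		buf = kmalloc(1, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
				      USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE,
				      0, 0, buf, 1, USB_CTRL_GET_TIMEOUT);
		if (ret == 1)
			*out = buf[0];

		kfree(buf);
		return ret == 1 ? 0 : (ret < 0 ? ret : -EIO);
	}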
/*
* See the description for usblp_select_alts() below for the usage
@@ -468,7 +485,8 @@ static int usblp_release(struct inode *inode, struct file *file)
usb_autopm_put_interface(usblp->intf);
if (!usblp->present) /* finish cleanup from disconnect */
- usblp_cleanup(usblp);
+ usblp_cleanup(usblp); /* any URBs must be dead */
+
mutex_unlock(&usblp_mutex);
return 0;
}
@@ -476,16 +494,24 @@ static int usblp_release(struct inode *inode, struct file *file)
/* No kernel lock - fine */
static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait)
{
- __poll_t ret;
+ struct usblp *usblp = file->private_data;
+ __poll_t ret = 0;
unsigned long flags;
- struct usblp *usblp = file->private_data;
/* Should we check file->f_mode & FMODE_WRITE before poll_wait()? */
poll_wait(file, &usblp->rwait, wait);
poll_wait(file, &usblp->wwait, wait);
+
+ mutex_lock(&usblp->mut);
+ if (!usblp->present)
+ ret |= EPOLLHUP;
+ mutex_unlock(&usblp->mut);
+
spin_lock_irqsave(&usblp->lock, flags);
- ret = ((usblp->bidir && usblp->rcomplete) ? EPOLLIN | EPOLLRDNORM : 0) |
- ((usblp->no_paper || usblp->wcomplete) ? EPOLLOUT | EPOLLWRNORM : 0);
+ if (usblp->bidir && usblp->rcomplete)
+ ret |= EPOLLIN | EPOLLRDNORM;
+ if (usblp->no_paper || usblp->wcomplete)
+ ret |= EPOLLOUT | EPOLLWRNORM;
spin_unlock_irqrestore(&usblp->lock, flags);
return ret;
}
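From user space, the visible effect is that poll() on the printer node now
reports (E)POLLHUP once the device is gone, in addition to the usual read/write
readiness bits. A small sketch, taking an already-open file descriptor for the
printer node:

	#include <poll.h>

	/* Wait until the printer is writable or has been unplugged. */
	int wait_printer_writable(int fd, int timeout_ms)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
		int ret = poll(&pfd, 1, timeout_ms);

		if (ret <= 0)
			return ret;		/* timeout or error */
		if (pfd.revents & POLLHUP)
			return -1;		/* device disconnected */
		return (pfd.revents & POLLOUT) ? 1 : 0;
	}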
@@ -826,6 +852,11 @@ static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, lo
if (rv < 0)
return rv;
+ if (!usblp->present) {
+ count = -ENODEV;
+ goto done;
+ }
+
if ((avail = usblp->rstatus) < 0) {
printk(KERN_ERR "usblp%d: error %d reading from printer\n",
usblp->minor, (int)avail);
@@ -1304,14 +1335,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
return -EINVAL;
- alts = usblp->protocol[protocol].alt_setting;
- if (alts < 0)
- return -EINVAL;
- r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
- if (r < 0) {
- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
- alts, usblp->ifnum);
- return r;
+ /* Don't unnecessarily set the interface if there's a single alt. */
+ if (usblp->intf->num_altsetting > 1) {
+ alts = usblp->protocol[protocol].alt_setting;
+ if (alts < 0)
+ return -EINVAL;
+ r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
+ if (r < 0) {
+ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
+ alts, usblp->ifnum);
+ return r;
+ }
}
usblp->bidir = (usblp->protocol[protocol].epread != NULL);
@@ -1375,9 +1409,11 @@ static void usblp_disconnect(struct usb_interface *intf)
usblp_unlink_urbs(usblp);
mutex_unlock(&usblp->mut);
+ usb_poison_anchored_urbs(&usblp->urbs);
if (!usblp->used)
usblp_cleanup(usblp);
+
mutex_unlock(&usblp_mutex);
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 00204824bffd..a45443482e0b 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -463,11 +463,11 @@ static void snoop_urb(struct usb_device *udev,
if (userurb) { /* Async */
if (when == SUBMIT)
- dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
+ dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
"length %u\n",
userurb, ep, t, d, length);
else
- dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
+ dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
"actual_length %u status %d\n",
userurb, ep, t, d, length,
timeout_or_status);
@@ -1189,7 +1189,12 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg)
ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
if (ret)
return ret;
- tbuf = kmalloc(len1, GFP_KERNEL);
+
+ /*
+ * len1 can be almost arbitrarily large. Don't WARN if it's
+ * too big, just fail the request.
+ */
+ tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN);
if (!tbuf) {
ret = -ENOMEM;
goto done;
@@ -1631,7 +1636,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
if (num_sgs) {
as->urb->sg = kmalloc_array(num_sgs,
sizeof(struct scatterlist),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!as->urb->sg) {
ret = -ENOMEM;
goto error;
@@ -1666,7 +1671,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
(uurb_start - as->usbm->vm_start);
} else {
as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!as->urb->transfer_buffer) {
ret = -ENOMEM;
goto error;
@@ -1927,7 +1932,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
if (as) {
int retval;
- snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
+ snoop(&ps->dev->dev, "reap %px\n", as->userurb);
retval = processcompl(as, (void __user * __user *)arg);
free_async(as);
return retval;
@@ -1944,7 +1949,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
as = async_getcompleted(ps);
if (as) {
- snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
+ snoop(&ps->dev->dev, "reap %px\n", as->userurb);
retval = processcompl(as, (void __user * __user *)arg);
free_async(as);
} else {
@@ -2070,7 +2075,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
if (as) {
int retval;
- snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
+ snoop(&ps->dev->dev, "reap %px\n", as->userurb);
retval = processcompl_compat(as, (void __user * __user *)arg);
free_async(as);
return retval;
@@ -2087,7 +2092,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar
as = async_getcompleted(ps);
if (as) {
- snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
+ snoop(&ps->dev->dev, "reap %px\n", as->userurb);
retval = processcompl_compat(as, (void __user * __user *)arg);
free_async(as);
} else {
@@ -2512,7 +2517,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
#endif
case USBDEVFS_DISCARDURB:
- snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p);
+ snoop(&dev->dev, "%s: DISCARDURB %px\n", __func__, p);
ret = proc_unlinkurb(ps, p);
break;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 8cf2d2a5e266..0ddc2e30065f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -37,6 +37,7 @@
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define USB_VENDOR_SMSC 0x0424
+#define USB_PRODUCT_USB5534B 0x5534
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
@@ -1196,6 +1197,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
#ifdef CONFIG_PM
udev->reset_resume = 1;
#endif
+ /* Don't set the change_bits when the device
+ * was powered off.
+ */
+ if (test_bit(port1, hub->power_bits))
+ set_bit(port1, hub->change_bits);
} else {
/* The power session is gone; tell hub_wq */
@@ -3051,6 +3057,15 @@ static int check_port_resume_type(struct usb_device *udev,
if (portchange & USB_PORT_STAT_C_ENABLE)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_ENABLE);
+
+ /*
+ * Whatever made this reset-resume necessary may have
+ * turned on the port1 bit in hub->change_bits. But after
+ * a successful reset-resume we want the bit to be clear;
+ * if it was on it would indicate that something happened
+ * following the reset-resume.
+ */
+ clear_bit(port1, hub->change_bits);
}
return status;
@@ -3524,9 +3539,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
* sequence.
*/
status = hub_port_status(hub, port1, &portstatus, &portchange);
-
- /* TRSMRCY = 10 msec */
- msleep(10);
}
SuspendCleared:
@@ -3541,6 +3553,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_SUSPEND);
}
+
+ /* TRSMRCY = 10 msec */
+ msleep(10);
}
if (udev->persist_enabled)
@@ -5420,8 +5435,11 @@ out_hdev_lock:
}
static const struct usb_device_id hub_id_table[] = {
- { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
+ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_PRODUCT
+ | USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_SMSC,
+ .idProduct = USB_PRODUCT_USB5534B,
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index d0bbbd76ba8e..df3aa0b69188 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -146,8 +146,10 @@ static inline unsigned hub_power_on_good_delay(struct usb_hub *hub)
{
unsigned delay = hub->descriptor->bPwrOn2PwrGood * 2;
- /* Wait at least 100 msec for power to become stable */
- return max(delay, 100U);
+ if (!hub->hdev->parent) /* root hub */
+ return delay;
+ else /* Wait at least 100 msec for power to become stable */
+ return max(delay, 100U);
}
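Worked example with an assumed bPwrOn2PwrGood of 10: delay = 10 * 2 = 20 ms.
A non-root hub still gets max(20, 100) = 100 ms, while a root hub now returns
the raw 20 ms, presumably because that value comes straight from the host
controller driver rather than from an external hub's descriptor.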
static inline int hub_port_debounce_be_connected(struct usb_hub *hub,
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 0d3fd2083165..152228d33ad2 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -588,12 +588,13 @@ void usb_sg_cancel(struct usb_sg_request *io)
int i, retval;
spin_lock_irqsave(&io->lock, flags);
- if (io->status) {
+ if (io->status || io->count == 0) {
spin_unlock_irqrestore(&io->lock, flags);
return;
}
/* shut everything down */
io->status = -ECONNRESET;
+ io->count++; /* Keep the request alive until we're done */
spin_unlock_irqrestore(&io->lock, flags);
for (i = io->entries - 1; i >= 0; --i) {
@@ -607,6 +608,12 @@ void usb_sg_cancel(struct usb_sg_request *io)
dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
__func__, retval);
}
+
+ spin_lock_irqsave(&io->lock, flags);
+ io->count--;
+ if (!io->count)
+ complete(&io->complete);
+ spin_unlock_irqrestore(&io->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_sg_cancel);
@@ -1136,11 +1143,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
if (usb_endpoint_out(epaddr)) {
ep = dev->ep_out[epnum];
- if (reset_hardware)
+ if (reset_hardware && epnum != 0)
dev->ep_out[epnum] = NULL;
} else {
ep = dev->ep_in[epnum];
- if (reset_hardware)
+ if (reset_hardware && epnum != 0)
dev->ep_in[epnum] = NULL;
}
if (ep) {
@@ -1197,6 +1204,34 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
}
}
+/*
+ * usb_disable_device_endpoints -- Disable all endpoints for a device
+ * @dev: the device whose endpoints are being disabled
+ * @skip_ep0: 0 to disable endpoint 0, 1 to skip it.
+ */
+static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0)
+{
+ struct usb_hcd *hcd = bus_to_hcd(dev->bus);
+ int i;
+
+ if (hcd->driver->check_bandwidth) {
+ /* First pass: Cancel URBs, leave endpoint pointers intact. */
+ for (i = skip_ep0; i < 16; ++i) {
+ usb_disable_endpoint(dev, i, false);
+ usb_disable_endpoint(dev, i + USB_DIR_IN, false);
+ }
+ /* Remove endpoints from the host controller internal state */
+ mutex_lock(hcd->bandwidth_mutex);
+ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+ mutex_unlock(hcd->bandwidth_mutex);
+ }
+ /* Second pass: remove endpoint pointers */
+ for (i = skip_ep0; i < 16; ++i) {
+ usb_disable_endpoint(dev, i, true);
+ usb_disable_endpoint(dev, i + USB_DIR_IN, true);
+ }
+}
+
/**
* usb_disable_device - Disable all the endpoints for a USB device
* @dev: the device whose endpoints are being disabled
@@ -1210,7 +1245,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
void usb_disable_device(struct usb_device *dev, int skip_ep0)
{
int i;
- struct usb_hcd *hcd = bus_to_hcd(dev->bus);
/* getting rid of interfaces will disconnect
* any drivers bound to them (a key side effect)
@@ -1256,22 +1290,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
skip_ep0 ? "non-ep0" : "all");
- if (hcd->driver->check_bandwidth) {
- /* First pass: Cancel URBs, leave endpoint pointers intact. */
- for (i = skip_ep0; i < 16; ++i) {
- usb_disable_endpoint(dev, i, false);
- usb_disable_endpoint(dev, i + USB_DIR_IN, false);
- }
- /* Remove endpoints from the host controller internal state */
- mutex_lock(hcd->bandwidth_mutex);
- usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
- mutex_unlock(hcd->bandwidth_mutex);
- /* Second pass: remove endpoint pointers */
- }
- for (i = skip_ep0; i < 16; ++i) {
- usb_disable_endpoint(dev, i, true);
- usb_disable_endpoint(dev, i + USB_DIR_IN, true);
- }
+
+ usb_disable_device_endpoints(dev, skip_ep0);
}
/**
@@ -1514,6 +1534,9 @@ EXPORT_SYMBOL_GPL(usb_set_interface);
* The caller must own the device lock.
*
* Return: Zero on success, else a negative error code.
+ *
+ * If this routine fails the device will probably be in an unusable state
+ * with endpoints disabled, and interfaces only partially enabled.
*/
int usb_reset_configuration(struct usb_device *dev)
{
@@ -1529,10 +1552,7 @@ int usb_reset_configuration(struct usb_device *dev)
* calls during probe() are fine
*/
- for (i = 1; i < 16; ++i) {
- usb_disable_endpoint(dev, i, true);
- usb_disable_endpoint(dev, i + USB_DIR_IN, true);
- }
+ usb_disable_device_endpoints(dev, 1); /* skip ep0 */
config = dev->actconfig;
retval = 0;
@@ -1545,34 +1565,10 @@ int usb_reset_configuration(struct usb_device *dev)
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
- /* Make sure we have enough bandwidth for each alternate setting 0 */
- for (i = 0; i < config->desc.bNumInterfaces; i++) {
- struct usb_interface *intf = config->interface[i];
- struct usb_host_interface *alt;
- alt = usb_altnum_to_altsetting(intf, 0);
- if (!alt)
- alt = &intf->altsetting[0];
- if (alt != intf->cur_altsetting)
- retval = usb_hcd_alloc_bandwidth(dev, NULL,
- intf->cur_altsetting, alt);
- if (retval < 0)
- break;
- }
- /* If not, reinstate the old alternate settings */
+ /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */
+ retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL);
if (retval < 0) {
-reset_old_alts:
- for (i--; i >= 0; i--) {
- struct usb_interface *intf = config->interface[i];
- struct usb_host_interface *alt;
-
- alt = usb_altnum_to_altsetting(intf, 0);
- if (!alt)
- alt = &intf->altsetting[0];
- if (alt != intf->cur_altsetting)
- usb_hcd_alloc_bandwidth(dev, NULL,
- alt, intf->cur_altsetting);
- }
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
return retval;
@@ -1581,8 +1577,12 @@ reset_old_alts:
USB_REQ_SET_CONFIGURATION, 0,
config->desc.bConfigurationValue, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
- if (retval < 0)
- goto reset_old_alts;
+ if (retval < 0) {
+ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+ usb_enable_lpm(dev);
+ mutex_unlock(hcd->bandwidth_mutex);
+ return retval;
+ }
mutex_unlock(hcd->bandwidth_mutex);
/* re-init hc/hcd interface/endpoint state */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index da30b5664ff3..f6a6c54cba35 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -25,17 +25,23 @@ static unsigned int quirk_count;
static char quirks_param[128];
-static int quirks_param_set(const char *val, const struct kernel_param *kp)
+static int quirks_param_set(const char *value, const struct kernel_param *kp)
{
- char *p, *field;
+ char *val, *p, *field;
u16 vid, pid;
u32 flags;
size_t i;
int err;
+ val = kstrdup(value, GFP_KERNEL);
+ if (!val)
+ return -ENOMEM;
+
err = param_set_copystring(val, kp);
- if (err)
+ if (err) {
+ kfree(val);
return err;
+ }
mutex_lock(&quirk_mutex);
@@ -60,10 +66,11 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
if (!quirk_list) {
quirk_count = 0;
mutex_unlock(&quirk_mutex);
+ kfree(val);
return -ENOMEM;
}
- for (i = 0, p = (char *)val; p && *p;) {
+ for (i = 0, p = val; p && *p;) {
/* Each entry consists of VID:PID:flags */
field = strsep(&p, ":");
if (!field)
@@ -144,6 +151,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
unlock:
mutex_unlock(&quirk_mutex);
+ kfree(val);
return 0;
}
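The kstrdup() is needed because strsep() writes NUL terminators into the string
it parses, and the buffer handed to a param-set callback should be treated as
read-only. A minimal sketch of that pattern (helper name illustrative):

	/* Count comma-separated "vid:pid:flags" entries without touching
	 * the caller's string. */
	static int count_quirk_entries(const char *value)
	{
		char *dup, *p, *field;
		int n = 0;

		dup = kstrdup(value, GFP_KERNEL);
		if (!dup)
			return -ENOMEM;

		for (p = dup; p && *p; ) {
			field = strsep(&p, ",");	/* modifies the copy only */
			if (!field)
				break;
			n++;
		}

		kfree(dup);
		return n;
	}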
@@ -218,11 +226,12 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Logitech HD Webcam C270 */
{ USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
- /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
+ /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech ConferenceCam CC3000e */
{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -333,12 +342,19 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Agfa SNAPSCAN 1212U */
+ { USB_DEVICE(0x06bd, 0x0001), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */
{ USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
/* Guillemot Webcam Hercules Dualpix Exchange*/
{ USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Guillemot Hercules DJ Console audio card (BZ 208357) */
+ { USB_DEVICE(0x06f8, 0xb000), .driver_info =
+ USB_QUIRK_ENDPOINT_BLACKLIST },
+
/* Midiman M-Audio Keystation 88es */
{ USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -361,13 +377,23 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0926, 0x0202), .driver_info =
USB_QUIRK_ENDPOINT_BLACKLIST },
+ /* Sound Devices MixPre-D */
+ { USB_DEVICE(0x0926, 0x0208), .driver_info =
+ USB_QUIRK_ENDPOINT_BLACKLIST },
+
/* Keytouch QWERTY Panel keyboard */
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Kingston DataTraveler 3.0 */
+ { USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
+
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
+ /* ELMO L-12F document camera */
+ { USB_DEVICE(0x09a1, 0x0028), .driver_info = USB_QUIRK_DELAY_CTRL_MSG },
+
/* Broadcom BCM92035DGROM BT dongle */
{ USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -380,14 +406,22 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Realtek hub in Dell WD19 (Type-C) */
{ USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
+ { USB_DEVICE(0x0bda, 0x5487), .driver_info = USB_QUIRK_RESET_RESUME },
/* Generic RTL8153 based ethernet adapters */
{ USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
+ /* SONiX USB DEVICE Touchpad */
+ { USB_DEVICE(0x0c45, 0x7056), .driver_info =
+ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* novation SoundControl XL */
+ { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Huawei 4G LTE module */
{ USB_DEVICE(0x12d1, 0x15bb), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
@@ -401,6 +435,13 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1532, 0x0116), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
+ { USB_DEVICE(0x17ef, 0xa012), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
+
+ /* Lenovo ThinkPad USB-C Dock Gen2 Ethernet (RTL8153 GigE) */
+ { USB_DEVICE(0x17ef, 0xa387), .driver_info = USB_QUIRK_NO_LPM },
+
/* BUILDWIN Photo Frame */
{ USB_DEVICE(0x1908, 0x1315), .driver_info =
USB_QUIRK_HONOR_BNUMINTERFACES },
@@ -430,6 +471,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Corsair K70 LUX */
{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Corsair K70 RGB RAPDIFIRE */
+ { USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },
+
/* MIDI keyboard WORLDE MINI */
{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -452,15 +497,18 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
+ { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
+
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* Fibocom L850-GL LTE Modem */
+ { USB_DEVICE(0x2cb7, 0x0007), .driver_info =
+ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
- /* novation SoundControl XL */
- { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
-
{ } /* terminating entry must be last */
};
@@ -495,7 +543,10 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
* Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST.
*/
static const struct usb_device_id usb_endpoint_blacklist[] = {
+ { USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x01 },
+ { USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x81 },
{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
+ { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 },
{ }
};
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 7e88fdfe3cf5..b93b18ba89df 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -888,7 +888,11 @@ read_descriptors(struct file *filp, struct kobject *kobj,
size_t srclen, n;
int cfgno;
void *src;
+ int retval;
+ retval = usb_lock_device_interruptible(udev);
+ if (retval < 0)
+ return -EINTR;
/* The binary attribute begins with the device descriptor.
* Following that are the raw descriptor entries for all the
* configurations (config plus subsidiary descriptors).
@@ -913,6 +917,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
off -= srclen;
}
}
+ usb_unlock_device(udev);
return count - nleft;
}
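The attribute guarded here is the binary "descriptors" file in the device's
sysfs directory; with the interruptible lock, a read of that file can now fail
with EINTR if a signal arrives while another thread holds the device. A rough
user-space sketch (path handling simplified):

	#include <errno.h>
	#include <stdio.h>

	/* Read the raw descriptors of a device, e.g. from
	 * /sys/bus/usb/devices/<busid>/descriptors. */
	int dump_descriptors(const char *descriptors_path, unsigned char *buf,
			     size_t bufsize)
	{
		FILE *f = fopen(descriptors_path, "rb");
		size_t n;

		if (!f)
			return -errno;
		n = fread(buf, 1, bufsize, f);	/* may now fail with EINTR */
		fclose(f);
		return (int)n;
	}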
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 5e844097a9e3..3cd7732c086e 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -773,11 +773,12 @@ void usb_block_urb(struct urb *urb)
EXPORT_SYMBOL_GPL(usb_block_urb);
/**
- * usb_kill_anchored_urbs - cancel transfer requests en masse
+ * usb_kill_anchored_urbs - kill all URBs associated with an anchor
* @anchor: anchor the requests are bound to
*
- * this allows all outstanding URBs to be killed starting
- * from the back of the queue
+ * This kills all outstanding URBs starting from the back of the queue,
+ * with the guarantee that no completion callbacks will run for the
+ * anchor after this function returns.
*
* This routine should not be called by a driver after its disconnect
* method has returned.
@@ -785,20 +786,26 @@ EXPORT_SYMBOL_GPL(usb_block_urb);
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
+ int surely_empty;
- spin_lock_irq(&anchor->lock);
- while (!list_empty(&anchor->urb_list)) {
- victim = list_entry(anchor->urb_list.prev, struct urb,
- anchor_list);
- /* we must make sure the URB isn't freed before we kill it*/
- usb_get_urb(victim);
- spin_unlock_irq(&anchor->lock);
- /* this will unanchor the URB */
- usb_kill_urb(victim);
- usb_put_urb(victim);
+ do {
spin_lock_irq(&anchor->lock);
- }
- spin_unlock_irq(&anchor->lock);
+ while (!list_empty(&anchor->urb_list)) {
+ victim = list_entry(anchor->urb_list.prev,
+ struct urb, anchor_list);
+ /* make sure the URB isn't freed before we kill it */
+ usb_get_urb(victim);
+ spin_unlock_irq(&anchor->lock);
+ /* this will unanchor the URB */
+ usb_kill_urb(victim);
+ usb_put_urb(victim);
+ spin_lock_irq(&anchor->lock);
+ }
+ surely_empty = usb_anchor_check_wakeup(anchor);
+
+ spin_unlock_irq(&anchor->lock);
+ cpu_relax();
+ } while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
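For reference, the typical driver-side use of this API looks roughly like the
sketch below (structure and names are hypothetical): URBs are anchored at
submit time and then all torn down in a single call on disconnect or suspend,
which is the case the loop above has to terminate reliably for.

	struct my_dev {
		struct usb_anchor submitted;	/* init_usb_anchor() at probe */
	};

	static int my_submit(struct my_dev *dev, struct urb *urb)
	{
		int rv;

		usb_anchor_urb(urb, &dev->submitted);
		rv = usb_submit_urb(urb, GFP_KERNEL);
		if (rv)
			usb_unanchor_urb(urb);
		return rv;
	}

	static void my_disconnect(struct my_dev *dev)
	{
		/* Returns only when no completion can still run off the anchor. */
		usb_kill_anchored_urbs(&dev->submitted);
	}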
@@ -817,21 +824,27 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
+ int surely_empty;
- spin_lock_irq(&anchor->lock);
- anchor->poisoned = 1;
- while (!list_empty(&anchor->urb_list)) {
- victim = list_entry(anchor->urb_list.prev, struct urb,
- anchor_list);
- /* we must make sure the URB isn't freed before we kill it*/
- usb_get_urb(victim);
- spin_unlock_irq(&anchor->lock);
- /* this will unanchor the URB */
- usb_poison_urb(victim);
- usb_put_urb(victim);
+ do {
spin_lock_irq(&anchor->lock);
- }
- spin_unlock_irq(&anchor->lock);
+ anchor->poisoned = 1;
+ while (!list_empty(&anchor->urb_list)) {
+ victim = list_entry(anchor->urb_list.prev,
+ struct urb, anchor_list);
+ /* make sure the URB isn't freed before we kill it */
+ usb_get_urb(victim);
+ spin_unlock_irq(&anchor->lock);
+ /* this will unanchor the URB */
+ usb_poison_urb(victim);
+ usb_put_urb(victim);
+ spin_lock_irq(&anchor->lock);
+ }
+ surely_empty = usb_anchor_check_wakeup(anchor);
+
+ spin_unlock_irq(&anchor->lock);
+ cpu_relax();
+ } while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
@@ -971,14 +984,20 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
unsigned long flags;
+ int surely_empty;
+
+ do {
+ spin_lock_irqsave(&anchor->lock, flags);
+ while (!list_empty(&anchor->urb_list)) {
+ victim = list_entry(anchor->urb_list.prev,
+ struct urb, anchor_list);
+ __usb_unanchor_urb(victim, anchor);
+ }
+ surely_empty = usb_anchor_check_wakeup(anchor);
- spin_lock_irqsave(&anchor->lock, flags);
- while (!list_empty(&anchor->urb_list)) {
- victim = list_entry(anchor->urb_list.prev, struct urb,
- anchor_list);
- __usb_unanchor_urb(victim, anchor);
- }
- spin_unlock_irqrestore(&anchor->lock, flags);
+ spin_unlock_irqrestore(&anchor->lock, flags);
+ cpu_relax();
+ } while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index cc9c93affa14..828803b672aa 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -112,6 +112,7 @@ struct dwc2_hsotg_req;
* @debugfs: File entry for debugfs file for this endpoint.
* @dir_in: Set to true if this endpoint is of the IN direction, which
* means that it is sending data to the Host.
+ * @map_dir: Set to the value of dir_in when the DMA buffer is mapped.
* @index: The index for the endpoint registers.
* @mc: Multi Count - number of transactions per microframe
* @interval: Interval for periodic endpoints, in frames or microframes.
@@ -161,6 +162,7 @@ struct dwc2_hsotg_ep {
unsigned short fifo_index;
unsigned char dir_in;
+ unsigned char map_dir;
unsigned char index;
unsigned char mc;
u16 interval;
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 19ae2595f1c3..af26a8a20e0b 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -312,6 +312,7 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
{
int ret;
+ u32 hprt0;
/* Clear interrupt */
dwc2_writel(hsotg, GINTSTS_SESSREQINT, GINTSTS);
@@ -332,6 +333,13 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
* established
*/
dwc2_hsotg_disconnect(hsotg);
+ } else {
+ /* Turn on the port power bit. */
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_PWR;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+ /* Connect hcd after port power is set. */
+ dwc2_hcd_connect(hsotg);
}
}
@@ -421,10 +429,13 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
if (ret && (ret != -ENOTSUPP))
dev_err(hsotg->dev, "exit power_down failed\n");
+ /* Change to L0 state */
+ hsotg->lx_state = DWC2_L0;
call_gadget(hsotg, resume);
+ } else {
+ /* Change to L0 state */
+ hsotg->lx_state = DWC2_L0;
}
- /* Change to L0 state */
- hsotg->lx_state = DWC2_L0;
} else {
if (hsotg->params.power_down)
return;
@@ -642,6 +653,71 @@ static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg)
return 0;
}
+/**
+ * dwc_handle_gpwrdn_disc_det() - Handles the gpwrdn disconnect detect.
+ * Exits hibernation without restoring registers.
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @gpwrdn: GPWRDN register
+ */
+static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
+ u32 gpwrdn)
+{
+ u32 gpwrdn_tmp;
+
+ /* Switch-on voltage to the core */
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+ udelay(5);
+
+ /* Reset core */
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+ udelay(5);
+
+ /* Disable Power Down Clamp */
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+ udelay(5);
+
+ /* Deassert reset core */
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+ udelay(5);
+
+ /* Disable PMU interrupt */
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+
+ /* De-assert Wakeup Logic */
+ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn_tmp &= ~GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+
+ hsotg->hibernated = 0;
+ hsotg->bus_suspended = 0;
+
+ if (gpwrdn & GPWRDN_IDSTS) {
+ hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+ dwc2_core_init(hsotg, false);
+ dwc2_enable_global_interrupts(hsotg);
+ dwc2_hsotg_core_init_disconnected(hsotg, false);
+ dwc2_hsotg_core_connect(hsotg);
+ } else {
+ hsotg->op_state = OTG_STATE_A_HOST;
+
+ /* Initialize the Core for Host mode */
+ dwc2_core_init(hsotg, false);
+ dwc2_enable_global_interrupts(hsotg);
+ dwc2_hcd_start(hsotg);
+ }
+}
+
/*
* GPWRDN interrupt handler.
*
@@ -663,64 +739,14 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
if ((gpwrdn & GPWRDN_DISCONN_DET) &&
(gpwrdn & GPWRDN_DISCONN_DET_MSK) && !linestate) {
- u32 gpwrdn_tmp;
-
dev_dbg(hsotg->dev, "%s: GPWRDN_DISCONN_DET\n", __func__);
-
- /* Switch-on voltage to the core */
- gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
- gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
- dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
- udelay(10);
-
- /* Reset core */
- gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
- gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
- dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
- udelay(10);
-
- /* Disable Power Down Clamp */
- gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
- gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
- dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
- udelay(10);
-
- /* Deassert reset core */
- gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
- gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
- dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
- udelay(10);
-
- /* Disable PMU interrupt */
- gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
- gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
- dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
-
- /* De-assert Wakeup Logic */
- gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
- gpwrdn_tmp &= ~GPWRDN_PMUACTV;
- dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
-
- hsotg->hibernated = 0;
-
- if (gpwrdn & GPWRDN_IDSTS) {
- hsotg->op_state = OTG_STATE_B_PERIPHERAL;
- dwc2_core_init(hsotg, false);
- dwc2_enable_global_interrupts(hsotg);
- dwc2_hsotg_core_init_disconnected(hsotg, false);
- dwc2_hsotg_core_connect(hsotg);
- } else {
- hsotg->op_state = OTG_STATE_A_HOST;
-
- /* Initialize the Core for Host mode */
- dwc2_core_init(hsotg, false);
- dwc2_enable_global_interrupts(hsotg);
- dwc2_hcd_start(hsotg);
- }
- }
-
- if ((gpwrdn & GPWRDN_LNSTSCHG) &&
- (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
+ /*
+ * Call disconnect detect function to exit from
+ * hibernation
+ */
+ dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
+ } else if ((gpwrdn & GPWRDN_LNSTSCHG) &&
+ (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
dev_dbg(hsotg->dev, "%s: GPWRDN_LNSTSCHG\n", __func__);
if (hsotg->hw_params.hibernation &&
hsotg->hibernated) {
@@ -731,24 +757,21 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
dwc2_exit_hibernation(hsotg, 1, 0, 1);
}
}
- }
- if ((gpwrdn & GPWRDN_RST_DET) && (gpwrdn & GPWRDN_RST_DET_MSK)) {
+ } else if ((gpwrdn & GPWRDN_RST_DET) &&
+ (gpwrdn & GPWRDN_RST_DET_MSK)) {
dev_dbg(hsotg->dev, "%s: GPWRDN_RST_DET\n", __func__);
if (!linestate && (gpwrdn & GPWRDN_BSESSVLD))
dwc2_exit_hibernation(hsotg, 0, 1, 0);
- }
- if ((gpwrdn & GPWRDN_STS_CHGINT) &&
- (gpwrdn & GPWRDN_STS_CHGINT_MSK) && linestate) {
+ } else if ((gpwrdn & GPWRDN_STS_CHGINT) &&
+ (gpwrdn & GPWRDN_STS_CHGINT_MSK)) {
dev_dbg(hsotg->dev, "%s: GPWRDN_STS_CHGINT\n", __func__);
- if (hsotg->hw_params.hibernation &&
- hsotg->hibernated) {
- if (gpwrdn & GPWRDN_IDSTS) {
- dwc2_exit_hibernation(hsotg, 0, 0, 0);
- call_gadget(hsotg, resume);
- } else {
- dwc2_exit_hibernation(hsotg, 1, 0, 1);
- }
- }
+ /*
+ * The GPWRDN_STS_CHGINT exit-from-hibernation flow is
+ * the same as the GPWRDN_DISCONN_DET flow, so call the
+ * disconnect detect helper function to exit from
+ * hibernation.
+ */
+ dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
}
}
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index d8424834902d..d0edb7e453c0 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -380,7 +380,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
{
struct usb_request *req = &hs_req->req;
- usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
+ usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
}
/*
@@ -671,8 +671,11 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
*/
static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
{
+ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
int is_isoc = hs_ep->isochronous;
unsigned int maxsize;
+ u32 mps = hs_ep->ep.maxpacket;
+ int dir_in = hs_ep->dir_in;
if (is_isoc)
maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
@@ -681,6 +684,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
else
maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
+ /* Interrupt OUT EP with mps not multiple of 4 */
+ if (hs_ep->index)
+ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
+ maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;
+
return maxsize;
}
@@ -696,11 +704,14 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
* Isochronous - descriptor rx/tx bytes bitfield limit,
* Control In/Bulk/Interrupt - multiple of mps. This will allow to not
* have concatenations from various descriptors within one packet.
+ * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds
+ * to a single descriptor.
*
* Selects corresponding mask for RX/TX bytes as well.
*/
static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
{
+ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
u32 mps = hs_ep->ep.maxpacket;
int dir_in = hs_ep->dir_in;
u32 desc_size = 0;
@@ -724,6 +735,13 @@ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
desc_size -= desc_size % mps;
}
+ /* Interrupt OUT EP with mps not multiple of 4 */
+ if (hs_ep->index)
+ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
+ desc_size = mps;
+ *mask = DEV_DMA_NBYTES_MASK;
+ }
+
return desc_size;
}
@@ -1044,13 +1062,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
length += (mps - (length % mps));
}
- /*
- * If more data to send, adjust DMA for EP0 out data stage.
- * ureq->dma stays unchanged, hence increment it by already
- * passed passed data count before starting new transaction.
- */
- if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
- continuing)
+ if (continuing)
offset = ureq->actual;
/* Fill DDMA chain entries */
@@ -1151,6 +1163,7 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
{
int ret;
+ hs_ep->map_dir = hs_ep->dir_in;
ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
if (ret)
goto dma_error;
@@ -1441,7 +1454,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
u32 windex)
{
- struct dwc2_hsotg_ep *ep;
int dir = (windex & USB_DIR_IN) ? 1 : 0;
int idx = windex & 0x7F;
@@ -1451,12 +1463,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
if (idx > hsotg->num_of_eps)
return NULL;
- ep = index_to_ep(hsotg, idx, dir);
-
- if (idx && ep->dir_in != dir)
- return NULL;
-
- return ep;
+ return index_to_ep(hsotg, idx, dir);
}
/**
@@ -2220,22 +2227,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
*/
static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
{
+ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
struct dwc2_hsotg *hsotg = hs_ep->parent;
unsigned int bytes_rem = 0;
+ unsigned int bytes_rem_correction = 0;
struct dwc2_dma_desc *desc = hs_ep->desc_list;
int i;
u32 status;
+ u32 mps = hs_ep->ep.maxpacket;
+ int dir_in = hs_ep->dir_in;
if (!desc)
return -EINVAL;
+ /* Interrupt OUT EP with mps not multiple of 4 */
+ if (hs_ep->index)
+ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
+ bytes_rem_correction = 4 - (mps % 4);
+
for (i = 0; i < hs_ep->desc_count; ++i) {
status = desc->status;
bytes_rem += status & DEV_DMA_NBYTES_MASK;
+ bytes_rem -= bytes_rem_correction;
if (status & DEV_DMA_STS_MASK)
dev_err(hsotg->dev, "descriptor %d closed with %x\n",
i, status & DEV_DMA_STS_MASK);
+
+ if (status & DEV_DMA_L)
+ break;
+
desc++;
}
@@ -4759,12 +4780,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
epnum, 0);
}
- ret = usb_add_gadget_udc(dev, &hsotg->gadget);
- if (ret) {
- dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
- hsotg->ctrl_req);
- return ret;
- }
dwc2_hsotg_dump(hsotg);
return 0;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index a5c8329fd462..58e53e3d905b 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -1512,19 +1512,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
if (num_packets > max_hc_pkt_count) {
num_packets = max_hc_pkt_count;
chan->xfer_len = num_packets * chan->max_packet;
+ } else if (chan->ep_is_in) {
+ /*
+ * Always program an integral # of max packets
+ * for IN transfers.
+ * Note: This assumes that the input buffer is
+ * aligned and sized accordingly.
+ */
+ chan->xfer_len = num_packets * chan->max_packet;
}
} else {
/* Need 1 packet for transfer length of 0 */
num_packets = 1;
}
- if (chan->ep_is_in)
- /*
- * Always program an integral # of max packets for IN
- * transfers
- */
- chan->xfer_len = num_packets * chan->max_packet;
-
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC)
/*
@@ -5559,7 +5560,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
dwc2_writel(hsotg, hprt0, HPRT0);
/* Wait for the HPRT0.PrtSusp register field to be set */
- if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
+ if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000))
dev_warn(hsotg->dev, "Suspend wasn't generated\n");
/*
@@ -5740,7 +5741,15 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
return ret;
}
- dwc2_hcd_rem_wakeup(hsotg);
+ if (rem_wakeup) {
+ dwc2_hcd_rem_wakeup(hsotg);
+ /*
+ * Set the "port_connect_status_change" flag to force
+ * re-enumeration, because the port connection status is not
+ * detected after exiting hibernation.
+ */
+ hsotg->flags.b.port_connect_status_change = 1;
+ }
hsotg->hibernated = 0;
hsotg->bus_suspended = 0;
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index a052d39b4375..d5f4ec1b73b1 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -500,7 +500,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
&short_read);
if (urb->actual_length + xfer_length > urb->length) {
- dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
+ dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__);
xfer_length = urb->length - urb->actual_length;
}
@@ -1977,6 +1977,18 @@ error:
qtd->error_count++;
dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
qtd, DWC2_HC_XFER_XACT_ERR);
+ /*
+ * We can get here after a completed transaction
+ * (urb->actual_length >= urb->length) which was not reported
+ * as completed. If that is the case, and we do not abort
+ * the transfer, a transfer of size 0 will be enqueued
+ * subsequently. If urb->actual_length is not DMA-aligned,
+ * the buffer will then point to an unaligned address, and
+ * the resulting behavior is undefined. Bail out in that
+ * situation.
+ */
+ if (qtd->urb->actual_length >= qtd->urb->length)
+ qtd->error_count = 3;
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
}
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index a93415f33bf3..6d7861cba3f5 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -808,7 +808,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
int dwc2_init_params(struct dwc2_hsotg *hsotg)
{
const struct of_device_id *match;
- void (*set_params)(void *data);
+ void (*set_params)(struct dwc2_hsotg *data);
dwc2_set_default_params(hsotg);
dwc2_get_device_properties(hsotg);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 577642895b57..a9e86f5e6eaa 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -349,7 +349,8 @@ static void dwc2_driver_shutdown(struct platform_device *dev)
{
struct dwc2_hsotg *hsotg = platform_get_drvdata(dev);
- disable_irq(hsotg->irq);
+ dwc2_disable_global_interrupts(hsotg);
+ synchronize_irq(hsotg->irq);
}
/**
@@ -492,10 +493,23 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
dwc2_lowlevel_hw_disable(hsotg);
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ /* Postponed adding a new gadget to the udc class driver list */
+ if (hsotg->gadget_enabled) {
+ retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget);
+ if (retval) {
+ hsotg->gadget.udc = NULL;
+ dwc2_hsotg_remove(hsotg);
+ goto error;
+ }
+ }
+#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
return 0;
error:
- dwc2_lowlevel_hw_disable(hsotg);
+ if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL)
+ dwc2_lowlevel_hw_disable(hsotg);
return retval;
}
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 6666d2a52bf5..e890c26b6c82 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -981,6 +981,9 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (dwc->dis_tx_ipgap_linecheck_quirk)
reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;
+ if (dwc->parkmode_disable_ss_quirk)
+ reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;
+
dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
}
@@ -1287,6 +1290,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
"snps,dis-del-phy-power-chg-quirk");
dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
"snps,dis-tx-ipgap-linecheck-quirk");
+ dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
+ "snps,parkmode-disable-ss-quirk");
dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
"snps,tx_de_emphasis_quirk");
@@ -1502,6 +1507,17 @@ static int dwc3_probe(struct platform_device *pdev)
err5:
dwc3_event_buffers_cleanup(dwc);
+
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
+
dwc3_ulpi_exit(dwc);
err4:
@@ -1540,9 +1556,9 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
dwc3_free_event_buffers(dwc);
dwc3_free_scratch_buffers(dwc);
@@ -1684,7 +1700,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (PMSG_IS_AUTO(msg))
break;
- ret = dwc3_core_init(dwc);
+ ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 131028501752..55ee41283f39 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -242,6 +242,7 @@
#define DWC3_GUCTL_HSTINAUTORETRY BIT(14)
/* Global User Control 1 Register */
+#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17)
#define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28)
#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24)
@@ -271,6 +272,7 @@
/* Global USB2 PHY Vendor Control Register */
#define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25)
+#define DWC3_GUSB2PHYACC_DONE BIT(24)
#define DWC3_GUSB2PHYACC_BUSY BIT(23)
#define DWC3_GUSB2PHYACC_WRITE BIT(22)
#define DWC3_GUSB2PHYACC_ADDR(n) (n << 16)
@@ -299,6 +301,10 @@
#define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff)
#define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000)
+/* Global RX Fifo Size Register */
+#define DWC31_GRXFIFOSIZ_RXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */
+#define DWC3_GRXFIFOSIZ_RXFDEP(n) ((n) & 0xffff)
+
/* Global Event Size Registers */
#define DWC3_GEVNTSIZ_INTMASK BIT(31)
#define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff)
@@ -992,6 +998,8 @@ struct dwc3_scratchpad_array {
* change quirk.
* @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
* check during HS transmit.
+ * @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed
+ * instances in park mode.
* @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
* @tx_de_emphasis: Tx de-emphasis value
* 0 - -6dB de-emphasis
@@ -1163,6 +1171,7 @@ struct dwc3 {
unsigned dis_u2_freeclk_exists_quirk:1;
unsigned dis_del_phy_power_chg_quirk:1;
unsigned dis_tx_ipgap_linecheck_quirk:1;
+ unsigned parkmode_disable_ss_quirk:1;
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
diff --git a/drivers/usb/dwc3/dwc3-haps.c b/drivers/usb/dwc3/dwc3-haps.c
index c9cc33881bef..02d57d98ef9b 100644
--- a/drivers/usb/dwc3/dwc3-haps.c
+++ b/drivers/usb/dwc3/dwc3-haps.c
@@ -15,10 +15,6 @@
#include <linux/platform_device.h>
#include <linux/property.h>
-#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
-#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
-#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf
-
/**
* struct dwc3_haps - Driver private structure
* @dwc3: child dwc3 platform_device
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 4c2771c5e727..1ef89a4317c8 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -243,6 +243,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "amlogic,meson-axg-dwc3" },
{ .compatible = "amlogic,meson-gxl-dwc3" },
{ .compatible = "allwinner,sun50i-h6-dwc3" },
+ { .compatible = "hisilicon,hi3670-dwc3" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index ed8b86517675..0dfb710f48b5 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -432,8 +432,13 @@ static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
if (extcon_get_state(edev, EXTCON_USB) == true)
dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
+
if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
omap->edev = edev;
}
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index edf7984707b7..bf40a2f36e97 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -112,6 +112,7 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "otg"),
+ PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
@@ -132,6 +133,7 @@ static const struct property_entry dwc3_pci_amd_properties[] = {
PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
@@ -203,8 +205,10 @@ static void dwc3_pci_resume_work(struct work_struct *work)
int ret;
ret = pm_runtime_get_sync(&dwc3->dev);
- if (ret)
+ if (ret) {
+ pm_runtime_put_sync_autosuspend(&dwc3->dev);
return;
+ }
pm_runtime_mark_last_busy(&dwc3->dev);
pm_runtime_put_sync_autosuspend(&dwc3->dev);
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index a6d0203e40b6..5bb5384f3612 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -234,8 +234,10 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
for (i = qcom->num_clocks - 1; i >= 0; i--)
clk_disable_unprepare(qcom->clks[i]);
+ if (device_may_wakeup(qcom->dev))
+ dwc3_qcom_enable_interrupts(qcom);
+
qcom->is_suspended = true;
- dwc3_qcom_enable_interrupts(qcom);
return 0;
}
@@ -248,7 +250,8 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom)
if (!qcom->is_suspended)
return 0;
- dwc3_qcom_disable_interrupts(qcom);
+ if (device_may_wakeup(qcom->dev))
+ dwc3_qcom_disable_interrupts(qcom);
for (i = 0; i < qcom->num_clocks; i++) {
ret = clk_prepare_enable(qcom->clks[i]);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 2fb02f877401..0d95ca804c1e 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -935,12 +935,16 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
struct dwc3_ep *dep, struct dwc3_request *req)
{
+ unsigned int trb_length = 0;
int ret;
req->direction = !!dep->number;
if (req->request.length == 0) {
- dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0,
+ if (!req->direction)
+ trb_length = dep->endpoint.maxpacket;
+
+ dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, trb_length,
DWC3_TRBCTL_CONTROL_DATA, false);
ret = dwc3_ep0_start_trans(dep);
} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
@@ -987,9 +991,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
+ if (!req->direction)
+ trb_length = dep->endpoint.maxpacket;
+
/* Now prepare one extra TRB to align transfer size */
dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
- 0, DWC3_TRBCTL_CONTROL_DATA,
+ trb_length, DWC3_TRBCTL_CONTROL_DATA,
false);
ret = dwc3_ep0_start_trans(dep);
} else {
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7b8b463676ad..1396ee529792 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -270,7 +270,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
{
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
- u32 timeout = 1000;
+ u32 timeout = 5000;
u32 saved_config = 0;
u32 reg;
@@ -304,13 +304,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
}
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
- int needs_wakeup;
+ int link_state;
- needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
- dwc->link_state == DWC3_LINK_STATE_U2 ||
- dwc->link_state == DWC3_LINK_STATE_U3);
-
- if (unlikely(needs_wakeup)) {
+ link_state = dwc3_gadget_get_link_state(dwc);
+ if (link_state == DWC3_LINK_STATE_U1 ||
+ link_state == DWC3_LINK_STATE_U2 ||
+ link_state == DWC3_LINK_STATE_U3) {
ret = __dwc3_gadget_wakeup(dwc);
dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
ret);
@@ -593,8 +592,23 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
if (desc->bInterval) {
- params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
- dep->interval = 1 << (desc->bInterval - 1);
+ u8 bInterval_m1;
+
+ /*
+ * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
+ * must be set to 0 when the controller operates in full-speed.
+ */
+ bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
+ if (dwc->gadget.speed == USB_SPEED_FULL)
+ bInterval_m1 = 0;
+
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
+ dwc->gadget.speed == USB_SPEED_FULL)
+ dep->interval = desc->bInterval;
+ else
+ dep->interval = 1 << (desc->bInterval - 1);
+
+ params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
}
return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
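A short worked example with assumed descriptor values: for a high-speed
interrupt endpoint with bInterval = 4, dep->interval = 1 << (4 - 1) = 8
microframes and bInterval_m1 = 3; for a full-speed interrupt endpoint with
bInterval = 10, dep->interval is now 10 frames while bInterval_m1 is forced
to 0, as required when the controller runs at full speed.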
@@ -688,12 +702,13 @@ out:
return 0;
}
-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force);
+static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+ bool interrupt);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
- dwc3_stop_active_transfer(dep, true);
+ dwc3_stop_active_transfer(dep, true, false);
/* - giveback all requests to gadget driver */
while (!list_empty(&dep->started_list)) {
@@ -1016,26 +1031,24 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
* dwc3_prepare_one_trb - setup one TRB from one request
* @dep: endpoint for which this request is prepared
* @req: dwc3_request pointer
+ * @trb_length: buffer size of the TRB
* @chain: should this TRB be chained to the next?
* @node: only for isochronous endpoints. First TRB needs different type.
*/
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
- struct dwc3_request *req, unsigned chain, unsigned node)
+ struct dwc3_request *req, unsigned int trb_length,
+ unsigned chain, unsigned node)
{
struct dwc3_trb *trb;
- unsigned int length;
dma_addr_t dma;
unsigned stream_id = req->request.stream_id;
unsigned short_not_ok = req->request.short_not_ok;
unsigned no_interrupt = req->request.no_interrupt;
- if (req->request.num_sgs > 0) {
- length = sg_dma_len(req->start_sg);
+ if (req->request.num_sgs > 0)
dma = sg_dma_address(req->start_sg);
- } else {
- length = req->request.length;
+ else
dma = req->request.dma;
- }
trb = &dep->trb_pool[dep->trb_enqueue];
@@ -1047,7 +1060,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
req->num_trbs++;
- __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
+ __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
stream_id, short_not_ok, no_interrupt);
}
@@ -1057,16 +1070,27 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
struct scatterlist *sg = req->start_sg;
struct scatterlist *s;
int i;
-
+ unsigned int length = req->request.length;
+ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+ unsigned int rem = length % maxp;
unsigned int remaining = req->request.num_mapped_sgs
- req->num_queued_sgs;
+ /*
+ * If we resume preparing the request, then get the remaining length of
+ * the request and resume where we left off.
+ */
+ for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
+ length -= sg_dma_len(s);
+
for_each_sg(sg, s, remaining, i) {
- unsigned int length = req->request.length;
- unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
- unsigned int rem = length % maxp;
+ unsigned int trb_length;
unsigned chain = true;
+ trb_length = min_t(unsigned int, length, sg_dma_len(s));
+
+ length -= trb_length;
+
/*
* IOMMU driver is coalescing the list of sgs which shares a
* page boundary into one and giving it to USB driver. With
@@ -1074,7 +1098,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
* sgs passed. So mark the chain bit to false if it is the last
* mapped sg.
*/
- if (i == remaining - 1)
+ if ((i == remaining - 1) || !length)
chain = false;
if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
@@ -1084,7 +1108,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
req->needs_extra_trb = true;
/* prepare normal TRB */
- dwc3_prepare_one_trb(dep, req, true, i);
+ dwc3_prepare_one_trb(dep, req, trb_length, true, i);
/* Now prepare one extra TRB to align transfer size */
trb = &dep->trb_pool[dep->trb_enqueue];
@@ -1094,8 +1118,37 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
req->request.stream_id,
req->request.short_not_ok,
req->request.no_interrupt);
+ } else if (req->request.zero && req->request.length &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ !rem && !chain) {
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_trb *trb;
+
+ req->needs_extra_trb = true;
+
+ /* Prepare normal TRB */
+ dwc3_prepare_one_trb(dep, req, trb_length, true, i);
+
+ /* Prepare one extra TRB to handle ZLP */
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ req->num_trbs++;
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
+ !req->direction, 1,
+ req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt);
+
+ /* Prepare one more TRB to handle MPS alignment */
+ if (!req->direction) {
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ req->num_trbs++;
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
+ false, 1, req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt);
+ }
} else {
- dwc3_prepare_one_trb(dep, req, chain, i);
+ dwc3_prepare_one_trb(dep, req, trb_length, chain, i);
}
/*
@@ -1109,6 +1162,17 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
req->start_sg = sg_next(s);
req->num_queued_sgs++;
+ req->num_pending_sgs--;
+
+ /*
+ * The number of pending SG entries may not correspond to the
+ * number of mapped SG entries. If all the data are queued, then
+ * don't include unused SG entries.
+ */
+ if (length == 0) {
+ req->num_pending_sgs = 0;
+ break;
+ }
if (!dwc3_calc_trbs_left(dep))
break;
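The reworked loop above sizes each TRB from what is left of the request rather than from the raw sg entry, clears the chain bit once the request length is exhausted, and drops any leftover sg entries from the pending count. A standalone sketch of that splitting logic (plain C, illustrative names only):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void split_request(unsigned int req_len,
                          const unsigned int *sg_len, unsigned int nents)
{
        unsigned int remaining = req_len;
        unsigned int i;

        for (i = 0; i < nents; i++) {
                unsigned int trb_length = MIN(remaining, sg_len[i]);
                int chain = 1;

                remaining -= trb_length;
                if (i == nents - 1 || remaining == 0)
                        chain = 0;      /* last TRB of this request */

                printf("TRB %u: %u bytes, chain=%d\n", i, trb_length, chain);

                if (remaining == 0)
                        break;          /* unused sg entries are skipped */
        }
}

int main(void)
{
        unsigned int sg[] = { 4096, 4096, 4096 };

        split_request(6000, sg, 3);
        return 0;
}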
@@ -1129,7 +1193,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
req->needs_extra_trb = true;
/* prepare normal TRB */
- dwc3_prepare_one_trb(dep, req, true, 0);
+ dwc3_prepare_one_trb(dep, req, length, true, 0);
/* Now prepare one extra TRB to align transfer size */
trb = &dep->trb_pool[dep->trb_enqueue];
@@ -1139,6 +1203,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
req->request.short_not_ok,
req->request.no_interrupt);
} else if (req->request.zero && req->request.length &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
(IS_ALIGNED(req->request.length, maxp))) {
struct dwc3 *dwc = dep->dwc;
struct dwc3_trb *trb;
@@ -1146,17 +1211,27 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
req->needs_extra_trb = true;
/* prepare normal TRB */
- dwc3_prepare_one_trb(dep, req, true, 0);
+ dwc3_prepare_one_trb(dep, req, length, true, 0);
- /* Now prepare one extra TRB to handle ZLP */
+ /* Prepare one extra TRB to handle ZLP */
trb = &dep->trb_pool[dep->trb_enqueue];
req->num_trbs++;
__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
- false, 1, req->request.stream_id,
+ !req->direction, 1, req->request.stream_id,
req->request.short_not_ok,
req->request.no_interrupt);
+
+ /* Prepare one more TRB to handle MPS alignment for OUT */
+ if (!req->direction) {
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ req->num_trbs++;
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
+ false, 1, req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt);
+ }
} else {
- dwc3_prepare_one_trb(dep, req, false, 0);
+ dwc3_prepare_one_trb(dep, req, length, false, 0);
}
}
@@ -1216,6 +1291,8 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
}
}
+static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
+
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
@@ -1252,14 +1329,20 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret < 0) {
- /*
- * FIXME we need to iterate over the list of requests
- * here and stop, unmap, free and del each of the linked
- * requests instead of what we do now.
- */
- if (req->trb)
- memset(req->trb, 0, sizeof(struct dwc3_trb));
- dwc3_gadget_del_and_unmap_request(dep, req, ret);
+ struct dwc3_request *tmp;
+
+ if (ret == -EAGAIN)
+ return ret;
+
+ dwc3_stop_active_transfer(dep, true, true);
+
+ list_for_each_entry_safe(req, tmp, &dep->started_list, list)
+ dwc3_gadget_move_cancelled_request(req);
+
+ /* If ep isn't started, then there's no end transfer pending */
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+
return ret;
}
@@ -1331,7 +1414,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
}
}
- return __dwc3_gadget_kick_transfer(dep);
+ __dwc3_gadget_kick_transfer(dep);
+
+ return 0;
}
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
@@ -1416,7 +1501,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
}
if (r == req) {
/* wait until it is processed */
- dwc3_stop_active_transfer(dep, true);
+ dwc3_stop_active_transfer(dep, true, true);
if (!r->trb)
goto out0;
@@ -1576,7 +1661,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
u32 reg;
u8 link_state;
- u8 speed;
/*
* According to the Databook, Remote wakeup request should
@@ -1586,16 +1670,15 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
*/
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
- speed = reg & DWC3_DSTS_CONNECTSPD;
- if ((speed == DWC3_DSTS_SUPERSPEED) ||
- (speed == DWC3_DSTS_SUPERSPEED_PLUS))
- return 0;
-
link_state = DWC3_DSTS_USBLNKST(reg);
switch (link_state) {
+ case DWC3_LINK_STATE_RESET:
case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
+ case DWC3_LINK_STATE_U2: /* in HS, means Sleep (L1) */
+ case DWC3_LINK_STATE_U1:
+ case DWC3_LINK_STATE_RESUME:
break;
default:
return -EINVAL;
@@ -1752,6 +1835,10 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
if (dwc->revision < DWC3_REVISION_250A)
reg |= DWC3_DEVTEN_ULSTCNGEN;
+ /* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
+ if (dwc->revision >= DWC3_REVISION_230A)
+ reg |= DWC3_DEVTEN_EOPFEN;
+
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
@@ -2035,7 +2122,6 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
int mdwidth;
- int kbytes;
int size;
mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
@@ -2051,17 +2137,17 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
/* FIFO Depth is in MDWIDTH bytes. Multiply */
size *= mdwidth;
- kbytes = size / 1024;
- if (kbytes == 0)
- kbytes = 1;
-
/*
- * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for
- * internal overhead. We don't really know how these are used,
- * but documentation say it exists.
+ * To meet performance requirements, a minimum TxFIFO size of 3x
+ * MaxPacketSize is recommended for endpoints that support burst and a
+ * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't
+ * support burst. Using those numbers, we can calculate the max packet
+ * limit as below.
*/
- size -= mdwidth * (kbytes + 1);
- size /= kbytes;
+ if (dwc->maximum_speed >= USB_SPEED_SUPER)
+ size /= 3;
+ else
+ size /= 2;
usb_ep_set_maxpacket_limit(&dep->endpoint, size);
@@ -2079,8 +2165,39 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
+ int mdwidth;
+ int size;
+
+ mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+
+ /* MDWIDTH is represented in bits, convert to bytes */
+ mdwidth /= 8;
+
+ /* All OUT endpoints share a single RxFIFO space */
+ size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
+ if (dwc3_is_usb31(dwc))
+ size = DWC31_GRXFIFOSIZ_RXFDEP(size);
+ else
+ size = DWC3_GRXFIFOSIZ_RXFDEP(size);
+
+ /* FIFO depth is in MDWIDTH bytes */
+ size *= mdwidth;
+
+ /*
+ * To meet performance requirements, a minimum recommended RxFIFO size
+ * is defined as follows:
+ * RxFIFO size >= (3 x MaxPacketSize) +
+ * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin)
+ *
+ * Then calculate the max packet limit as below.
+ */
+ size -= (3 * 8) + 16;
+ if (size < 0)
+ size = 0;
+ else
+ size /= 3;
- usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
+ usb_ep_set_maxpacket_limit(&dep->endpoint, size);
dep->endpoint.max_streams = 15;
dep->endpoint.ops = &dwc3_gadget_ep_ops;
list_add_tail(&dep->endpoint.ep_list,
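Both endpoint-init hunks above now derive the maxpacket limit from the FIFO geometry: the TxFIFO depth (counted in MDWIDTH-sized words) is converted to bytes and divided by 3 for SuperSpeed and above or 2 otherwise, while the shared RxFIFO additionally reserves room for three 8-byte setup packets plus a 16-byte clock-crossing margin before the divide by 3. A rough sketch of the arithmetic, with made-up example numbers:

#include <stdio.h>

/* Illustrative only: fifo_depth is in MDWIDTH words, mdwidth_bits in bits. */
static int tx_maxpacket_limit(int fifo_depth, int mdwidth_bits, int superspeed)
{
        int bytes = fifo_depth * (mdwidth_bits / 8);

        return superspeed ? bytes / 3 : bytes / 2;
}

static int rx_maxpacket_limit(int fifo_depth, int mdwidth_bits)
{
        int bytes = fifo_depth * (mdwidth_bits / 8);

        bytes -= (3 * 8) + 16;  /* setup packets + clock-crossing margin */
        if (bytes < 0)
                return 0;
        return bytes / 3;
}

int main(void)
{
        printf("tx limit: %d, rx limit: %d\n",
               tx_maxpacket_limit(130, 64, 1), rx_maxpacket_limit(400, 64));
        return 0;
}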
@@ -2245,18 +2362,15 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
struct scatterlist *sg = req->sg;
struct scatterlist *s;
- unsigned int pending = req->num_pending_sgs;
+ unsigned int num_queued = req->num_queued_sgs;
unsigned int i;
int ret = 0;
- for_each_sg(sg, s, pending, i) {
+ for_each_sg(sg, s, num_queued, i) {
trb = &dep->trb_pool[dep->trb_dequeue];
- if (trb->ctrl & DWC3_TRB_CTRL_HWO)
- break;
-
req->sg = sg_next(s);
- req->num_pending_sgs--;
+ req->num_queued_sgs--;
ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
trb, event, status, true);
@@ -2279,14 +2393,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
- /*
- * For OUT direction, host may send less than the setup
- * length. Return true for all OUT requests.
- */
- if (!req->direction)
- return true;
-
- return req->request.actual == req->request.length;
+ return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
}
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
@@ -2295,25 +2402,31 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
{
int ret;
- if (req->num_pending_sgs)
+ if (req->request.num_mapped_sgs)
ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
status);
else
ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
status);
+ req->request.actual = req->request.length - req->remaining;
+
+ if (!dwc3_gadget_ep_request_completed(req))
+ goto out;
+
if (req->needs_extra_trb) {
+ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+
ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
status);
- req->needs_extra_trb = false;
- }
- req->request.actual = req->request.length - req->remaining;
+ /* Reclaim MPS padding TRB for ZLP */
+ if (!req->direction && req->request.zero && req->request.length &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ (IS_ALIGNED(req->request.length, maxp)))
+ ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status);
- if (!dwc3_gadget_ep_request_completed(req) ||
- req->num_pending_sgs) {
- __dwc3_gadget_kick_transfer(dep);
- goto out;
+ req->needs_extra_trb = false;
}
dwc3_gadget_giveback(dep, req, status);
@@ -2338,6 +2451,24 @@ static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
}
}
+static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
+{
+ struct dwc3_request *req;
+
+ if (!list_empty(&dep->pending_list))
+ return true;
+
+ /*
+ * We only need to check the first entry of the started list. We can
+ * assume the completed requests are removed from the started list.
+ */
+ req = next_request(&dep->started_list);
+ if (!req)
+ return false;
+
+ return !dwc3_gadget_ep_request_completed(req);
+}
+
static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event)
{
@@ -2365,10 +2496,10 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
- if (stop) {
- dwc3_stop_active_transfer(dep, true);
- dep->flags = DWC3_EP_ENABLED;
- }
+ if (stop)
+ dwc3_stop_active_transfer(dep, true, true);
+ else if (dwc3_gadget_ep_should_continue(dep))
+ __dwc3_gadget_kick_transfer(dep);
/*
* WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
@@ -2488,7 +2619,8 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
}
}
-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force)
+static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+ bool interrupt)
{
struct dwc3 *dwc = dep->dwc;
struct dwc3_gadget_ep_cmd_params params;
@@ -2532,7 +2664,7 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force)
cmd = DWC3_DEPCMD_ENDTRANSFER;
cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
- cmd |= DWC3_DEPCMD_CMDIOC;
+ cmd |= interrupt ? DWC3_DEPCMD_CMDIOC : 0;
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(&params, 0, sizeof(params));
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
@@ -2594,6 +2726,15 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dwc->connected = true;
/*
+ * Ideally, dwc3_reset_gadget() would trigger the function
+ * drivers to stop any active transfers through ep disable.
+ * However, for functions which defer ep disable, such as mass
+ * storage, we will need to rely on the call to stop active
+ * transfers here, and avoid allowing of request queuing.
+ */
+ dwc->connected = false;
+
+ /*
* WORKAROUND: DWC3 revisions <1.88a have an issue which
* would cause a missing Disconnect Event if there's a
* pending Setup Packet in the FIFO.
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
index f62b5f3c2d67..ffe3440abb74 100644
--- a/drivers/usb/dwc3/ulpi.c
+++ b/drivers/usb/dwc3/ulpi.c
@@ -7,6 +7,8 @@
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
*/
+#include <linux/delay.h>
+#include <linux/time64.h>
#include <linux/ulpi/regs.h>
#include "core.h"
@@ -17,14 +19,24 @@
DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
+#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
+
+static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
{
- unsigned count = 1000;
+ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
+ unsigned int count = 1000;
u32 reg;
+ if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ if (read)
+ ns += DWC3_ULPI_BASE_DELAY;
+
while (count--) {
+ ndelay(ns);
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
- if (!(reg & DWC3_GUSB2PHYACC_BUSY))
+ if (reg & DWC3_GUSB2PHYACC_DONE)
return 0;
cpu_relax();
}
@@ -47,7 +59,7 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- ret = dwc3_ulpi_busyloop(dwc);
+ ret = dwc3_ulpi_busyloop(dwc, addr, true);
if (ret)
return ret;
@@ -71,7 +83,7 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
reg |= DWC3_GUSB2PHYACC_WRITE | val;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- return dwc3_ulpi_busyloop(dwc);
+ return dwc3_ulpi_busyloop(dwc, addr, false);
}
static const struct ulpi_ops dwc3_ulpi_ops = {
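The busyloop now waits a computed number of nanoseconds per poll instead of spinning: one ULPI PHY clock at 60 MHz rounded up to whole nanoseconds, times the number of clocks the access is expected to take, with an extra clock for extended addresses and another for the read turnaround. A standalone sketch of that delay calculation (helper names are illustrative):

#include <stdio.h>

#define NSEC_PER_SEC   1000000000L
#define ULPI_CLK_HZ    60000000L
#define BASE_DELAY_NS  ((NSEC_PER_SEC + ULPI_CLK_HZ - 1) / ULPI_CLK_HZ)

static long ulpi_poll_delay_ns(int extended_addr, int is_read)
{
        long ns = 5L * BASE_DELAY_NS;   /* base register access */

        if (extended_addr)
                ns += BASE_DELAY_NS;    /* extra address cycle */
        if (is_read)
                ns += BASE_DELAY_NS;    /* turnaround cycle for reads */
        return ns;
}

int main(void)
{
        printf("read of an extended register waits %ld ns per poll\n",
               ulpi_poll_delay_ns(1, 1));
        return 0;
}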
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index e15e896f356c..97885eb57be6 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -735,19 +735,19 @@ static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
case COMP_USB_TRANSACTION_ERROR:
case COMP_STALL_ERROR:
default:
- if (ep_id == XDBC_EPID_OUT)
+ if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
xdbc.flags |= XDBC_FLAGS_OUT_STALL;
- if (ep_id == XDBC_EPID_IN)
+ if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
xdbc.flags |= XDBC_FLAGS_IN_STALL;
xdbc_trace("endpoint %d stalled\n", ep_id);
break;
}
- if (ep_id == XDBC_EPID_IN) {
+ if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
- } else if (ep_id == XDBC_EPID_OUT) {
+ } else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
} else {
xdbc_trace("invalid endpoint id %d\n", ep_id);
diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h
index 673686eeddd7..6e2b7266a695 100644
--- a/drivers/usb/early/xhci-dbc.h
+++ b/drivers/usb/early/xhci-dbc.h
@@ -120,8 +120,22 @@ struct xdbc_ring {
u32 cycle_state;
};
-#define XDBC_EPID_OUT 2
-#define XDBC_EPID_IN 3
+/*
+ * These are the "Endpoint ID" (also known as "Context Index") values for the
+ * OUT Transfer Ring and the IN Transfer Ring of a Debug Capability Context data
+ * structure.
+ * According to the "eXtensible Host Controller Interface for Universal Serial
+ * Bus (xHCI)" specification, section "7.6.3.2 Endpoint Contexts and Transfer
+ * Rings", these should be 0 and 1, and those are the values AMD machines give
+ * you; but Intel machines seem to use the formula from section "4.5.1 Device
+ * Context Index", which is supposed to be used for the Device Context only.
+ * Luckily the values from Intel don't overlap with those from AMD, so we can
+ * just test for both.
+ */
+#define XDBC_EPID_OUT 0
+#define XDBC_EPID_IN 1
+#define XDBC_EPID_OUT_INTEL 2
+#define XDBC_EPID_IN_INTEL 3
struct xdbc_state {
u16 vendor;
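Because AMD controllers report the xHCI-specified endpoint IDs (0/1) while Intel ones use the Device Context formula (2/3), and the two ranges do not overlap, the event handler above simply accepts either convention. A small sketch of that classification (hypothetical helper names, not from the driver):

#include <stdio.h>

enum xdbc_dir { XDBC_DIR_OUT, XDBC_DIR_IN, XDBC_DIR_INVALID };

/* Accept both the spec-mandated IDs (0 = OUT, 1 = IN) and the
 * Intel Device-Context-style IDs (2 = OUT, 3 = IN). */
static enum xdbc_dir classify_ep_id(int ep_id)
{
        if (ep_id == 0 || ep_id == 2)
                return XDBC_DIR_OUT;
        if (ep_id == 1 || ep_id == 3)
                return XDBC_DIR_IN;
        return XDBC_DIR_INVALID;
}

int main(void)
{
        printf("ep_id 3 -> %s\n",
               classify_ep_id(3) == XDBC_DIR_IN ? "IN" : "other");
        return 0;
}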
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 31cce7805eb2..c6afc4242962 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -264,6 +264,7 @@ config USB_CONFIGFS_NCM
depends on NET
select USB_U_ETHER
select USB_F_NCM
+ select CRC32
help
NCM is an advanced protocol for Ethernet encapsulation that allows
grouping of several ethernet frames into one USB transfer and
@@ -313,6 +314,7 @@ config USB_CONFIGFS_EEM
depends on NET
select USB_U_ETHER
select USB_F_EEM
+ select CRC32
help
CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
and therefore can be supported by more hardware. Technically ECM and
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 30aefd1adbad..d85bb3ba8263 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -96,40 +96,43 @@ function_descriptors(struct usb_function *f,
}
/**
- * next_ep_desc() - advance to the next EP descriptor
+ * next_desc() - advance to the next desc_type descriptor
* @t: current pointer within descriptor array
+ * @desc_type: descriptor type
*
- * Return: next EP descriptor or NULL
+ * Return: next desc_type descriptor or NULL
*
- * Iterate over @t until either EP descriptor found or
+ * Iterate over @t until either desc_type descriptor found or
* NULL (that indicates end of list) encountered
*/
static struct usb_descriptor_header**
-next_ep_desc(struct usb_descriptor_header **t)
+next_desc(struct usb_descriptor_header **t, u8 desc_type)
{
for (; *t; t++) {
- if ((*t)->bDescriptorType == USB_DT_ENDPOINT)
+ if ((*t)->bDescriptorType == desc_type)
return t;
}
return NULL;
}
/*
- * for_each_ep_desc()- iterate over endpoint descriptors in the
- * descriptors list
- * @start: pointer within descriptor array.
- * @ep_desc: endpoint descriptor to use as the loop cursor
+ * for_each_desc() - iterate over desc_type descriptors in the
+ * descriptors list
+ * @start: pointer within descriptor array.
+ * @iter_desc: desc_type descriptor to use as the loop cursor
+ * @desc_type: wanted descriptor type
*/
-#define for_each_ep_desc(start, ep_desc) \
- for (ep_desc = next_ep_desc(start); \
- ep_desc; ep_desc = next_ep_desc(ep_desc+1))
+#define for_each_desc(start, iter_desc, desc_type) \
+ for (iter_desc = next_desc(start, desc_type); \
+ iter_desc; iter_desc = next_desc(iter_desc + 1, desc_type))
/**
- * config_ep_by_speed() - configures the given endpoint
+ * config_ep_by_speed_and_alt() - configures the given endpoint
* according to gadget speed.
* @g: pointer to the gadget
* @f: usb function
* @_ep: the endpoint to configure
+ * @alt: alternate setting number
*
* Return: error code, 0 on success
*
@@ -142,11 +145,13 @@ next_ep_desc(struct usb_descriptor_header **t)
* Note: the supplied function should hold all the descriptors
* for supported speeds
*/
-int config_ep_by_speed(struct usb_gadget *g,
- struct usb_function *f,
- struct usb_ep *_ep)
+int config_ep_by_speed_and_alt(struct usb_gadget *g,
+ struct usb_function *f,
+ struct usb_ep *_ep,
+ u8 alt)
{
struct usb_endpoint_descriptor *chosen_desc = NULL;
+ struct usb_interface_descriptor *int_desc = NULL;
struct usb_descriptor_header **speed_desc = NULL;
struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
@@ -182,8 +187,21 @@ int config_ep_by_speed(struct usb_gadget *g,
default:
speed_desc = f->fs_descriptors;
}
+
+ /* find correct alternate setting descriptor */
+ for_each_desc(speed_desc, d_spd, USB_DT_INTERFACE) {
+ int_desc = (struct usb_interface_descriptor *)*d_spd;
+
+ if (int_desc->bAlternateSetting == alt) {
+ speed_desc = d_spd;
+ goto intf_found;
+ }
+ }
+ return -EIO;
+
+intf_found:
/* find descriptors */
- for_each_ep_desc(speed_desc, d_spd) {
+ for_each_desc(speed_desc, d_spd, USB_DT_ENDPOINT) {
chosen_desc = (struct usb_endpoint_descriptor *)*d_spd;
if (chosen_desc->bEndpointAddress == _ep->address)
goto ep_found;
@@ -237,6 +255,32 @@ ep_found:
}
return 0;
}
+EXPORT_SYMBOL_GPL(config_ep_by_speed_and_alt);
+
+/**
+ * config_ep_by_speed() - configures the given endpoint
+ * according to gadget speed.
+ * @g: pointer to the gadget
+ * @f: usb function
+ * @_ep: the endpoint to configure
+ *
+ * Return: error code, 0 on success
+ *
+ * This function chooses the right descriptors for a given
+ * endpoint according to gadget speed and saves it in the
+ * endpoint desc field. If the endpoint already has a descriptor
+ * assigned to it - overwrites it with currently corresponding
+ * descriptor. The endpoint maxpacket field is updated according
+ * to the chosen descriptor.
+ * Note: the supplied function should hold all the descriptors
+ * for supported speeds
+ */
+int config_ep_by_speed(struct usb_gadget *g,
+ struct usb_function *f,
+ struct usb_ep *_ep)
+{
+ return config_ep_by_speed_and_alt(g, f, _ep, 0);
+}
EXPORT_SYMBOL_GPL(config_ep_by_speed);
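next_ep_desc()/for_each_ep_desc() are generalized above into next_desc()/for_each_desc() so the same walk can locate interface descriptors (to pick the requested alternate setting) as well as endpoint descriptors. The underlying pattern is a scan of a NULL-terminated array of descriptor headers filtered by bDescriptorType; a standalone sketch with simplified types (not the kernel structures):

#include <stdio.h>
#include <stddef.h>

struct desc_header { unsigned char bLength, bDescriptorType; };

/* Return the next descriptor of the wanted type, or NULL at end of list. */
static struct desc_header **next_desc(struct desc_header **t, unsigned char type)
{
        for (; *t; t++)
                if ((*t)->bDescriptorType == type)
                        return t;
        return NULL;
}

#define for_each_desc(start, it, type) \
        for (it = next_desc(start, type); it; it = next_desc(it + 1, type))

int main(void)
{
        struct desc_header intf = { 9, 4 }, ep1 = { 7, 5 }, ep2 = { 7, 5 };
        struct desc_header *list[] = { &intf, &ep1, &ep2, NULL };
        struct desc_header **it;

        for_each_desc(list, it, 5)      /* 5 == endpoint descriptor type */
                printf("endpoint descriptor, bLength=%u\n", (*it)->bLength);
        return 0;
}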
/**
@@ -348,8 +392,11 @@ int usb_function_deactivate(struct usb_function *function)
spin_lock_irqsave(&cdev->lock, flags);
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_deactivate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
if (status == 0)
cdev->deactivations++;
@@ -380,8 +427,11 @@ int usb_function_activate(struct usb_function *function)
status = -EINVAL;
else {
cdev->deactivations--;
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_activate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
}
spin_unlock_irqrestore(&cdev->lock, flags);
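usb_function_deactivate()/usb_function_activate() now release cdev->lock around usb_gadget_deactivate()/usb_gadget_activate(), which may sleep, and re-acquire it before updating the counter. A user-space analogue of that bracketing with pthreads (illustrative only; the kernel version uses an irqsave spinlock and a real UDC callback):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int deactivations;

static int blocking_callback(void)      /* stands in for the UDC call */
{
        return 0;
}

/* Drop the lock around the call that may block, retake it before
 * touching the protected counter. */
static int example_deactivate(void)
{
        int status = 0;

        pthread_mutex_lock(&lock);
        if (deactivations == 0) {
                pthread_mutex_unlock(&lock);
                status = blocking_callback();
                pthread_mutex_lock(&lock);
        }
        if (status == 0)
                deactivations++;
        pthread_mutex_unlock(&lock);
        return status;
}

int main(void)
{
        printf("deactivate -> %d, count %d\n", example_deactivate(), deactivations);
        return 0;
}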
@@ -847,6 +897,11 @@ static int set_config(struct usb_composite_dev *cdev,
else
power = min(power, 900U);
done:
+ if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
+ usb_gadget_set_selfpowered(gadget);
+ else
+ usb_gadget_clear_selfpowered(gadget);
+
usb_gadget_vbus_draw(gadget, power);
if (result >= 0 && cdev->delayed_status)
result = USB_GADGET_DELAYED_STATUS;
@@ -1022,7 +1077,7 @@ static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf)
while (*sp) {
s = *sp;
language = cpu_to_le16(s->language);
- for (tmp = buf; *tmp && tmp < &buf[126]; tmp++) {
+ for (tmp = buf; *tmp && tmp < &buf[USB_MAX_STRING_LEN]; tmp++) {
if (*tmp == language)
goto repeat;
}
@@ -1097,7 +1152,7 @@ static int get_string(struct usb_composite_dev *cdev,
collect_langs(sp, s->wData);
}
- for (len = 0; len <= 126 && s->wData[len]; len++)
+ for (len = 0; len <= USB_MAX_STRING_LEN && s->wData[len]; len++)
continue;
if (!len)
return -EINVAL;
@@ -2265,6 +2320,7 @@ void composite_suspend(struct usb_gadget *gadget)
cdev->suspended = 1;
+ usb_gadget_set_selfpowered(gadget);
usb_gadget_vbus_draw(gadget, 2);
}
@@ -2293,6 +2349,9 @@ void composite_resume(struct usb_gadget *gadget)
else
maxpower = min(maxpower, 900U);
+ if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW)
+ usb_gadget_clear_selfpowered(gadget);
+
usb_gadget_vbus_draw(gadget, maxpower);
}
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 2d115353424c..8bb25773b61e 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -194,9 +194,13 @@ EXPORT_SYMBOL_GPL(usb_assign_descriptors);
void usb_free_all_descriptors(struct usb_function *f)
{
usb_free_descriptors(f->fs_descriptors);
+ f->fs_descriptors = NULL;
usb_free_descriptors(f->hs_descriptors);
+ f->hs_descriptors = NULL;
usb_free_descriptors(f->ss_descriptors);
+ f->ss_descriptors = NULL;
usb_free_descriptors(f->ssp_descriptors);
+ f->ssp_descriptors = NULL;
}
EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index ab9ac48a751a..3d4710cc34bc 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -109,21 +109,27 @@ struct gadget_config_name {
struct list_head list;
};
+#define USB_MAX_STRING_WITH_NULL_LEN (USB_MAX_STRING_LEN+1)
+
static int usb_string_copy(const char *s, char **s_copy)
{
int ret;
char *str;
char *copy = *s_copy;
ret = strlen(s);
- if (ret > 126)
+ if (ret > USB_MAX_STRING_LEN)
return -EOVERFLOW;
- str = kstrdup(s, GFP_KERNEL);
- if (!str)
- return -ENOMEM;
+ if (copy) {
+ str = copy;
+ } else {
+ str = kmalloc(USB_MAX_STRING_WITH_NULL_LEN, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+ }
+ strcpy(str, s);
if (str[ret - 1] == '\n')
str[ret - 1] = '\0';
- kfree(copy);
*s_copy = str;
return 0;
}
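The rewritten usb_string_copy() above rejects strings longer than USB_MAX_STRING_LEN, allocates a fixed-size buffer only on first use and reuses it for later writes, and strips a trailing newline. A user-space sketch of that behaviour, assuming USB_MAX_STRING_LEN is 126 (the USB string-descriptor limit):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define USB_MAX_STRING_LEN 126

static int usb_string_copy(const char *s, char **s_copy)
{
        size_t len = strlen(s);
        char *str = *s_copy;

        if (len > USB_MAX_STRING_LEN)
                return -1;              /* -EOVERFLOW in the kernel */

        if (!str) {                     /* allocate once, reuse afterwards */
                str = malloc(USB_MAX_STRING_LEN + 1);
                if (!str)
                        return -2;      /* -ENOMEM */
        }

        strcpy(str, s);
        if (len && str[len - 1] == '\n')
                str[len - 1] = '\0';
        *s_copy = str;
        return 0;
}

int main(void)
{
        char *copy = NULL;

        usb_string_copy("first\n", &copy);
        printf("'%s'\n", copy);
        usb_string_copy("second", &copy);       /* reuses the same buffer */
        printf("'%s'\n", copy);
        free(copy);
        return 0;
}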
@@ -233,9 +239,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
{
- char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name;
+ struct gadget_info *gi = to_gadget_info(item);
+ char *udc_name;
+ int ret;
- return sprintf(page, "%s\n", udc_name ?: "");
+ mutex_lock(&gi->lock);
+ udc_name = gi->composite.gadget_driver.udc_name;
+ ret = sprintf(page, "%s\n", udc_name ?: "");
+ mutex_unlock(&gi->lock);
+
+ return ret;
}
static int unregister_gadget(struct gadget_info *gi)
@@ -260,6 +273,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
char *name;
int ret;
+ if (strlen(page) < len)
+ return -EOVERFLOW;
+
name = kstrdup(page, GFP_KERNEL);
if (!name)
return -ENOMEM;
@@ -1214,9 +1230,9 @@ static void purge_configs_funcs(struct gadget_info *gi)
cfg = container_of(c, struct config_usb_cfg, c);
- list_for_each_entry_safe(f, tmp, &c->functions, list) {
+ list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
- list_move_tail(&f->list, &cfg->func_list);
+ list_move(&f->list, &cfg->func_list);
if (f->unbind) {
dev_dbg(&gi->cdev.gadget->dev,
"unbind function '%s'/%p\n",
@@ -1502,7 +1518,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
.suspend = configfs_composite_suspend,
.resume = configfs_composite_resume,
- .max_speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER_PLUS,
.driver = {
.owner = THIS_MODULE,
.name = "configfs-gadget",
@@ -1542,7 +1558,7 @@ static struct config_group *gadgets_make(
gi->composite.unbind = configfs_do_nothing;
gi->composite.suspend = NULL;
gi->composite.resume = NULL;
- gi->composite.max_speed = USB_SPEED_SUPER;
+ gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 9fc98de83624..add0f7ead55c 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -684,7 +684,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
acm_ss_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function,
- acm_ss_function, NULL);
+ acm_ss_function, acm_ss_function);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index a9239455eb6d..dcb432860d06 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1036,6 +1036,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
if (unlikely(ret)) {
+ io_data->req = NULL;
usb_ep_free_request(ep->ep, req);
goto error_lock;
}
@@ -1242,10 +1243,11 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
case FUNCTIONFS_ENDPOINT_DESC:
{
int desc_idx;
- struct usb_endpoint_descriptor *desc;
+ struct usb_endpoint_descriptor desc1, *desc;
switch (epfile->ffs->gadget->speed) {
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
desc_idx = 2;
break;
case USB_SPEED_HIGH:
@@ -1254,10 +1256,12 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
default:
desc_idx = 0;
}
+
desc = epfile->ep->descs[desc_idx];
+ memcpy(&desc1, desc, desc->bLength);
spin_unlock_irq(&epfile->ffs->eps_lock);
- ret = copy_to_user((void __user *)value, desc, desc->bLength);
+ ret = copy_to_user((void __user *)value, &desc1, desc1.bLength);
if (ret)
ret = -EFAULT;
return ret;
@@ -1736,6 +1740,10 @@ static void ffs_data_reset(struct ffs_data *ffs)
ffs->state = FFS_READ_DESCRIPTORS;
ffs->setup_state = FFS_NO_SETUP;
ffs->flags = 0;
+
+ ffs->ms_os_descs_ext_prop_count = 0;
+ ffs->ms_os_descs_ext_prop_name_len = 0;
+ ffs->ms_os_descs_ext_prop_data_len = 0;
}
@@ -2545,6 +2553,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
do { /* lang_count > 0 so we can use do-while */
unsigned needed = needed_count;
+ u32 str_per_lang = str_count;
if (unlikely(len < 3))
goto error_free;
@@ -2580,7 +2589,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
data += length + 1;
len -= length + 1;
- } while (--str_count);
+ } while (--str_per_lang);
s->id = 0; /* terminator */
s->s = NULL;
@@ -3070,7 +3079,8 @@ static int _ffs_func_bind(struct usb_configuration *c,
}
if (likely(super)) {
- func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
+ func->function.ss_descriptors = func->function.ssp_descriptors =
+ vla_ptr(vlabuf, d, ss_descs);
ss_len = ffs_do_descs(ffs->ss_descs_count,
vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
d_raw_descs__sz - fs_len - hs_len,
@@ -3480,6 +3490,7 @@ static void ffs_func_unbind(struct usb_configuration *c,
func->function.fs_descriptors = NULL;
func->function.hs_descriptors = NULL;
func->function.ss_descriptors = NULL;
+ func->function.ssp_descriptors = NULL;
func->interfaces_nums = NULL;
ffs_event_add(ffs, FUNCTIONFS_UNBIND);
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 46af0aa07e2e..0e083a53da53 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1048,6 +1048,12 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
f->ss_descriptors = usb_copy_descriptors(midi_function);
if (!f->ss_descriptors)
goto fail_f_midi;
+
+ if (gadget_is_superspeed_plus(c->cdev->gadget)) {
+ f->ssp_descriptors = usb_copy_descriptors(midi_function);
+ if (!f->ssp_descriptors)
+ goto fail_f_midi;
+ }
}
kfree(midi_function);
@@ -1315,7 +1321,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
midi->id = kstrdup(opts->id, GFP_KERNEL);
if (opts->id && !midi->id) {
status = -ENOMEM;
- goto setup_fail;
+ goto midi_free;
}
midi->in_ports = opts->in_ports;
midi->out_ports = opts->out_ports;
@@ -1327,7 +1333,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
status = kfifo_alloc(&midi->in_req_fifo, midi->qlen, GFP_KERNEL);
if (status)
- goto setup_fail;
+ goto midi_free;
spin_lock_init(&midi->transmit_lock);
@@ -1343,9 +1349,13 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
return &midi->func;
+midi_free:
+ if (midi)
+ kfree(midi->id);
+ kfree(midi);
setup_fail:
mutex_unlock(&opts->lock);
- kfree(midi);
+
return ERR_PTR(status);
}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index cfca4584ae13..e4aa370e86a9 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -86,8 +86,10 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
/* peak (theoretical) bulk transfer rate in bits-per-second */
static inline unsigned ncm_bitrate(struct usb_gadget *g)
{
- if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
- return 13 * 1024 * 8 * 1000 * 8;
+ if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
+ return 4250000000U;
+ else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+ return 3750000000U;
else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
return 13 * 512 * 8 * 1000 * 8;
else
@@ -1184,9 +1186,11 @@ static int ncm_unwrap_ntb(struct gether *port,
int ndp_index;
unsigned dg_len, dg_len2;
unsigned ndp_len;
+ unsigned block_len;
struct sk_buff *skb2;
int ret = -EINVAL;
- unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
+ unsigned ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
+ unsigned frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize);
const struct ndp_parser_opts *opts = ncm->parser_opts;
unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
int dgram_counter;
@@ -1208,8 +1212,9 @@ static int ncm_unwrap_ntb(struct gether *port,
}
tmp++; /* skip wSequence */
+ block_len = get_ncm(&tmp, opts->block_length);
/* (d)wBlockLength */
- if (get_ncm(&tmp, opts->block_length) > max_size) {
+ if (block_len > ntb_max) {
INFO(port->func.config->cdev, "OUT size exceeded\n");
goto err;
}
@@ -1218,15 +1223,23 @@ static int ncm_unwrap_ntb(struct gether *port,
/* Run through all the NDP's in the NTB */
do {
- /* NCM 3.2 */
- if (((ndp_index % 4) != 0) &&
- (ndp_index < opts->nth_size)) {
+ /*
+ * NCM 3.2
+ * dwNdpIndex
+ */
+ if (((ndp_index % 4) != 0) ||
+ (ndp_index < opts->nth_size) ||
+ (ndp_index > (block_len -
+ opts->ndp_size))) {
INFO(port->func.config->cdev, "Bad index: %#X\n",
ndp_index);
goto err;
}
- /* walk through NDP */
+ /*
+ * walk through NDP
+ * dwSignature
+ */
tmp = (void *)(skb->data + ndp_index);
if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
@@ -1237,14 +1250,15 @@ static int ncm_unwrap_ntb(struct gether *port,
ndp_len = get_unaligned_le16(tmp++);
/*
* NCM 3.3.1
+ * wLength
* entry is 2 items
* item size is 16/32 bits, opts->dgram_item_len * 2 bytes
* minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry
* Each entry is a dgram index and a dgram length.
*/
if ((ndp_len < opts->ndp_size
- + 2 * 2 * (opts->dgram_item_len * 2))
- || (ndp_len % opts->ndplen_align != 0)) {
+ + 2 * 2 * (opts->dgram_item_len * 2)) ||
+ (ndp_len % opts->ndplen_align != 0)) {
INFO(port->func.config->cdev, "Bad NDP length: %#X\n",
ndp_len);
goto err;
@@ -1261,8 +1275,21 @@ static int ncm_unwrap_ntb(struct gether *port,
do {
index = index2;
+ /* wDatagramIndex[0] */
+ if ((index < opts->nth_size) ||
+ (index > block_len - opts->dpe_size)) {
+ INFO(port->func.config->cdev,
+ "Bad index: %#X\n", index);
+ goto err;
+ }
+
dg_len = dg_len2;
- if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */
+ /*
+ * wDatagramLength[0]
+ * ethernet hdr + crc or larger than max frame size
+ */
+ if ((dg_len < 14 + crc_len) ||
+ (dg_len > frame_max)) {
INFO(port->func.config->cdev,
"Bad dgram length: %#X\n", dg_len);
goto err;
@@ -1286,6 +1313,13 @@ static int ncm_unwrap_ntb(struct gether *port,
index2 = get_ncm(&tmp, opts->dgram_item_len);
dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
+ /* wDatagramIndex[1] */
+ if (index2 > block_len - opts->dpe_size) {
+ INFO(port->func.config->cdev,
+ "Bad index: %#X\n", index2);
+ goto err;
+ }
+
/*
* Copy the data into a new skb.
* This ensures the truesize is correct
@@ -1302,7 +1336,6 @@ static int ncm_unwrap_ntb(struct gether *port,
ndp_len -= 2 * (opts->dgram_item_len * 2);
dgram_counter++;
-
if (index2 == 0 || dg_len2 == 0)
break;
} while (ndp_len > 2 * (opts->dgram_item_len * 2));
@@ -1490,7 +1523,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
fs_ncm_notify_desc.bEndpointAddress;
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
- ncm_ss_function, NULL);
+ ncm_ss_function, ncm_ss_function);
if (status)
goto fail;
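ncm_unwrap_ntb() above now validates that every NDP index is 4-byte aligned and lies between the NTH header and the end of the declared block length, and that each datagram index and length stay within the block and below the Ethernet frame maximum, before any data is dereferenced. A compact sketch of those bounds checks (field names, sizes and limits are illustrative):

#include <stdio.h>

static int ndp_index_ok(unsigned int ndp_index, unsigned int nth_size,
                        unsigned int ndp_size, unsigned int block_len)
{
        if (ndp_index % 4)                      /* must be 4-byte aligned */
                return 0;
        if (ndp_index < nth_size)               /* cannot point into the NTH */
                return 0;
        if (ndp_index > block_len - ndp_size)   /* NDP must fit in the block */
                return 0;
        return 1;
}

static int dgram_ok(unsigned int index, unsigned int dg_len,
                    unsigned int nth_size, unsigned int dpe_size,
                    unsigned int block_len, unsigned int crc_len,
                    unsigned int frame_max)
{
        if (index < nth_size || index > block_len - dpe_size)
                return 0;
        if (dg_len < 14 + crc_len || dg_len > frame_max)
                return 0;       /* smaller than eth hdr+crc, or too big */
        return 1;
}

int main(void)
{
        /* illustrative header sizes and limits */
        printf("ndp ok: %d, dgram ok: %d\n",
               ndp_index_ok(12, 12, 16, 2048),
               dgram_ok(28, 64, 12, 4, 2048, 0, 1514));
        return 0;
}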
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 9c7ed2539ff7..0f47cd398d60 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/cdev.h>
+#include <linux/kref.h>
#include <asm/byteorder.h>
#include <linux/io.h>
@@ -64,7 +65,7 @@ struct printer_dev {
struct usb_gadget *gadget;
s8 interface;
struct usb_ep *in_ep, *out_ep;
-
+ struct kref kref;
struct list_head rx_reqs; /* List of free RX structs */
struct list_head rx_reqs_active; /* List of Active RX xfers */
struct list_head rx_buffers; /* List of completed xfers */
@@ -218,6 +219,13 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
/*-------------------------------------------------------------------------*/
+static void printer_dev_free(struct kref *kref)
+{
+ struct printer_dev *dev = container_of(kref, struct printer_dev, kref);
+
+ kfree(dev);
+}
+
static struct usb_request *
printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags)
{
@@ -348,6 +356,7 @@ printer_open(struct inode *inode, struct file *fd)
spin_unlock_irqrestore(&dev->lock, flags);
+ kref_get(&dev->kref);
DBG(dev, "printer_open returned %x\n", ret);
return ret;
}
@@ -365,6 +374,7 @@ printer_close(struct inode *inode, struct file *fd)
dev->printer_status &= ~PRINTER_SELECTED;
spin_unlock_irqrestore(&dev->lock, flags);
+ kref_put(&dev->kref, printer_dev_free);
DBG(dev, "printer_close\n");
return 0;
@@ -1116,6 +1126,7 @@ fail_tx_reqs:
printer_req_free(dev->in_ep, req);
}
+ usb_free_all_descriptors(f);
return ret;
}
@@ -1350,7 +1361,8 @@ static void gprinter_free(struct usb_function *f)
struct f_printer_opts *opts;
opts = container_of(f->fi, struct f_printer_opts, func_inst);
- kfree(dev);
+
+ kref_put(&dev->kref, printer_dev_free);
mutex_lock(&opts->lock);
--opts->refcnt;
mutex_unlock(&opts->lock);
@@ -1419,6 +1431,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi)
return ERR_PTR(-ENOMEM);
}
+ kref_init(&dev->kref);
++opts->refcnt;
dev->minor = opts->minor;
dev->pnp_string = opts->pnp_string;
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 0d8e4a364ca6..cc1ff5b7b60c 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -87,8 +87,10 @@ static inline struct f_rndis *func_to_rndis(struct usb_function *f)
/* peak (theoretical) bulk transfer rate in bits-per-second */
static unsigned int bitrate(struct usb_gadget *g)
{
+ if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
+ return 4250000000U;
if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
- return 13 * 1024 * 8 * 1000 * 8;
+ return 3750000000U;
else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
return 13 * 512 * 8 * 1000 * 8;
else
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 106988a6661a..785826ab5348 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -751,12 +751,13 @@ static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
goto err_sts;
return 0;
+
err_sts:
- usb_ep_free_request(fu->ep_status, stream->req_status);
- stream->req_status = NULL;
-err_out:
usb_ep_free_request(fu->ep_out, stream->req_out);
stream->req_out = NULL;
+err_out:
+ usb_ep_free_request(fu->ep_in, stream->req_in);
+ stream->req_in = NULL;
out:
return -ENOMEM;
}
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 2746a926a8d9..41e7b29f58df 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -19,6 +19,9 @@
#include "u_audio.h"
#include "u_uac1.h"
+/* UAC1 spec: 3.7.2.3 Audio Channel Cluster Format */
+#define UAC1_CHANNEL_MASK 0x0FFF
+
struct f_uac1 {
struct g_audio g_audio;
u8 ac_intf, as_in_intf, as_out_intf;
@@ -30,6 +33,11 @@ static inline struct f_uac1 *func_to_uac1(struct usb_function *f)
return container_of(f, struct f_uac1, g_audio.func);
}
+static inline struct f_uac1_opts *g_audio_to_uac1_opts(struct g_audio *audio)
+{
+ return container_of(audio->func.fi, struct f_uac1_opts, func_inst);
+}
+
/*
* DESCRIPTORS ... most are static, but strings and full
* configuration descriptors are built on demand.
@@ -499,16 +507,48 @@ static void f_audio_disable(struct usb_function *f)
uac1->as_out_alt = 0;
uac1->as_in_alt = 0;
+ u_audio_stop_playback(&uac1->g_audio);
u_audio_stop_capture(&uac1->g_audio);
}
/*-------------------------------------------------------------------------*/
+static int f_audio_validate_opts(struct g_audio *audio, struct device *dev)
+{
+ struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
+
+ if (!opts->p_chmask && !opts->c_chmask) {
+ dev_err(dev, "Error: no playback and capture channels\n");
+ return -EINVAL;
+ } else if (opts->p_chmask & ~UAC1_CHANNEL_MASK) {
+ dev_err(dev, "Error: unsupported playback channels mask\n");
+ return -EINVAL;
+ } else if (opts->c_chmask & ~UAC1_CHANNEL_MASK) {
+ dev_err(dev, "Error: unsupported capture channels mask\n");
+ return -EINVAL;
+ } else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
+ dev_err(dev, "Error: incorrect playback sample size\n");
+ return -EINVAL;
+ } else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
+ dev_err(dev, "Error: incorrect capture sample size\n");
+ return -EINVAL;
+ } else if (!opts->p_srate) {
+ dev_err(dev, "Error: incorrect playback sampling rate\n");
+ return -EINVAL;
+ } else if (!opts->c_srate) {
+ dev_err(dev, "Error: incorrect capture sampling rate\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* audio function driver setup/binding */
static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct usb_gadget *gadget = cdev->gadget;
+ struct device *dev = &gadget->dev;
struct f_uac1 *uac1 = func_to_uac1(f);
struct g_audio *audio = func_to_g_audio(f);
struct f_uac1_opts *audio_opts;
@@ -518,6 +558,10 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
int rate;
int status;
+ status = f_audio_validate_opts(audio, dev);
+ if (status)
+ return status;
+
audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst);
us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c b/drivers/usb/gadget/function/f_uac1_legacy.c
index 24c086bcdeaa..56c724cfc439 100644
--- a/drivers/usb/gadget/function/f_uac1_legacy.c
+++ b/drivers/usb/gadget/function/f_uac1_legacy.c
@@ -336,7 +336,9 @@ static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
/* Copy buffer is full, add it to the play_queue */
if (audio_buf_size - copy_buf->actual < req->actual) {
+ spin_lock_irq(&audio->lock);
list_add_tail(&copy_buf->list, &audio->play_queue);
+ spin_unlock_irq(&audio->lock);
schedule_work(&audio->playback_work);
copy_buf = f_audio_buffer_alloc(audio_buf_size);
if (IS_ERR(copy_buf))
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index d582921f7257..8e563f56fbc2 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -275,7 +275,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = {
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -284,7 +284,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -352,7 +352,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -361,7 +361,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -448,12 +448,28 @@ struct cntrl_range_lay3 {
__le32 dRES;
} __packed;
-static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
+static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
struct usb_endpoint_descriptor *ep_desc,
- unsigned int factor, bool is_playback)
+ enum usb_device_speed speed, bool is_playback)
{
int chmask, srate, ssize;
- u16 max_packet_size;
+ u16 max_size_bw, max_size_ep;
+ unsigned int factor;
+
+ switch (speed) {
+ case USB_SPEED_FULL:
+ max_size_ep = 1023;
+ factor = 1000;
+ break;
+
+ case USB_SPEED_HIGH:
+ max_size_ep = 1024;
+ factor = 8000;
+ break;
+
+ default:
+ return -EINVAL;
+ }
if (is_playback) {
chmask = uac2_opts->p_chmask;
@@ -465,10 +481,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
ssize = uac2_opts->c_ssize;
}
- max_packet_size = num_channels(chmask) * ssize *
- DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
- ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size,
- le16_to_cpu(ep_desc->wMaxPacketSize)));
+ max_size_bw = num_channels(chmask) * ssize *
+ ((srate / (factor / (1 << (ep_desc->bInterval - 1)))) + 1);
+ ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
+ max_size_ep));
+
+ return 0;
}
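set_ep_max_packet_size() above now derives wMaxPacketSize per speed from the audio parameters: channels x sample size x (samples per service interval + 1), capped at the endpoint maximum of 1023 bytes for full speed or 1024 for high speed, with 1000 frames/s or 8000 microframes/s as the factor. A quick sketch of the same arithmetic (parameter names are illustrative):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* speed_hs: 0 = full speed (1000 frames/s, cap 1023),
 *           1 = high speed (8000 uframes/s, cap 1024). */
static unsigned int isoc_maxpacket(int speed_hs, unsigned int channels,
                                   unsigned int ssize, unsigned int srate,
                                   unsigned int bInterval)
{
        unsigned int factor = speed_hs ? 8000 : 1000;
        unsigned int cap = speed_hs ? 1024 : 1023;
        unsigned int per_interval = factor / (1u << (bInterval - 1));
        unsigned int bytes = channels * ssize * (srate / per_interval + 1);

        return MIN(bytes, cap);
}

int main(void)
{
        /* 2 channels, 16-bit samples, 48 kHz, bInterval 4, high speed */
        printf("wMaxPacketSize = %u\n", isoc_maxpacket(1, 2, 2, 48000, 4));
        return 0;
}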
static int
@@ -551,10 +569,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
uac2->as_in_alt = 0;
/* Calculate wMaxPacketSize according to audio bandwidth */
- set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
- set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
- set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
- set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
if (!agdev->out_ep) {
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index d8ce7868fe22..169e73ed128c 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -645,7 +645,12 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc_hs_streaming_ep.wMaxPacketSize =
cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11));
- uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
+
+ /* A high-bandwidth endpoint must specify a bInterval value of 1 */
+ if (max_packet_mult > 1)
+ uvc_hs_streaming_ep.bInterval = 1;
+ else
+ uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size);
uvc_ss_streaming_ep.bInterval = opts->streaming_interval;
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index fb5ed97572e5..0cb0c638fd13 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -89,7 +89,12 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
struct snd_uac_chip *uac = prm->uac;
/* i/f shutting down */
- if (!prm->ep_enabled || req->status == -ESHUTDOWN)
+ if (!prm->ep_enabled) {
+ usb_ep_free_request(ep, req);
+ return;
+ }
+
+ if (req->status == -ESHUTDOWN)
return;
/*
@@ -351,8 +356,14 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
for (i = 0; i < params->req_number; i++) {
if (prm->ureq[i].req) {
- usb_ep_dequeue(ep, prm->ureq[i].req);
- usb_ep_free_request(ep, prm->ureq[i].req);
+ if (usb_ep_dequeue(ep, prm->ureq[i].req))
+ usb_ep_free_request(ep, prm->ureq[i].req);
+ /*
+ * If usb_ep_dequeue() cannot successfully dequeue the
+ * request, the request will be freed by the completion
+ * callback.
+ */
+
prm->ureq[i].req = NULL;
}
}
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 0ef00315ec73..156651df6b4d 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -45,9 +45,10 @@
#define UETH__VERSION "29-May-2008"
/* Experiments show that both Linux and Windows hosts allow up to 16k
- * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
+ * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
* blocks and still have efficient handling. */
-#define GETHER_MAX_ETH_FRAME_LEN 15412
+#define GETHER_MAX_MTU_SIZE 15412
+#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
struct eth_dev {
/* lock is held while accessing port_usb
@@ -93,7 +94,7 @@ struct eth_dev {
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
- gadget->speed == USB_SPEED_SUPER))
+ gadget->speed >= USB_SPEED_SUPER))
return qmult * DEFAULT_QLEN;
else
return DEFAULT_QLEN;
@@ -786,7 +787,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
@@ -848,7 +849,7 @@ struct net_device *gether_setup_name_default(const char *netname)
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
return net;
}
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index cd33cee4d78b..1321e773626d 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -169,12 +169,11 @@ out: \
size_t len) \
{ \
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
- int ret; \
+ int ret = -EINVAL; \
u8 val; \
\
mutex_lock(&opts->lock); \
- ret = sscanf(page, "%02hhx", &val); \
- if (ret > 0) { \
+ if (sscanf(page, "%02hhx", &val) > 0) { \
opts->_n_ = val; \
ret = len; \
} \
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index af16672d5118..6680dcfe660e 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -203,8 +203,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail_string_ids;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c
index dd81fd538cb8..a748ed0842e8 100644
--- a/drivers/usb/gadget/legacy/audio.c
+++ b/drivers/usb/gadget/legacy/audio.c
@@ -300,8 +300,10 @@ static int audio_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail;
+ }
usb_otg_descriptor_init(cdev->gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index 8d7a556ece30..563363aba48f 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -179,8 +179,10 @@ static int cdc_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail1;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
index 30313b233680..99c7fc0d1d59 100644
--- a/drivers/usb/gadget/legacy/ether.c
+++ b/drivers/usb/gadget/legacy/ether.c
@@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail1;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 37ca0e669bd8..09ed5f02c24f 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1360,7 +1360,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
req->buf = dev->rbuf;
req->context = NULL;
- value = -EOPNOTSUPP;
switch (ctrl->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
@@ -1783,7 +1782,7 @@ static ssize_t
dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
struct dev_data *dev = fd->private_data;
- ssize_t value = len, length = len;
+ ssize_t value, length = len;
unsigned total;
u32 tag;
char *kbuf;
@@ -2040,6 +2039,9 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
return 0;
Enomem:
+ kfree(CHIP);
+ CHIP = NULL;
+
return -ENOMEM;
}
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index c61e71ba7045..0f1b45e3abd1 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -156,8 +156,10 @@ static int gncm_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
index 09f90447fed5..cfa642655337 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/drivers/usb/gadget/u_f.h
@@ -14,6 +14,7 @@
#define __U_F_H__
#include <linux/usb/gadget.h>
+#include <linux/overflow.h>
/* Variable Length Array Macros **********************************************/
#define vla_group(groupname) size_t groupname##__next = 0
@@ -21,21 +22,36 @@
#define vla_item(groupname, type, name, n) \
size_t groupname##_##name##__offset = ({ \
- size_t align_mask = __alignof__(type) - 1; \
- size_t offset = (groupname##__next + align_mask) & ~align_mask;\
- size_t size = (n) * sizeof(type); \
- groupname##__next = offset + size; \
+ size_t offset = 0; \
+ if (groupname##__next != SIZE_MAX) { \
+ size_t align_mask = __alignof__(type) - 1; \
+ size_t size = array_size(n, sizeof(type)); \
+ offset = (groupname##__next + align_mask) & \
+ ~align_mask; \
+ if (check_add_overflow(offset, size, \
+ &groupname##__next)) { \
+ groupname##__next = SIZE_MAX; \
+ offset = 0; \
+ } \
+ } \
offset; \
})
#define vla_item_with_sz(groupname, type, name, n) \
- size_t groupname##_##name##__sz = (n) * sizeof(type); \
- size_t groupname##_##name##__offset = ({ \
- size_t align_mask = __alignof__(type) - 1; \
- size_t offset = (groupname##__next + align_mask) & ~align_mask;\
- size_t size = groupname##_##name##__sz; \
- groupname##__next = offset + size; \
- offset; \
+ size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \
+ size_t groupname##_##name##__offset = ({ \
+ size_t offset = 0; \
+ if (groupname##__next != SIZE_MAX) { \
+ size_t align_mask = __alignof__(type) - 1; \
+ offset = (groupname##__next + align_mask) & \
+ ~align_mask; \
+ if (check_add_overflow(offset, groupname##_##name##__sz,\
+ &groupname##__next)) { \
+ groupname##__next = SIZE_MAX; \
+ offset = 0; \
+ } \
+ } \
+ offset; \
})
#define vla_ptr(ptr, groupname, name) \
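The reworked vla_item()/vla_item_with_sz() above reduce to one overflow-guarded layout step: multiply the element count by the element size, align the running offset, then add, poisoning the whole layout with SIZE_MAX if any step wraps, so the later single allocation of groupname##__next bytes fails instead of silently truncating. Below is a minimal userspace sketch of that step using the GCC/Clang __builtin_*_overflow builtins that the kernel's array_size() and check_add_overflow() helpers wrap; layout_append() and the sizes in main() are illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Append 'count' elements of 'size' bytes (power-of-two 'align') to a
 * running layout; returns the element offset, or SIZE_MAX on overflow. */
static size_t layout_append(size_t *next, size_t count, size_t size,
			    size_t align)
{
	size_t bytes, offset;

	if (*next == SIZE_MAX)			/* layout already poisoned */
		return SIZE_MAX;
	if (__builtin_mul_overflow(count, size, &bytes))
		goto overflow;			/* n * sizeof(type) wrapped */
	offset = (*next + (align - 1)) & ~(align - 1);
	if (__builtin_add_overflow(offset, bytes, next))
		goto overflow;			/* offset + size wrapped */
	return offset;
overflow:
	*next = SIZE_MAX;
	return SIZE_MAX;
}

int main(void)
{
	size_t next = 0;
	size_t hdr = layout_append(&next, 1, 64, 8);
	size_t big = layout_append(&next, SIZE_MAX / 4, 16, 8); /* rejected */

	printf("hdr at %zu, big %s, total %zu\n",
	       hdr, big == SIZE_MAX ? "rejected" : "accepted", next);
	return 0;
}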
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
index 57b6f66331cf..362284057d30 100644
--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -154,6 +154,11 @@ static int udc_pci_probe(
pci_set_master(pdev);
pci_try_set_mwi(pdev);
+ dev->phys_addr = resource;
+ dev->irq = pdev->irq;
+ dev->pdev = pdev;
+ dev->dev = &pdev->dev;
+
/* init dma pools */
if (use_dma) {
retval = init_dma_pools(dev);
@@ -161,11 +166,6 @@ static int udc_pci_probe(
goto err_dma;
}
- dev->phys_addr = resource;
- dev->irq = pdev->irq;
- dev->pdev = pdev;
- dev->dev = &pdev->dev;
-
/* general probing */
if (udc_probe(dev)) {
retval = -ENODEV;
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
index db3628be38c0..902e61be4d64 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
@@ -36,6 +36,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
int status)
{
bool internal = req->internal;
+ struct ast_vhub *vhub = ep->vhub;
EPVDBG(ep, "completing request @%p, status %d\n", req, status);
@@ -46,7 +47,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
if (req->req.dma) {
if (!WARN_ON(!ep->dev))
- usb_gadget_unmap_request(&ep->dev->gadget,
+ usb_gadget_unmap_request_by_dev(&vhub->pdev->dev,
&req->req, ep->epn.is_in);
req->req.dma = 0;
}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 5939eb1e97f2..931f540a747e 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -376,7 +376,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
if (ep->epn.desc_mode ||
((((unsigned long)u_req->buf & 7) == 0) &&
(ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
- rc = usb_gadget_map_request(&ep->dev->gadget, u_req,
+ rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
ep->epn.is_in);
if (rc) {
dev_warn(&vhub->pdev->dev,
@@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
u32 state, reg, loops;
/* Stop DMA activity */
- writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ if (ep->epn.desc_mode)
+ writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ else
+ writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
/* Wait for it to complete */
for (loops = 0; loops < 1000; loops++) {
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index a4ab23033578..23691a32759f 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -860,7 +860,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
u32 status;
DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
- ep->ep.name, req);
+ ep->ep.name, _req);
spin_lock_irqsave(&udc->lock, flags);
diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig
index c74ac25dddcd..051091bd265b 100644
--- a/drivers/usb/gadget/udc/bdc/Kconfig
+++ b/drivers/usb/gadget/udc/bdc/Kconfig
@@ -15,7 +15,7 @@ if USB_BDC_UDC
comment "Platform Support"
config USB_BDC_PCI
tristate "BDC support for PCIe based platforms"
- depends on USB_PCI
+ depends on USB_PCI && BROKEN
default USB_BDC_UDC
help
Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 01b44e159623..e174b1b889da 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -283,6 +283,7 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit)
* in that case reinit is passed as 1
*/
if (reinit) {
+ int i;
/* Enable interrupts */
temp = bdc_readl(bdc->regs, BDC_BDCSC);
temp |= BDC_GIE;
@@ -292,6 +293,9 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit)
/* Initialize SRR to 0 */
memset(bdc->srr.sr_bds, 0,
NUM_SR_ENTRIES * sizeof(struct bdc_bd));
+ /* clear ep flags to avoid post disconnect stops/deconfigs */
+ for (i = 1; i < bdc->num_eps; ++i)
+ bdc->bdc_ep_array[i]->flags = 0;
} else {
/* One time initialization only */
/* Enable status report function pointers */
@@ -604,9 +608,14 @@ static int bdc_remove(struct platform_device *pdev)
static int bdc_suspend(struct device *dev)
{
struct bdc *bdc = dev_get_drvdata(dev);
+ int ret;
- clk_disable_unprepare(bdc->clk);
- return 0;
+ /* Halt the controller */
+ ret = bdc_stop(bdc);
+ if (!ret)
+ clk_disable_unprepare(bdc->clk);
+
+ return ret;
}
static int bdc_resume(struct device *dev)
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index a4d9b5e1e50e..9ddc0b4e92c9 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -540,7 +540,7 @@ static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
{
struct bdc *bdc = ep->bdc;
- if (req == NULL || &req->queue == NULL || &req->usb_req == NULL)
+ if (req == NULL)
return;
dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
@@ -615,7 +615,6 @@ int bdc_ep_enable(struct bdc_ep *ep)
}
bdc_dbg_bd_list(bdc, ep);
/* only for ep0: config ep is called for ep0 from connect event */
- ep->flags |= BDC_EP_ENABLED;
if (ep->ep_num == 1)
return ret;
@@ -759,10 +758,13 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
__func__, ep->name, start_bdi, end_bdi);
dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
ep, (void *)ep->usb_ep.desc);
- /* Stop the ep to see where the HW is ? */
- ret = bdc_stop_ep(bdc, ep->ep_num);
- /* if there is an issue with stopping ep, then no need to go further */
- if (ret)
+ /* if still connected, stop the ep to see where the HW is */
+ if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) {
+ ret = bdc_stop_ep(bdc, ep->ep_num);
+ /* if there is an issue, then no need to go further */
+ if (ret)
+ return 0;
+ } else
return 0;
/*
@@ -1911,7 +1913,9 @@ static int bdc_gadget_ep_disable(struct usb_ep *_ep)
__func__, ep->name, ep->flags);
if (!(ep->flags & BDC_EP_ENABLED)) {
- dev_warn(bdc->dev, "%s is already disabled\n", ep->name);
+ if (bdc->gadget.speed != USB_SPEED_UNKNOWN)
+ dev_warn(bdc->dev, "%s is already disabled\n",
+ ep->name);
return 0;
}
spin_lock_irqsave(&bdc->lock, flags);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 1a79a9955187..87a417d878b8 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1471,10 +1471,13 @@ static ssize_t soft_connect_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ ssize_t ret;
+ mutex_lock(&udc_lock);
if (!udc->driver) {
dev_err(dev, "soft-connect without a gadget driver\n");
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out;
}
if (sysfs_streq(buf, "connect")) {
@@ -1486,10 +1489,14 @@ static ssize_t soft_connect_store(struct device *dev,
usb_gadget_udc_stop(udc);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- return n;
+ ret = n;
+out:
+ mutex_unlock(&udc_lock);
+ return ret;
}
static DEVICE_ATTR_WO(soft_connect);
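The soft_connect_store() hunk converts early returns into a single unlock path under udc_lock. A compact userspace sketch of that single-exit shape, with pthread_mutex standing in for the mutex and hypothetical names (store_command, cmd_apply):

#include <errno.h>
#include <pthread.h>
#include <string.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every path, success or error, leaves through the same unlock. */
static long store_command(const char *buf, long n, int (*cmd_apply)(const char *))
{
	long ret;

	pthread_mutex_lock(&state_lock);
	if (!cmd_apply) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (cmd_apply(buf)) {
		ret = -EINVAL;
		goto out;
	}
	ret = n;
out:
	pthread_mutex_unlock(&state_lock);
	return ret;
}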
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index d0248c58dcb6..d25d25da6875 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -914,6 +914,21 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value)
spin_lock_irqsave(&dum->lock, flags);
dum->pullup = (value != 0);
set_link_state(dum_hcd);
+ if (value == 0) {
+ /*
+ * Emulate synchronize_irq(): wait for callbacks to finish.
+ * This seems to be the best place to emulate the call to
+ * synchronize_irq() that's in usb_gadget_remove_driver().
+ * Doing it in dummy_udc_stop() would be too late since it
+ * is called after the unbind callback and unbind shouldn't
+ * be invoked until all the other callbacks are finished.
+ */
+ while (dum->callback_usage > 0) {
+ spin_unlock_irqrestore(&dum->lock, flags);
+ usleep_range(1000, 2000);
+ spin_lock_irqsave(&dum->lock, flags);
+ }
+ }
spin_unlock_irqrestore(&dum->lock, flags);
usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
@@ -1015,14 +1030,6 @@ static int dummy_udc_stop(struct usb_gadget *g)
spin_lock_irq(&dum->lock);
dum->ints_enabled = 0;
stop_activity(dum);
-
- /* emulate synchronize_irq(): wait for callbacks to finish */
- while (dum->callback_usage > 0) {
- spin_unlock_irq(&dum->lock);
- usleep_range(1000, 2000);
- spin_lock_irq(&dum->lock);
- }
-
dum->driver = NULL;
spin_unlock_irq(&dum->lock);
@@ -2747,7 +2754,7 @@ static int __init init(void)
{
int retval = -ENOMEM;
int i;
- struct dummy *dum[MAX_NUM_UDC];
+ struct dummy *dum[MAX_NUM_UDC] = {};
if (usb_disabled())
return -ENODEV;
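The dummy_pullup() hunk moves the callback drain out of dummy_udc_stop(): while a callback is still running, the lock is released, the thread sleeps briefly, then the lock is re-taken and the counter re-checked. A small pthread-based sketch of that drain loop, with hypothetical names (dev_state, set_pullup) standing in for the dummy_hcd structures:

#include <pthread.h>
#include <unistd.h>

struct dev_state {
	pthread_mutex_t lock;
	int callback_usage;	/* callbacks currently running */
	int pulled_up;
};

/* Drop the pull-up and wait for in-flight callbacks to finish.  The lock is
 * released around the sleep so the callbacks can take it and decrement
 * callback_usage, mirroring the spin_unlock/usleep_range/spin_lock sequence
 * above. */
static void set_pullup(struct dev_state *dev, int value)
{
	pthread_mutex_lock(&dev->lock);
	dev->pulled_up = value;
	while (value == 0 && dev->callback_usage > 0) {
		pthread_mutex_unlock(&dev->lock);
		usleep(1000);
		pthread_mutex_lock(&dev->lock);
	}
	pthread_mutex_unlock(&dev->lock);
}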
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index bc6abaea907d..785822ecc3f1 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -337,15 +337,16 @@ static void fotg210_start_dma(struct fotg210_ep *ep,
} else {
buffer = req->req.buf + req->req.actual;
length = ioread32(ep->fotg210->reg +
- FOTG210_FIBCR(ep->epnum - 1));
- length &= FIBCR_BCFX;
+ FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX;
+ if (length > req->req.length - req->req.actual)
+ length = req->req.length - req->req.actual;
}
} else {
buffer = req->req.buf + req->req.actual;
if (req->req.length - req->req.actual > ep->ep.maxpacket)
length = ep->ep.maxpacket;
else
- length = req->req.length;
+ length = req->req.length - req->req.actual;
}
d = dma_map_single(NULL, buffer, length,
@@ -382,8 +383,7 @@ static void fotg210_ep0_queue(struct fotg210_ep *ep,
}
if (ep->dir_in) { /* if IN */
fotg210_start_dma(ep, req);
- if ((req->req.length == req->req.actual) ||
- (req->req.actual < ep->ep.maxpacket))
+ if (req->req.length == req->req.actual)
fotg210_done(ep, req, 0);
} else { /* OUT */
u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);
@@ -824,7 +824,7 @@ static void fotg210_ep0in(struct fotg210_udc *fotg210)
if (req->req.length)
fotg210_start_dma(ep, req);
- if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
+ if (req->req.actual == req->req.length)
fotg210_done(ep, req, 0);
} else {
fotg210_set_cxdone(fotg210);
@@ -853,12 +853,16 @@ static void fotg210_out_fifo_handler(struct fotg210_ep *ep)
{
struct fotg210_request *req = list_entry(ep->queue.next,
struct fotg210_request, queue);
+ int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1);
fotg210_start_dma(ep, req);
- /* finish out transfer */
+ /* Complete the request when it's full or a short packet arrived.
+ * Like other drivers, short_not_ok isn't handled.
+ */
+
if (req->req.length == req->req.actual ||
- req->req.actual < ep->ep.maxpacket)
+ (disgr1 & DISGR1_SPK_INT(ep->epnum - 1)))
fotg210_done(ep, req, 0);
}
@@ -1031,6 +1035,12 @@ static void fotg210_init(struct fotg210_udc *fotg210)
value &= ~DMCR_GLINT_EN;
iowrite32(value, fotg210->reg + FOTG210_DMCR);
+ /* enable only grp2 irqs we handle */
+ iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT
+ | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT
+ | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT),
+ fotg210->reg + FOTG210_DMISGR2);
+
/* disable all fifo interrupt */
iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1);
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index c3721225b61e..b706ad3034bc 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1757,6 +1757,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
+ pci_set_drvdata(pdev, dev);
spin_lock_init(&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &goku_ops;
@@ -1790,7 +1791,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev->regs = (struct goku_udc_regs __iomem *) base;
- pci_set_drvdata(pdev, dev);
INFO(dev, "%s\n", driver_desc);
INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index e50108f9a374..e0b2fb33ed0d 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -1980,9 +1980,12 @@ static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
if (num == 0) {
_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
+ if (!_req)
+ return -ENOMEM;
+
buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
- if (!_req || !buf) {
- /* possible _req freed by gr_probe via gr_remove */
+ if (!buf) {
+ gr_free_request(&ep->ep, _req);
return -ENOMEM;
}
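The gr_ep_init() fix above is the usual partial-allocation unwind: when the second allocation fails, the first one is released explicitly instead of being leaked. A minimal sketch of the same shape in plain C, with illustrative names (pair_alloc is not part of the driver):

#include <stdlib.h>

struct pair {
	void *a;
	void *b;
};

static int pair_alloc(struct pair *p, size_t na, size_t nb)
{
	p->a = malloc(na);
	if (!p->a)
		return -1;
	p->b = malloc(nb);
	if (!p->b) {
		free(p->a);	/* unwind the earlier allocation */
		p->a = NULL;
		return -1;
	}
	return 0;
}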
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 21921db068f6..cf56819f16e4 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -1602,17 +1602,17 @@ static int lpc32xx_ep_enable(struct usb_ep *_ep,
const struct usb_endpoint_descriptor *desc)
{
struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
- struct lpc32xx_udc *udc = ep->udc;
+ struct lpc32xx_udc *udc;
u16 maxpacket;
u32 tmp;
unsigned long flags;
/* Verify EP data */
if ((!_ep) || (!ep) || (!desc) ||
- (desc->bDescriptorType != USB_DT_ENDPOINT)) {
- dev_dbg(udc->dev, "bad ep or descriptor\n");
+ (desc->bDescriptorType != USB_DT_ENDPOINT))
return -EINVAL;
- }
+
+ udc = ep->udc;
maxpacket = usb_endpoint_maxp(desc);
if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) {
dev_dbg(udc->dev, "bad ep descriptor's packet size\n");
@@ -1860,7 +1860,7 @@ static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
{
struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
- struct lpc32xx_udc *udc = ep->udc;
+ struct lpc32xx_udc *udc;
unsigned long flags;
if ((!ep) || (ep->hwep_num <= 1))
@@ -1870,6 +1870,7 @@ static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
if (ep->is_in)
return -EAGAIN;
+ udc = ep->udc;
spin_lock_irqsave(&udc->lock, flags);
if (value == 1) {
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index a8288df6aadf..ea59b56e5402 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1667,7 +1667,7 @@ static int m66592_probe(struct platform_device *pdev)
err_add_udc:
m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
-
+ m66592->ep0_req = NULL;
clean_up3:
if (m66592->pdata->on_chip) {
clk_disable(m66592->clk);
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 95f52232493b..83e98b59910e 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2313,7 +2313,8 @@ static int mv_udc_probe(struct platform_device *pdev)
return 0;
err_create_workqueue:
- destroy_workqueue(udc->qwork);
+ if (udc->qwork)
+ destroy_workqueue(udc->qwork);
err_destroy_dma:
dma_pool_destroy(udc->dtd_pool);
err_free_dma:
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index c2011cd7df8c..077fa9304618 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2653,6 +2653,8 @@ net2272_plat_probe(struct platform_device *pdev)
err_req:
release_mem_region(base, len);
err:
+ kfree(dev);
+
return ret;
}
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index ee872cad5270..a87caad8d1c7 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -3782,8 +3782,10 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
done:
- if (dev)
+ if (dev) {
net2280_remove(pdev);
+ kfree(dev);
+ }
return retval;
}
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 991184b8bb41..14e99905cbee 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -368,7 +368,6 @@ struct pch_udc_dev {
#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
-#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
@@ -601,18 +600,22 @@ static void pch_udc_reconnect(struct pch_udc_dev *dev)
static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
int is_active)
{
+ unsigned long iflags;
+
+ spin_lock_irqsave(&dev->lock, iflags);
if (is_active) {
pch_udc_reconnect(dev);
dev->vbus_session = 1;
} else {
if (dev->driver && dev->driver->disconnect) {
- spin_lock(&dev->lock);
+ spin_unlock_irqrestore(&dev->lock, iflags);
dev->driver->disconnect(&dev->gadget);
- spin_unlock(&dev->lock);
+ spin_lock_irqsave(&dev->lock, iflags);
}
pch_udc_set_disconnect(dev);
dev->vbus_session = 0;
}
+ spin_unlock_irqrestore(&dev->lock, iflags);
}
/**
@@ -1169,20 +1172,25 @@ static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
{
struct pch_udc_dev *dev;
+ unsigned long iflags;
if (!gadget)
return -EINVAL;
+
dev = container_of(gadget, struct pch_udc_dev, gadget);
+
+ spin_lock_irqsave(&dev->lock, iflags);
if (is_on) {
pch_udc_reconnect(dev);
} else {
if (dev->driver && dev->driver->disconnect) {
- spin_lock(&dev->lock);
+ spin_unlock_irqrestore(&dev->lock, iflags);
dev->driver->disconnect(&dev->gadget);
- spin_unlock(&dev->lock);
+ spin_lock_irqsave(&dev->lock, iflags);
}
pch_udc_set_disconnect(dev);
}
+ spin_unlock_irqrestore(&dev->lock, iflags);
return 0;
}
@@ -1774,7 +1782,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
}
/* prevent from using desc. - set HOST BUSY */
dma_desc->status |= PCH_UDC_BS_HST_BSY;
- dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
+ dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID);
req->td_data = dma_desc;
req->td_data_last = dma_desc;
req->chain_len = 1;
@@ -2317,6 +2325,21 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
pch_udc_set_dma(dev, DMA_DIR_RX);
}
+static int pch_udc_gadget_setup(struct pch_udc_dev *dev)
+ __must_hold(&dev->lock)
+{
+ int rc;
+
+ /* In some cases we can get an interrupt before the driver is set up */
+ if (!dev->driver)
+ return -ESHUTDOWN;
+
+ spin_unlock(&dev->lock);
+ rc = dev->driver->setup(&dev->gadget, &dev->setup_data);
+ spin_lock(&dev->lock);
+ return rc;
+}
+
/**
* pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
* @dev: Reference to the device structure
@@ -2388,15 +2411,12 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
else /* OUT */
dev->gadget.ep0 = &ep->ep;
- spin_lock(&dev->lock);
/* If Mass storage Reset */
if ((dev->setup_data.bRequestType == 0x21) &&
(dev->setup_data.bRequest == 0xFF))
dev->prot_stall = 0;
/* call gadget with setup data received */
- setup_supported = dev->driver->setup(&dev->gadget,
- &dev->setup_data);
- spin_unlock(&dev->lock);
+ setup_supported = pch_udc_gadget_setup(dev);
if (dev->setup_data.bRequestType & USB_DIR_IN) {
ep->td_data->status = (ep->td_data->status &
@@ -2644,9 +2664,7 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
dev->ep[i].halted = 0;
}
dev->stall = 0;
- spin_unlock(&dev->lock);
- dev->driver->setup(&dev->gadget, &dev->setup_data);
- spin_lock(&dev->lock);
+ pch_udc_gadget_setup(dev);
}
/**
@@ -2681,9 +2699,7 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
dev->stall = 0;
/* call gadget zero with setup data received */
- spin_unlock(&dev->lock);
- dev->driver->setup(&dev->gadget, &dev->setup_data);
- spin_lock(&dev->lock);
+ pch_udc_gadget_setup(dev);
}
/**
@@ -2957,7 +2973,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
UDC_EP0OUT_BUFF_SIZE * 4,
DMA_FROM_DEVICE);
- return 0;
+ return dma_mapping_error(&dev->pdev->dev, dev->dma_addr);
}
static int pch_udc_start(struct usb_gadget *g,
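The init_dma_pools() change above adds the check that every dma_map_single() needs: the returned handle must be validated with dma_mapping_error() before it is used or stored. A kernel-style sketch of that pairing (map_rx_buffer is an illustrative helper, not a driver function, and is not standalone-buildable outside the kernel tree):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_rx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;		/* handle is unusable, report failure */
	return 0;
}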
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 11e25a3f4f1f..a766476fd742 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1852,6 +1852,8 @@ static int r8a66597_probe(struct platform_device *pdev)
return PTR_ERR(reg);
ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!ires)
+ return -EINVAL;
irq = ires->start;
irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index a5254e82d628..c17d7a71e29a 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -1466,7 +1466,7 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
struct renesas_usb3_request *usb3_req)
{
struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
- struct renesas_usb3_request *usb3_req_first = usb3_get_request(usb3_ep);
+ struct renesas_usb3_request *usb3_req_first;
unsigned long flags;
int ret = -EAGAIN;
u32 enable_bits = 0;
@@ -1474,7 +1474,8 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
spin_lock_irqsave(&usb3->lock, flags);
if (usb3_ep->halt || usb3_ep->started)
goto out;
- if (usb3_req != usb3_req_first)
+ usb3_req_first = __usb3_get_request(usb3_ep);
+ if (!usb3_req_first || usb3_req != usb3_req_first)
goto out;
if (usb3_pn_change(usb3, usb3_ep->num) < 0)
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index 8bf5ad7a59ad..6cc63e317aba 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -264,10 +264,6 @@ static void s3c2410_udc_done(struct s3c2410_ep *ep,
static void s3c2410_udc_nuke(struct s3c2410_udc *udc,
struct s3c2410_ep *ep, int status)
{
- /* Sanity check */
- if (&ep->queue == NULL)
- return;
-
while (!list_empty(&ep->queue)) {
struct s3c2410_request *req;
req = list_entry(ep->queue.next, struct s3c2410_request,
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index 32f1d3e90c26..99805d60a7ab 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -114,8 +114,8 @@ static int udc_plat_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
udc->virt_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(udc->regs))
- return PTR_ERR(udc->regs);
+ if (IS_ERR(udc->virt_addr))
+ return PTR_ERR(udc->virt_addr);
/* udc csr registers base */
udc->csr = udc->virt_addr + UDC_CSR_ADDR;
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index 7c24d1ce1088..33f77e59aa6f 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -55,9 +55,9 @@ usb_gadget_get_string (const struct usb_gadget_strings *table, int id, u8 *buf)
return -EINVAL;
/* string descriptors have length, tag, then UTF16-LE text */
- len = min ((size_t) 126, strlen (s->s));
+ len = min((size_t)USB_MAX_STRING_LEN, strlen(s->s));
len = utf8s_to_utf16s(s->s, len, UTF16_LITTLE_ENDIAN,
- (wchar_t *) &buf[2], 126);
+ (wchar_t *) &buf[2], USB_MAX_STRING_LEN);
if (len < 0)
return -EINVAL;
buf [0] = (len + 1) * 2;
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 8e3bab1e0c1f..f433883ca2bf 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -188,9 +188,8 @@ static int exynos_ehci_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(res);
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- dev_err(&pdev->dev, "Failed to get IRQ\n");
- err = -ENODEV;
+ if (irq < 0) {
+ err = irq;
goto fail_io;
}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 8608ac513fb7..d9282ca7ae8c 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
@@ -573,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
u32 hcc_params;
+ int rc;
hcd->uses_new_polling = 1;
@@ -628,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd)
down_write(&ehci_cf_port_reset_rwsem);
ehci->rh_state = EHCI_RH_RUNNING;
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+
+ /* Wait until HC become operational */
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
msleep(5);
+ rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
+
up_write(&ehci_cf_port_reset_rwsem);
+
+ if (rc) {
+ ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
+ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
+ return rc;
+ }
+
ehci->last_periodic_enable = ktime_get_real();
temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index ce0eaf7d7c12..9f9ab5ccea88 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -14,7 +14,6 @@
*/
/*-------------------------------------------------------------------------*/
-#include <linux/usb/otg.h>
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
@@ -346,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
unlink_empty_async_suspended(ehci);
+ /* Some Synopsys controllers mistakenly leave IAA turned on */
+ ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+
/* Any IAA cycle that started before the suspend is now invalid */
end_iaa_cycle(ehci);
ehci_handle_start_intr_unlinks(ehci);
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index de764459e05a..9d93e7441bbc 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -192,12 +192,10 @@ static int mv_ehci_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(r);
hcd->regs = ehci_mv->op_regs;
- hcd->irq = platform_get_irq(pdev, 0);
- if (!hcd->irq) {
- dev_err(&pdev->dev, "Cannot get irq.");
- retval = -ENODEV;
+ retval = platform_get_irq(pdev, 0);
+ if (retval < 0)
goto err_disable_clk;
- }
+ hcd->irq = retval;
ehci = hcd_to_ehci(hcd);
ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index c9f91e6c72b6..7f65c86047dd 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -50,6 +50,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev));
if (!hcd)
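The exynos, mv and mxc hunks converge on the same probe idiom: platform_get_irq() reports failure as a negative errno, so the result is checked with irq < 0 and propagated as-is instead of being tested against zero and replaced with -ENODEV. A kernel-style sketch of the pattern (example_probe is illustrative and not standalone-buildable):

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* already an -E* code, no extra dev_err() */

	/* ... map resources, usb_create_hcd(), then usb_add_hcd(hcd, irq, ...) */
	return 0;
}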
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 7d20296cbe9f..d31c425d6167 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -222,6 +222,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
err_pm_runtime:
pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
err_phy:
for (i = 0; i < omap->nports; i++) {
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index fe9422d3bcdc..fcfad5c298a9 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -216,6 +216,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
ehci_info(ehci, "applying MosChip frame-index workaround\n");
ehci->frame_index_bug = 1;
break;
+ case PCI_VENDOR_ID_HUAWEI:
+ /* Synopsys HC bug */
+ if (pdev->device == 0xa239) {
+ ehci_info(ehci, "applying Synopsys HC workaround\n");
+ ehci->has_synopsys_hc_bug = 1;
+ }
+ break;
}
/* optional debug port, normally in the first BAR */
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 4c306fb6b069..8a45362155c5 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -29,6 +29,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/sys_soc.h>
+#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ehci_pdriver.h>
@@ -44,6 +46,9 @@ struct ehci_platform_priv {
struct clk *clks[EHCI_MAX_CLKS];
struct reset_control *rsts;
bool reset_on_resume;
+ bool quirk_poll;
+ struct timer_list poll_timer;
+ struct delayed_work poll_work;
};
static const char hcd_name[] = "ehci-platform";
@@ -118,6 +123,111 @@ static struct usb_ehci_pdata ehci_platform_defaults = {
.power_off = ehci_platform_power_off,
};
+/**
+ * quirk_poll_check_port_status - Poll port_status if the device sticks
+ * @ehci: the ehci hcd pointer
+ *
+ * EHCI/OHCI controllers on R-Car Gen3 SoCs can, very rarely, get stuck
+ * after a full/low speed USB device is disconnected. To detect such a
+ * situation, the EHCI PORTSC register has to be polled.
+ *
+ * Return: true if the controller's port_status indicated getting stuck
+ */
+static bool quirk_poll_check_port_status(struct ehci_hcd *ehci)
+{
+ u32 port_status = ehci_readl(ehci, &ehci->regs->port_status[0]);
+
+ if (!(port_status & PORT_OWNER) &&
+ (port_status & PORT_POWER) &&
+ !(port_status & PORT_CONNECT) &&
+ (port_status & PORT_LS_MASK))
+ return true;
+
+ return false;
+}
+
+/**
+ * quirk_poll_rebind_companion - rebind companion device to recover
+ * @ehci: the ehci hcd pointer
+ *
+ * EHCI/OHCI controllers on R-Car Gen3 SoCs can, very rarely, get stuck
+ * after a full/low speed USB device is disconnected. To recover from such
+ * a situation, the OHCI functional state has to be changed.
+ */
+static void quirk_poll_rebind_companion(struct ehci_hcd *ehci)
+{
+ struct device *companion_dev;
+ struct usb_hcd *hcd = ehci_to_hcd(ehci);
+
+ companion_dev = usb_of_get_companion_dev(hcd->self.controller);
+ if (!companion_dev)
+ return;
+
+ device_release_driver(companion_dev);
+ if (device_attach(companion_dev) < 0)
+ ehci_err(ehci, "%s: failed\n", __func__);
+
+ put_device(companion_dev);
+}
+
+static void quirk_poll_work(struct work_struct *work)
+{
+ struct ehci_platform_priv *priv =
+ container_of(to_delayed_work(work), struct ehci_platform_priv,
+ poll_work);
+ struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd,
+ priv);
+
+ /* check the status twice to reduce misdetection rate */
+ if (!quirk_poll_check_port_status(ehci))
+ return;
+ udelay(10);
+ if (!quirk_poll_check_port_status(ehci))
+ return;
+
+ ehci_dbg(ehci, "%s: detected getting stuck. rebind now!\n", __func__);
+ quirk_poll_rebind_companion(ehci);
+}
+
+static void quirk_poll_timer(struct timer_list *t)
+{
+ struct ehci_platform_priv *priv = from_timer(priv, t, poll_timer);
+ struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd,
+ priv);
+
+ if (quirk_poll_check_port_status(ehci)) {
+ /*
+ * Schedule the work to test the port further. Note that the
+ * status update can be delayed during reconnection, so use
+ * delayed work with a 5 ms delay to avoid misdetection.
+ */
+ schedule_delayed_work(&priv->poll_work, msecs_to_jiffies(5));
+ }
+
+ mod_timer(&priv->poll_timer, jiffies + HZ);
+}
+
+static void quirk_poll_init(struct ehci_platform_priv *priv)
+{
+ INIT_DELAYED_WORK(&priv->poll_work, quirk_poll_work);
+ timer_setup(&priv->poll_timer, quirk_poll_timer, 0);
+ mod_timer(&priv->poll_timer, jiffies + HZ);
+}
+
+static void quirk_poll_end(struct ehci_platform_priv *priv)
+{
+ del_timer_sync(&priv->poll_timer);
+ cancel_delayed_work(&priv->poll_work);
+}
+
+static const struct soc_device_attribute quirk_poll_match[] = {
+ { .family = "R-Car Gen3" },
+ { /* sentinel*/ }
+};
+
static int ehci_platform_probe(struct platform_device *dev)
{
struct usb_hcd *hcd;
@@ -178,6 +288,9 @@ static int ehci_platform_probe(struct platform_device *dev)
"has-transaction-translator"))
hcd->has_tt = 1;
+ if (soc_device_match(quirk_poll_match))
+ priv->quirk_poll = true;
+
for (clk = 0; clk < EHCI_MAX_CLKS; clk++) {
priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
if (IS_ERR(priv->clks[clk])) {
@@ -249,6 +362,9 @@ static int ehci_platform_probe(struct platform_device *dev)
device_enable_async_suspend(hcd->self.controller);
platform_set_drvdata(dev, hcd);
+ if (priv->quirk_poll)
+ quirk_poll_init(priv);
+
return err;
err_power:
@@ -275,6 +391,9 @@ static int ehci_platform_remove(struct platform_device *dev)
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
int clk;
+ if (priv->quirk_poll)
+ quirk_poll_end(priv);
+
usb_remove_hcd(hcd);
if (pdata->power_off)
@@ -299,9 +418,13 @@ static int ehci_platform_suspend(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev = to_platform_device(dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
bool do_wakeup = device_may_wakeup(dev);
int ret;
+ if (priv->quirk_poll)
+ quirk_poll_end(priv);
+
ret = ehci_suspend(hcd, do_wakeup);
if (ret)
return ret;
@@ -333,6 +456,10 @@ static int ehci_platform_resume(struct device *dev)
}
ehci_resume(hcd, priv->reset_on_resume);
+
+ if (priv->quirk_poll)
+ quirk_poll_init(priv);
+
return 0;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 2d5a72c15069..226b38274a6e 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -5569,7 +5569,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
struct usb_hcd *hcd;
struct resource *res;
int irq;
- int retval = -ENODEV;
+ int retval;
struct fotg210_hcd *fotg210;
if (usb_disabled())
@@ -5589,7 +5589,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
dev_name(dev));
if (!hcd) {
- dev_err(dev, "failed to create hcd with err %d\n", retval);
+ dev_err(dev, "failed to create hcd\n");
retval = -ENOMEM;
goto fail_create_hcd;
}
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 677f9d592109..de922022b83a 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -94,10 +94,13 @@ static struct platform_device *fsl_usb2_device_register(
pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
- if (!pdev->dev.dma_mask)
+ if (!pdev->dev.dma_mask) {
pdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask;
- else
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ } else {
+ retval = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto error;
+ }
retval = platform_device_add_data(pdev, pdata, sizeof(*pdata));
if (retval)
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index afa321ab55fc..c9acc59f4add 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1864,7 +1864,7 @@ max3421_probe(struct spi_device *spi)
struct max3421_hcd *max3421_hcd;
struct usb_hcd *hcd = NULL;
struct max3421_hcd_platform_data *pdata = NULL;
- int retval = -ENOMEM;
+ int retval;
if (spi_setup(spi) < 0) {
dev_err(&spi->dev, "Unable to setup SPI bus");
@@ -1906,6 +1906,7 @@ max3421_probe(struct spi_device *spi)
goto error;
}
+ retval = -ENOMEM;
hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev,
dev_name(&spi->dev));
if (!hcd) {
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index c0c4dcca6f3c..a4a88b6de3c4 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -156,9 +156,8 @@ static int exynos_ohci_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(res);
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- dev_err(&pdev->dev, "Failed to get IRQ\n");
- err = -ENODEV;
+ if (irq < 0) {
+ err = irq;
goto fail_io;
}
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index af11887f5f9e..5916235adf35 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -101,7 +101,7 @@ static void io_watchdog_func(struct timer_list *t);
/* Some boards misreport power switching/overcurrent */
-static bool distrust_firmware = true;
+static bool distrust_firmware;
module_param (distrust_firmware, bool, 0);
MODULE_PARM_DESC (distrust_firmware,
"true to distrust firmware power/overcurrent setup");
@@ -665,20 +665,24 @@ retry:
/* handle root hub init quirks ... */
val = roothub_a (ohci);
- val &= ~(RH_A_PSM | RH_A_OCPM);
+ /* Configure for per-port over-current protection by default */
+ val &= ~RH_A_NOCP;
+ val |= RH_A_OCPM;
if (ohci->flags & OHCI_QUIRK_SUPERIO) {
- /* NSC 87560 and maybe others */
+ /* NSC 87560 and maybe others.
+ * Ganged power switching, no over-current protection.
+ */
val |= RH_A_NOCP;
- val &= ~(RH_A_POTPGT | RH_A_NPS);
- ohci_writel (ohci, val, &ohci->regs->roothub.a);
+ val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM);
} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
/* hub power always on; required for AMD-756 and some
- * Mac platforms. ganged overcurrent reporting, if any.
+ * Mac platforms.
*/
val |= RH_A_NPS;
- ohci_writel (ohci, val, &ohci->regs->roothub.a);
}
+ ohci_writel(ohci, val, &ohci->regs->roothub.a);
+
ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
&ohci->regs->roothub.b);
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index c9233cddf9a2..0a39dc58f376 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -196,6 +196,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
struct resource *mem;
usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
dma_release_declared_memory(&pdev->dev);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index c5e6e8d0b5ef..10d97261b433 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3719,8 +3719,10 @@ static struct usb_hcd *oxu_create(struct platform_device *pdev,
oxu->is_otg = otg;
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
- if (ret < 0)
+ if (ret < 0) {
+ usb_put_hcd(hcd);
return ERR_PTR(ret);
+ }
device_wakeup_enable(hcd->self.controller);
return hcd;
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 5b061e599948..6dedefada92b 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1287,11 +1287,10 @@ sl811h_hub_control(
goto error;
put_unaligned_le32(sl811->port1, buf);
-#ifndef VERBOSE
- if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
-#endif
- dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
- sl811->port1);
+ if (__is_defined(VERBOSE) ||
+ *(u16*)(buf+2)) /* only if wPortChange is interesting */
+ dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
+ sl811->port1);
break;
case SetPortFeature:
if (wIndex != 1 || wLength != 0)
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 76c3f29562d2..448d7b11dec4 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -273,7 +273,7 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused)
static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
{
- int dci;
+ int ep_index;
dma_addr_t dma;
struct xhci_hcd *xhci;
struct xhci_ep_ctx *ep_ctx;
@@ -282,9 +282,9 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
- for (dci = 1; dci < 32; dci++) {
- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci);
- dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params);
+ for (ep_index = 0; ep_index < 31; ep_index++) {
+ ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+ dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params);
seq_printf(s, "%pad: %s\n", &dma,
xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info),
le32_to_cpu(ep_ctx->ep_info2),
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 268328c20681..2208fa6c7410 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -7,8 +7,9 @@
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*/
-/* Up to 16 ms to halt an HC */
-#define XHCI_MAX_HALT_USEC (16*1000)
+
+/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */
+#define XHCI_MAX_HALT_USEC (32 * 1000)
/* HC not running - set to 1 when run/stop bit is cleared. */
#define XHCI_STS_HALT (1<<0)
diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
index 3c4abb5a1c3f..73aba464b66a 100644
--- a/drivers/usb/host/xhci-histb.c
+++ b/drivers/usb/host/xhci-histb.c
@@ -241,7 +241,7 @@ static int xhci_histb_probe(struct platform_device *pdev)
/* Initialize dma_mask and coherent_dma_mask to 32-bits */
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
- return ret;
+ goto disable_pm;
hcd = usb_create_hcd(driver, dev, dev_name(dev));
if (!hcd) {
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a024230f00e2..e6e8bed11aea 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -736,15 +736,6 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
{
u32 pls = status_reg & PORT_PLS_MASK;
- /* resume state is a xHCI internal state.
- * Do not report it to usb core, instead, pretend to be U3,
- * thus usb core knows it's not ready for transfer
- */
- if (pls == XDEV_RESUME) {
- *status |= USB_SS_PORT_LS_U3;
- return;
- }
-
/* When the CAS bit is set then warm reset
* should be performed on port
*/
@@ -767,6 +758,16 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
pls |= USB_PORT_STAT_CONNECTION;
} else {
/*
+ * Resume state is an xHCI internal state. Do not report it to
+ * usb core, instead, pretend to be U3, thus usb core knows
+ * it's not ready for transfer.
+ */
+ if (pls == XDEV_RESUME) {
+ *status |= USB_SS_PORT_LS_U3;
+ return;
+ }
+
+ /*
* If CAS bit isn't set but the Port is already at
* Compliance Mode, fake a connection so the USB core
* notices the Compliance state and resets the port.
@@ -1266,7 +1267,16 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_set_link_state(xhci, ports[wIndex], link_state);
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20); /* wait device to enter */
+ if (link_state == USB_SS_PORT_LS_U3) {
+ int retries = 16;
+
+ while (retries--) {
+ usleep_range(4000, 8000);
+ temp = readl(ports[wIndex]->addr);
+ if ((temp & PORT_PLS_MASK) == XDEV_U3)
+ break;
+ }
+ }
spin_lock_irqsave(&xhci->lock, flags);
temp = readl(ports[wIndex]->addr);
@@ -1472,6 +1482,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
}
if ((temp & PORT_RC))
reset_change = true;
+ if (temp & PORT_OC)
+ status = 1;
}
if (!status && !reset_change) {
xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
@@ -1537,6 +1549,13 @@ retry:
port_index);
goto retry;
}
+ /* bail out if the port detected an over-current condition */
+ if (t1 & PORT_OC) {
+ bus_state->bus_suspended = 0;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "Bus suspend bailout, port over-current detected\n");
+ return -EBUSY;
+ }
/* suspend ports in U0, or bail out for new connect changes */
if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
if ((t1 & PORT_CSC) && wake_enabled) {
@@ -1598,6 +1617,10 @@ retry:
hcd->state = HC_STATE_SUSPENDED;
bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
spin_unlock_irqrestore(&xhci->lock, flags);
+
+ if (bus_state->bus_suspended)
+ usleep_range(5000, 10000);
+
return 0;
}
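The SetPortFeature(U3) path above replaces a blind msleep(20) with a bounded poll: re-read PORTSC, check for XDEV_U3, sleep a few milliseconds, give up after 16 tries. The same idiom in a self-contained form with a caller-supplied predicate (poll_until and its arguments are illustrative only):

#include <stdbool.h>
#include <unistd.h>

/* Re-evaluate 'done' until it holds or 'retries' attempts are used up,
 * sleeping 'delay_us' between attempts; returns the final state. */
static bool poll_until(bool (*done)(void *ctx), void *ctx,
		       int retries, unsigned int delay_us)
{
	while (retries--) {
		if (done(ctx))
			return true;
		usleep(delay_us);
	}
	return done(ctx);
}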
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 9e87c282a743..2461be2a8748 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2134,6 +2134,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
if (major_revision == 0x03) {
rhub = &xhci->usb3_rhub;
+ /*
+ * Some hosts incorrectly use sub-minor version for minor
+ * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
+ * for bcdUSB 0x310). Since there is no USB release with sub
+ * minor version 0x301 to 0x309, we can assume that they are
+ * incorrect and fix it here.
+ */
+ if (minor_revision > 0x00 && minor_revision < 0x10)
+ minor_revision <<= 4;
} else if (major_revision <= 0x02) {
rhub = &xhci->usb2_rhub;
} else {
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index d04fdd173ed2..4bda7e7ee342 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -275,6 +275,10 @@ static bool need_bw_sch(struct usb_host_endpoint *ep,
if (is_fs_or_ls(speed) && !has_tt)
return false;
+ /* skip endpoint with zero maxpkt */
+ if (usb_endpoint_maxp(&ep->desc) == 0)
+ return false;
+
return true;
}
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 60987c787e44..f4b2e766f195 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -395,6 +395,15 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
if (mtk->lpm_support)
xhci->quirks |= XHCI_LPM_SUPPORT;
+ if (mtk->u2_lpm_disable)
+ xhci->quirks |= XHCI_HW_LPM_DISABLE;
+
+ /*
+ * MTK xHCI 0.96: PSA is 1 by default even if streams aren't
+ * supported, and it's 3 when they are.
+ */
+ if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4)
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
}
/* called during probe() after chip reset completes */
@@ -460,6 +469,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
return ret;
mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");
+ mtk->u2_lpm_disable = of_property_read_bool(node, "usb2-lpm-disable");
/* optional property, ignore the error if it does not exist */
of_property_read_u32(node, "mediatek,u3p-dis-msk",
&mtk->u3p_dis_msk);
@@ -551,7 +561,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
if (ret)
goto put_usb3_hcd;
- if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ if (HCC_MAX_PSA(xhci->hcc_params) >= 4 &&
+ !(xhci->quirks & XHCI_BROKEN_STREAMS))
xhci->shared_hcd->can_do_streams = 1;
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
@@ -592,6 +603,9 @@ static int xhci_mtk_remove(struct platform_device *dev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct usb_hcd *shared_hcd = xhci->shared_hcd;
+ pm_runtime_put_noidle(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+
usb_remove_hcd(shared_hcd);
xhci->shared_hcd = NULL;
device_init_wakeup(&dev->dev, false);
@@ -602,8 +616,6 @@ static int xhci_mtk_remove(struct platform_device *dev)
xhci_mtk_sch_exit(mtk);
xhci_mtk_clks_disable(mtk);
xhci_mtk_ldos_disable(mtk);
- pm_runtime_put_sync(&dev->dev);
- pm_runtime_disable(&dev->dev);
return 0;
}
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index cc59d80b663b..1601ca9a388e 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -123,6 +123,7 @@ struct xhci_hcd_mtk {
struct phy **phys;
int num_phys;
bool lpm_support;
+ bool u2_lpm_disable;
/* usb remote wakeup */
bool uwk_en;
struct regmap *uwk;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index d87f48e6b0c7..19485c076ba3 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -21,6 +21,8 @@
#define SSIC_PORT_CFG2_OFFSET 0x30
#define PROG_DONE (1 << 30)
#define SSIC_PORT_UNUSED (1 << 31)
+#define SPARSE_DISABLE_BIT 17
+#define SPARSE_CNTL_ENABLE 0xC12C
/* Device for a quirk */
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
@@ -47,7 +49,11 @@
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
+#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042
#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
+#define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242
+#define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142
+#define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242
static const char hcd_name[] = "xhci_hcd";
@@ -138,6 +144,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == 0x15e0 || pdev->device == 0x15e1))
xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) {
+ xhci->quirks |= XHCI_DISABLE_SPARSE;
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ }
+
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
@@ -226,11 +237,18 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_BROKEN_STREAMS;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
- pdev->device == 0x1042)
+ pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)
xhci->quirks |= XHCI_BROKEN_STREAMS;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
- pdev->device == 0x1142)
+ pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) {
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ (pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI ||
+ pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI ||
+ pdev->device == PCI_DEVICE_ID_ASMEDIA_3242_XHCI))
+ xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
@@ -434,6 +452,15 @@ static void xhci_pme_quirk(struct usb_hcd *hcd)
readl(reg);
}
+static void xhci_sparse_control_quirk(struct usb_hcd *hcd)
+{
+ u32 reg;
+
+ reg = readl(hcd->regs + SPARSE_CNTL_ENABLE);
+ reg &= ~BIT(SPARSE_DISABLE_BIT);
+ writel(reg, hcd->regs + SPARSE_CNTL_ENABLE);
+}
+
static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -453,6 +480,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
xhci_ssic_port_unused_quirk(hcd, true);
+ if (xhci->quirks & XHCI_DISABLE_SPARSE)
+ xhci_sparse_control_quirk(hcd);
+
ret = xhci_suspend(xhci, do_wakeup);
if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
xhci_ssic_port_unused_quirk(hcd, false);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 9602241e4371..adc437ca83b8 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -361,6 +361,7 @@ static int xhci_plat_remove(struct platform_device *dev)
struct clk *reg_clk = xhci->reg_clk;
struct usb_hcd *shared_hcd = xhci->shared_hcd;
+ pm_runtime_get_sync(&dev->dev);
xhci->xhc_state |= XHCI_STATE_REMOVING;
usb_remove_hcd(shared_hcd);
@@ -374,8 +375,9 @@ static int xhci_plat_remove(struct platform_device *dev)
clk_disable_unprepare(reg_clk);
usb_put_hcd(hcd);
- pm_runtime_set_suspended(&dev->dev);
pm_runtime_disable(&dev->dev);
+ pm_runtime_put_noidle(&dev->dev);
+ pm_runtime_set_suspended(&dev->dev);
return 0;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 509a7fce8f05..22b1de99d02d 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -670,11 +670,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
DMA_FROM_DEVICE);
/* for IN transfers we need to copy the data from bounce to sg */
- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
- seg->bounce_len, seg->bounce_offs);
- if (len != seg->bounce_len)
- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
- len, seg->bounce_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+ seg->bounce_len, seg->bounce_offs);
+ if (len != seg->bounce_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+ len, seg->bounce_len);
+ } else {
+ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
+ seg->bounce_len);
+ }
seg->bounce_len = 0;
seg->bounce_offs = 0;
}
@@ -2835,6 +2840,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
+ /* make sure TRB is fully written before giving it to the controller */
+ wmb();
trb->field[3] = cpu_to_le32(field4);
trace_xhci_queue_trb(ring, trb);
@@ -3178,12 +3185,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
/* create a max max_pkt sized bounce buffer pointed to by last trb */
if (usb_urb_dir_out(urb)) {
- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
- seg->bounce_buf, new_buff_len, enqd_len);
- if (len != new_buff_len)
- xhci_warn(xhci,
- "WARN Wrong bounce buffer write length: %zu != %d\n",
- len, new_buff_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+ seg->bounce_buf, new_buff_len, enqd_len);
+ if (len != new_buff_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
+ len, new_buff_len);
+ } else {
+ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
+ }
+
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_TO_DEVICE);
} else {
@@ -3331,8 +3342,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* New sg entry */
--num_sgs;
sent_len -= block_len;
- if (num_sgs != 0) {
- sg = sg_next(sg);
+ sg = sg_next(sg);
+ if (num_sgs != 0 && sg) {
block_len = sg_dma_len(sg);
addr = (u64) sg_dma_address(sg);
addr += sent_len;
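The wmb() added in queue_trb() orders the payload words ahead of field[3], which carries the cycle bit that hands the TRB to the controller. In C11 terms the same publish step is a release store on the ownership word; a compact analogue follows (struct desc and publish() are illustrative, not xHCI structures):

#include <stdatomic.h>
#include <stdint.h>

struct desc {
	uint32_t field[3];
	_Atomic uint32_t owner;		/* cycle-bit word, written last */
};

/* Fill the payload first, then release-store the ownership word so the
 * consumer can never observe a half-written descriptor. */
static void publish(struct desc *d, uint32_t f0, uint32_t f1, uint32_t f2,
		    uint32_t owner_word)
{
	d->field[0] = f0;
	d->field[1] = f1;
	d->field[2] = f2;
	atomic_store_explicit(&d->owner, owner_word, memory_order_release);
}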
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index fe37dacc695f..dc9cd8c519cf 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -578,6 +578,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
enable);
if (err < 0)
break;
+
+ /*
+ * wait 500us for LFPS detector to be disabled before
+ * sending ACK
+ */
+ if (!enable)
+ usleep_range(500, 1000);
}
if (err < 0) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 65cc362717fc..c4e3760abd5b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -227,6 +227,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
int err, i;
u64 val;
+ u32 intrs;
/*
* Some Renesas controllers get into a weird state if they are
@@ -265,7 +266,10 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
- for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
+ intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
+ ARRAY_SIZE(xhci->run_regs->ir_set));
+
+ for (i = 0; i < intrs; i++) {
struct xhci_intr_reg __iomem *ir;
ir = &xhci->run_regs->ir_set[i];
@@ -972,12 +976,15 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
xhci->shared_hcd->state != HC_STATE_SUSPENDED)
return -EINVAL;
- xhci_dbc_suspend(xhci);
-
/* Clear root port wake on bits if wakeup not allowed. */
if (!do_wakeup)
xhci_disable_port_wake_on_bits(xhci);
+ if (!HCD_HW_ACCESSIBLE(hcd))
+ return 0;
+
+ xhci_dbc_suspend(xhci);
+
/* Don't poll the roothubs on bus suspend. */
xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -1075,6 +1082,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
struct usb_hcd *secondary_hcd;
int retval = 0;
bool comp_timer_running = false;
+ bool pending_portevent = false;
if (!hcd->state)
return 0;
@@ -1147,8 +1155,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
xhci_zero_64b_regs(xhci);
- xhci_reset(xhci);
+ retval = xhci_reset(xhci);
spin_unlock_irq(&xhci->lock);
+ if (retval)
+ return retval;
xhci_cleanup_msix(xhci);
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
@@ -1211,13 +1221,22 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
done:
if (retval == 0) {
- /* Resume root hubs only when have pending events. */
- if (xhci_pending_portevent(xhci)) {
+ /*
+ * Resume roothubs only if there are pending events.
+ * USB 3 devices resend U3 LFPS wake after a 100ms delay if
+ * the first wake signalling failed, give it that chance.
+ */
+ pending_portevent = xhci_pending_portevent(xhci);
+ if (!pending_portevent) {
+ msleep(120);
+ pending_portevent = xhci_pending_portevent(xhci);
+ }
+
+ if (pending_portevent) {
usb_hcd_resume_root_hub(xhci->shared_hcd);
usb_hcd_resume_root_hub(hcd);
}
}
-
/*
* If system is subject to the Quirk, Compliance Mode Timer needs to
* be re-initialized Always after a system resume. Ports are subject
@@ -1355,7 +1374,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
* we need to issue an evaluate context command and wait on it.
*/
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
- unsigned int ep_index, struct urb *urb)
+ unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
{
struct xhci_container_ctx *out_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
@@ -1386,7 +1405,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
* changes max packet sizes.
*/
- command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, true, mem_flags);
if (!command)
return -ENOMEM;
@@ -1403,6 +1422,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
xhci->devs[slot_id]->out_ctx, ep_index);
ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+ ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
@@ -1482,7 +1502,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
*/
if (urb->dev->speed == USB_SPEED_FULL) {
ret = xhci_check_maxpacket(xhci, slot_id,
- ep_index, urb);
+ ep_index, urb, mem_flags);
if (ret < 0) {
xhci_urb_free_priv(urb_priv);
urb->hcpriv = NULL;
@@ -3132,6 +3152,14 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
/* config ep command clears toggle if add and drop ep flags are set */
ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
+ if (!ctrl_ctx) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, cfg_cmd);
+ xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ __func__);
+ goto cleanup;
+ }
+
xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
ctrl_ctx, ep_flag, ep_flag);
xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
@@ -3151,10 +3179,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
wait_for_completion(cfg_cmd->completion);
- ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
xhci_free_command(xhci, cfg_cmd);
cleanup:
xhci_free_command(xhci, stop_cmd);
+ if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
+ ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
}
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
@@ -4302,6 +4331,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
int hird, exit_latency;
int ret;
+ if (xhci->quirks & XHCI_HW_LPM_DISABLE)
+ return -EPERM;
+
if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
!udev->lpm_capable)
return -EPERM;
@@ -4324,7 +4356,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
enable ? "enable" : "disable", port_num + 1);
- if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
+ if (enable) {
/* Host supports BESL timeout instead of HIRD */
if (udev->usb2_hw_lpm_besl_capable) {
/* if device doesn't have a preferred BESL value use a
@@ -4383,6 +4415,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
mutex_lock(hcd->bandwidth_mutex);
xhci_change_max_exit_latency(xhci, udev, 0);
mutex_unlock(hcd->bandwidth_mutex);
+ readl_poll_timeout(ports[port_num]->addr, pm_val,
+ (pm_val & PORT_PLS_MASK) == XDEV_U0,
+ 100, 10000);
return 0;
}
}
@@ -4545,19 +4580,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+ else
+ timeout_ns = udev->u1_params.sel;
+
/* Prevent U1 if service interval is shorter than U1 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
- else
- timeout_ns = udev->u1_params.sel;
-
/* The U1 timeout is encoded in 1us intervals.
* Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
*/
@@ -4609,19 +4644,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+ else
+ timeout_ns = udev->u2_params.sel;
+
/* Prevent U2 if service interval is shorter than U2 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
- else
- timeout_ns = udev->u2_params.sel;
-
/* The U2 timeout is encoded in 256us intervals */
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
/* If the necessary timeout value is bigger than what we can set in the
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 4dedc822237f..7a4195f8cd1c 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -716,7 +716,7 @@ struct xhci_ep_ctx {
* 4 - TRB error
* 5-7 - reserved
*/
-#define EP_STATE_MASK (0xf)
+#define EP_STATE_MASK (0x7)
#define EP_STATE_DISABLED 0
#define EP_STATE_RUNNING 1
#define EP_STATE_HALTED 2
@@ -1872,6 +1872,7 @@ struct xhci_hcd {
#define XHCI_ZERO_64B_REGS BIT_ULL(32)
#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
+#define XHCI_DISABLE_SPARSE BIT_ULL(38)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index b8073f36ffdc..62fdfde4ad03 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -209,6 +209,7 @@ static void adu_interrupt_out_callback(struct urb *urb)
if (status != 0) {
if ((status != -ENOENT) &&
+ (status != -ESHUTDOWN) &&
(status != -ECONNRESET)) {
dev_dbg(&dev->udev->dev,
"%s :nonzero status received: %d\n", __func__,
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 92875a264b14..9c1ca20d4139 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -2,8 +2,9 @@
/*
* Native support for the I/O-Warrior USB devices
*
- * Copyright (c) 2003-2005 Code Mercenaries GmbH
- * written by Christian Lucht <lucht@codemercs.com>
+ * Copyright (c) 2003-2005, 2020 Code Mercenaries GmbH
+ * written by Christian Lucht <lucht@codemercs.com> and
+ * Christoph Jung <jung@codemercs.com>
*
* based on
@@ -817,14 +818,28 @@ static int iowarrior_probe(struct usb_interface *interface,
/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
- if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
- ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
- (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
- (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
- (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
- (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)))
- /* IOWarrior56 has wMaxPacketSize different from report size */
- dev->report_size = 7;
+
+ /*
+ * Some devices need the report size to be different than the
+ * endpoint size.
+ */
+ if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) {
+ switch (dev->product_id) {
+ case USB_DEVICE_ID_CODEMERCS_IOW56:
+ case USB_DEVICE_ID_CODEMERCS_IOW56AM:
+ dev->report_size = 7;
+ break;
+
+ case USB_DEVICE_ID_CODEMERCS_IOW28:
+ case USB_DEVICE_ID_CODEMERCS_IOW28L:
+ dev->report_size = 4;
+ break;
+
+ case USB_DEVICE_ID_CODEMERCS_IOW100:
+ dev->report_size = 13;
+ break;
+ }
+ }
/* create the urb and buffer for reading */
dev->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
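
Illustrative sketch (the product IDs below are invented, not the real Code Mercenaries values): the iowarrior hunk above turns a single hard-coded report size of 7 into a per-family switch, defaulting to the interrupt endpoint's wMaxPacketSize and overriding it to 7, 4 or 13 bytes as needed:

#include <stdio.h>

enum {                       /* hypothetical product IDs, for illustration */
	ID_IOW56  = 0x1001, ID_IOW56AM = 0x1002,
	ID_IOW28  = 0x1003, ID_IOW28L  = 0x1004,
	ID_IOW100 = 0x1005,
};

/* Default to the endpoint size, then override per product family. */
static int report_size(int product_id, int ep_maxp)
{
	int size = ep_maxp;

	switch (product_id) {
	case ID_IOW56:
	case ID_IOW56AM:
		size = 7;
		break;
	case ID_IOW28:
	case ID_IOW28L:
		size = 4;
		break;
	case ID_IOW100:
		size = 13;
		break;
	}
	return size;
}

int main(void)
{
	printf("%d %d\n", report_size(ID_IOW100, 64), report_size(0, 64));
	return 0;                            /* prints "13 64" */
}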
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index e5c03c6d16e9..d3b161bdef74 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -429,7 +429,7 @@ static int lvs_rh_probe(struct usb_interface *intf,
USB_DT_SS_HUB_SIZE, USB_CTRL_GET_TIMEOUT);
if (ret < (USB_DT_HUB_NONVAR_SIZE + 2)) {
dev_err(&hdev->dev, "wrong root hub descriptor read %d\n", ret);
- return ret;
+ return ret < 0 ? ret : -EINVAL;
}
/* submit urb to poll interrupt endpoint */
diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig
index 36bc28c884ad..47dabccafef4 100644
--- a/drivers/usb/misc/sisusbvga/Kconfig
+++ b/drivers/usb/misc/sisusbvga/Kconfig
@@ -15,7 +15,7 @@ config USB_SISUSBVGA
config USB_SISUSBVGA_CON
bool "Text console and mode switching support" if USB_SISUSBVGA
- depends on VT
+ depends on VT && BROKEN
select FONT_8x16
---help---
Say Y here if you want a VGA text console via the USB dongle or
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index c4f6ac5f035e..4877bf82ad39 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -761,7 +761,7 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
u8 swap8, fromkern = kernbuffer ? 1 : 0;
u16 swap16;
u32 swap32, flag = (length >> 28) & 1;
- char buf[4];
+ u8 buf[4];
/* if neither kernbuffer not userbuffer are given, assume
* data in obuf
@@ -1199,18 +1199,18 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
/* High level: Gfx (indexed) register access */
#ifdef INCL_SISUSB_CON
-int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data)
+int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data)
{
return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
}
-int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data)
+int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 *data)
{
return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
}
#endif
-int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
+int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 data)
{
int ret;
@@ -1220,7 +1220,7 @@ int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
return ret;
}
-int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
+int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 *data)
{
int ret;
@@ -1230,7 +1230,7 @@ int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
return ret;
}
-int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
+int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx,
u8 myand, u8 myor)
{
int ret;
@@ -1245,7 +1245,7 @@ int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
}
static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb,
- int port, u8 idx, u8 data, u8 mask)
+ u32 port, u8 idx, u8 data, u8 mask)
{
int ret;
u8 tmp;
@@ -1258,13 +1258,13 @@ static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb,
return ret;
}
-int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port,
+int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 myor)
{
return sisusb_setidxregandor(sisusb, port, index, 0xff, myor);
}
-int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
+int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port,
u8 idx, u8 myand)
{
return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00);
@@ -2787,8 +2787,8 @@ static loff_t sisusb_lseek(struct file *file, loff_t offset, int orig)
static int sisusb_handle_command(struct sisusb_usb_data *sisusb,
struct sisusb_command *y, unsigned long arg)
{
- int retval, port, length;
- u32 address;
+ int retval, length;
+ u32 port, address;
/* All our commands require the device
* to be initialized.
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h
index 1782c759c4ad..ace09985dae4 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.h
@@ -812,17 +812,17 @@ static const struct SiS_VCLKData SiSUSB_VCLKData[] = {
int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo);
int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo);
-extern int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data);
-extern int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 * data);
-extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data);
+extern int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 * data);
+extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 data);
-extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 * data);
-extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port,
u8 idx, u8 myand, u8 myor);
-extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 myor);
-extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port,
u8 idx, u8 myand);
void sisusb_delete(struct kref *kref);
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index b3e1f553954a..ed63e954970a 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -59,9 +59,9 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
/* Set speed */
retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0),
0x01, /* vendor request: set speed */
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
tv->speed, /* speed value */
- 0, NULL, 0, USB_CTRL_GET_TIMEOUT);
+ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval) {
tv->speed = old;
dev_dbg(&tv->udev->dev, "retval = %d\n", retval);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index c7f82310e73e..fc3fc9d48a55 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -2853,6 +2853,7 @@ static void usbtest_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
dev_dbg(&intf->dev, "disconnect\n");
+ kfree(dev->buf);
kfree(dev);
}
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index b5d661644263..748139d26263 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -736,6 +736,7 @@ static int uss720_probe(struct usb_interface *intf,
parport_announce_port(pp);
usb_set_intfdata(intf, pp);
+ usb_put_dev(usbdev);
return 0;
probe_abort:
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index be0505b8b5d4..08b72bb22b7e 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -492,11 +492,14 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__,
dev->cntl_buffer[0]);
- retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL);
+ retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC);
if (retval >= 0)
timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
finish_wait(&dev->waitq, &wait);
+ /* make sure URB is idle after timeout or (spurious) CMD_ACK */
+ usb_kill_urb(dev->cntl_urb);
+
mutex_unlock(&dev->io_mutex);
if (retval < 0) {
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index 860693520132..408e964522ab 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -128,8 +128,12 @@ static void mtu3_device_disable(struct mtu3 *mtu)
mtu3_setbits(ibase, SSUSB_U2_CTRL(0),
SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN);
- if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+ if (mtu->is_u3_ip)
+ mtu3_clrbits(ibase, SSUSB_U3_CTRL(0),
+ SSUSB_U3_PORT_DUAL_MODE);
+ }
mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
}
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index bbcd3332471d..3828ed6d38a2 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -573,6 +573,7 @@ static int mtu3_gadget_stop(struct usb_gadget *g)
spin_unlock_irqrestore(&mtu->lock, flags);
+ synchronize_irq(mtu->irq);
return 0;
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index b6b4f99a399c..2a874058dff1 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1868,10 +1868,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
case MUSB_QUIRK_B_DISCONNECT_99:
- musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
- schedule_delayed_work(&musb->irq_work,
- msecs_to_jiffies(1000));
- break;
+ if (musb->quirk_retries && !musb->flush_irq_work) {
+ musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+ musb->quirk_retries--;
+ break;
+ }
+ /* fall through */
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
@@ -2108,32 +2112,35 @@ int musb_queue_resume_work(struct musb *musb,
{
struct musb_pending_work *w;
unsigned long flags;
+ bool is_suspended;
int error;
if (WARN_ON(!callback))
return -EINVAL;
- if (pm_runtime_active(musb->controller))
- return callback(musb, data);
+ spin_lock_irqsave(&musb->list_lock, flags);
+ is_suspended = musb->is_runtime_suspended;
- w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
- if (!w)
- return -ENOMEM;
+ if (is_suspended) {
+ w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
+ if (!w) {
+ error = -ENOMEM;
+ goto out_unlock;
+ }
+
+ w->callback = callback;
+ w->data = data;
- w->callback = callback;
- w->data = data;
- spin_lock_irqsave(&musb->list_lock, flags);
- if (musb->is_runtime_suspended) {
list_add_tail(&w->node, &musb->pending_list);
error = 0;
- } else {
- dev_err(musb->controller, "could not add resume work %p\n",
- callback);
- devm_kfree(musb->controller, w);
- error = -EINPROGRESS;
}
+
+out_unlock:
spin_unlock_irqrestore(&musb->list_lock, flags);
+ if (!is_suspended)
+ error = callback(musb, data);
+
return error;
}
EXPORT_SYMBOL_GPL(musb_queue_resume_work);
@@ -2737,6 +2744,13 @@ static int musb_resume(struct device *dev)
musb_enable_interrupts(musb);
musb_platform_enable(musb);
+ /* session might be disabled in suspend */
+ if (musb->port_mode == MUSB_HOST &&
+ !(musb->ops->quirks & MUSB_PRESERVE_SESSION)) {
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ }
+
spin_lock_irqsave(&musb->lock, flags);
error = musb_run_resume_work(musb);
if (error)
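
Illustrative sketch (a pthread mutex stands in for the musb list spinlock; names are invented): musb_queue_resume_work() above now samples is_runtime_suspended under the lock, queues the work only in that case, and invokes the callback after the lock has been dropped, so the callback never runs under the spinlock and the not-suspended case no longer fails with -EINPROGRESS:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool suspended;                  /* protected by lock */

static int queue_or_run(int (*cb)(int), int arg)
{
	bool was_suspended;
	int err = 0;

	pthread_mutex_lock(&lock);
	was_suspended = suspended;
	if (was_suspended) {
		/* real code appends {cb, arg} to a pending list here */
		printf("deferred until resume\n");
	}
	pthread_mutex_unlock(&lock);

	if (!was_suspended)
		err = cb(arg);          /* never called with the lock held */

	return err;
}

static int work(int x) { printf("ran now: %d\n", x); return 0; }

int main(void)
{
	queue_or_run(work, 1);          /* runs immediately */

	pthread_mutex_lock(&lock);
	suspended = true;
	pthread_mutex_unlock(&lock);

	queue_or_run(work, 2);          /* deferred */
	return 0;
}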
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index f42858e2b54c..0c6204add616 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -168,6 +168,11 @@ static ssize_t musb_test_mode_write(struct file *file,
u8 test;
char buf[24];
+ memset(buf, 0x00, sizeof(buf));
+
+ if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
pm_runtime_get_sync(musb->controller);
test = musb_readb(musb->mregs, MUSB_TESTMODE);
if (test) {
@@ -176,11 +181,6 @@ static ssize_t musb_test_mode_write(struct file *file,
goto ret;
}
- memset(buf, 0x00, sizeof(buf));
-
- if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
- return -EFAULT;
-
if (strstarts(buf, "force host full-speed"))
test = MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_FS;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index aeb53ec5cc6a..6b92a57382db 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
}
usbhs_pipe_clear_without_sequence(pipe, 0, 0);
+ usbhs_pipe_running(pipe, 0);
__usbhsf_pkt_del(pkt);
}
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index 9e5afdde1adb..40576e7176d8 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -746,6 +746,8 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
void usbhs_pipe_free(struct usbhs_pipe *pipe)
{
+ usbhsp_pipe_select(pipe);
+ usbhsp_pipe_cfg_set(pipe, 0xFFFF, 0);
usbhsp_put_pipe(pipe);
}
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 955ab97b9b22..c87cb25e70ec 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -80,9 +80,12 @@
#define CH341_LCR_CS5 0x00
static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x4348, 0x5523) },
- { USB_DEVICE(0x1a86, 0x7523) },
+ { USB_DEVICE(0x1a86, 0x5512) },
{ USB_DEVICE(0x1a86, 0x5523) },
+ { USB_DEVICE(0x1a86, 0x7522) },
+ { USB_DEVICE(0x1a86, 0x7523) },
+ { USB_DEVICE(0x4348, 0x5523) },
+ { USB_DEVICE(0x9986, 0x7523) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7ae121567098..7d602e6ccbc0 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
@@ -145,6 +146,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+ { USB_DEVICE(0x10C4, 0x88D8) }, /* Acuity Brands nLight Air Adapter */
{ USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
{ USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
@@ -201,6 +203,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
{ USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
+ { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */
+ { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */
+ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
@@ -271,6 +276,8 @@ static struct usb_serial_driver cp210x_device = {
.break_ctl = cp210x_break_ctl,
.set_termios = cp210x_set_termios,
.tx_empty = cp210x_tx_empty,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
.tiocmget = cp210x_tiocmget,
.tiocmset = cp210x_tiocmset,
.attach = cp210x_attach,
@@ -893,6 +900,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
u32 baud;
u16 bits;
u32 ctl_hs;
+ u32 flow_repl;
cp210x_read_u32_reg(port, CP210X_GET_BAUDRATE, &baud);
@@ -993,6 +1001,22 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake);
if (ctl_hs & CP210X_SERIAL_CTS_HANDSHAKE) {
dev_dbg(dev, "%s - flow control = CRTSCTS\n", __func__);
+ /*
+ * When the port is closed, the CP210x hardware disables
+ * auto-RTS and RTS is deasserted but it leaves auto-CTS when
+ * in hardware flow control mode. When re-opening the port, if
+ * auto-CTS is enabled on the cp210x, then auto-RTS must be
+ * re-enabled in the driver.
+ */
+ flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace);
+ flow_repl &= ~CP210X_SERIAL_RTS_MASK;
+ flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_FLOW_CTL);
+ flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl);
+ cp210x_write_reg_block(port,
+ CP210X_SET_FLOW,
+ &flow_ctl,
+ sizeof(flow_ctl));
+
cflag |= CRTSCTS;
} else {
dev_dbg(dev, "%s - flow control = NONE\n", __func__);
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index ebd76ab07b72..36dd688b5795 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -357,11 +357,12 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
struct device *dev = &port->dev;
int status = urb->status;
unsigned long flags;
+ bool resubmitted = false;
- set_bit(0, &port->write_urbs_free);
if (status) {
dev_dbg(dev, "%s - nonzero write bulk status received: %d\n",
__func__, status);
+ set_bit(0, &port->write_urbs_free);
return;
}
@@ -394,6 +395,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
goto exit;
}
+ resubmitted = true;
+
dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent);
dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled);
@@ -410,6 +413,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
exit:
spin_unlock_irqrestore(&priv->lock, flags);
+ if (!resubmitted)
+ set_bit(0, &port->write_urbs_free);
usb_serial_port_softint(port);
}
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 2c58649fd47a..4ed8de1e5991 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -59,6 +59,7 @@ static const struct usb_device_id id_table_earthmate[] = {
static const struct usb_device_id id_table_cyphidcomrs232[] = {
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
+ { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
{ USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
{ } /* Terminating entry */
@@ -73,6 +74,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
+ { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
{ USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
{ USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
index 35e223751c0e..16b7410ad057 100644
--- a/drivers/usb/serial/cypress_m8.h
+++ b/drivers/usb/serial/cypress_m8.h
@@ -25,6 +25,9 @@
#define VENDOR_ID_CYPRESS 0x04b4
#define PRODUCT_ID_CYPHIDCOM 0x5500
+/* Simply Automated HID->COM UPB PIM (using Cypress PID 0x5500) */
+#define VENDOR_ID_SAI 0x17dd
+
/* FRWD Dongle - a GPS sports watch */
#define VENDOR_ID_FRWD 0x6737
#define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index e7f244cf2c07..a618276ead98 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -19,7 +19,6 @@
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
-#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/wait.h>
@@ -198,14 +197,12 @@ struct digi_port {
int dp_throttle_restart;
wait_queue_head_t dp_flush_wait;
wait_queue_head_t dp_close_wait; /* wait queue for close */
- struct work_struct dp_wakeup_work;
struct usb_serial_port *dp_port;
};
/* Local Function Declarations */
-static void digi_wakeup_write_lock(struct work_struct *work);
static int digi_write_oob_command(struct usb_serial_port *port,
unsigned char *buf, int count, int interruptible);
static int digi_write_inb_command(struct usb_serial_port *port,
@@ -356,26 +353,6 @@ __releases(lock)
return timeout;
}
-
-/*
- * Digi Wakeup Write
- *
- * Wake up port, line discipline, and tty processes sleeping
- * on writes.
- */
-
-static void digi_wakeup_write_lock(struct work_struct *work)
-{
- struct digi_port *priv =
- container_of(work, struct digi_port, dp_wakeup_work);
- struct usb_serial_port *port = priv->dp_port;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->dp_port_lock, flags);
- tty_port_tty_wakeup(&port->port);
- spin_unlock_irqrestore(&priv->dp_port_lock, flags);
-}
-
/*
* Digi Write OOB Command
*
@@ -987,6 +964,7 @@ static void digi_write_bulk_callback(struct urb *urb)
unsigned long flags;
int ret = 0;
int status = urb->status;
+ bool wakeup;
/* port and serial sanity check */
if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) {
@@ -1013,6 +991,7 @@ static void digi_write_bulk_callback(struct urb *urb)
}
/* try to send any buffered data on this port */
+ wakeup = true;
spin_lock_irqsave(&priv->dp_port_lock, flags);
priv->dp_write_urb_in_use = 0;
if (priv->dp_out_buf_len > 0) {
@@ -1028,19 +1007,18 @@ static void digi_write_bulk_callback(struct urb *urb)
if (ret == 0) {
priv->dp_write_urb_in_use = 1;
priv->dp_out_buf_len = 0;
+ wakeup = false;
}
}
- /* wake up processes sleeping on writes immediately */
- tty_port_tty_wakeup(&port->port);
- /* also queue up a wakeup at scheduler time, in case we */
- /* lost the race in write_chan(). */
- schedule_work(&priv->dp_wakeup_work);
-
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
+
if (ret && ret != -EPERM)
dev_err_console(port,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
+
+ if (wakeup)
+ tty_port_tty_wakeup(&port->port);
}
static int digi_write_room(struct tty_struct *tty)
@@ -1240,7 +1218,6 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
init_waitqueue_head(&priv->dp_transmit_idle_wait);
init_waitqueue_head(&priv->dp_flush_wait);
init_waitqueue_head(&priv->dp_close_wait);
- INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
priv->dp_port = port;
init_waitqueue_head(&port->write_wait);
@@ -1509,13 +1486,14 @@ static int digi_read_oob_callback(struct urb *urb)
rts = C_CRTSCTS(tty);
if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
+ bool wakeup = false;
+
spin_lock_irqsave(&priv->dp_port_lock, flags);
/* convert from digi flags to termiox flags */
if (val & DIGI_READ_INPUT_SIGNALS_CTS) {
priv->dp_modem_signals |= TIOCM_CTS;
- /* port must be open to use tty struct */
if (rts)
- tty_port_tty_wakeup(&port->port);
+ wakeup = true;
} else {
priv->dp_modem_signals &= ~TIOCM_CTS;
/* port must be open to use tty struct */
@@ -1534,6 +1512,9 @@ static int digi_read_oob_callback(struct urb *urb)
priv->dp_modem_signals &= ~TIOCM_CD;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
+
+ if (wakeup)
+ tty_port_tty_wakeup(&port->port);
} else if (opcode == DIGI_CMD_TRANSMIT_IDLE) {
spin_lock_irqsave(&priv->dp_port_lock, flags);
priv->dp_transmit_idle = 1;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 3c0f38cd3a5a..37575d7983c0 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -703,6 +703,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
+ { USB_DEVICE(XSENS_VID, XSENS_MTIUSBCONVERTER_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
@@ -1023,9 +1024,17 @@ static const struct usb_device_id id_table_combined[] = {
/* Sienna devices */
{ USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
{ USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
+ /* IDS GmbH devices */
+ { USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
+ { USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
/* U-Blox devices */
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
+ /* FreeCalypso USB adapters */
+ { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_BUF_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ } /* Terminating entry */
};
@@ -1364,8 +1373,9 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
index_value = get_ftdi_divisor(tty, port);
value = (u16)index_value;
index = (u16)(index_value >> 16);
- if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) ||
- (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) {
+ if (priv->chip_type == FT2232C || priv->chip_type == FT2232H ||
+ priv->chip_type == FT4232H || priv->chip_type == FT232H ||
+ priv->chip_type == FTX) {
/* Probably the BM type needs the MSB of the encoded fractional
* divider also moved like for the chips above. Any infos? */
index = (u16)((index << 8) | priv->interface);
@@ -2037,12 +2047,11 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
#define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE)
static int ftdi_process_packet(struct usb_serial_port *port,
- struct ftdi_private *priv, char *packet, int len)
+ struct ftdi_private *priv, unsigned char *buf, int len)
{
+ unsigned char status;
int i;
- char status;
char flag;
- char *ch;
if (len < 2) {
dev_dbg(&port->dev, "malformed packet\n");
@@ -2052,7 +2061,7 @@ static int ftdi_process_packet(struct usb_serial_port *port,
/* Compare new line status to the old one, signal if different/
N.B. packet may be processed more than once, but differences
are only processed once. */
- status = packet[0] & FTDI_STATUS_B0_MASK;
+ status = buf[0] & FTDI_STATUS_B0_MASK;
if (status != priv->prev_status) {
char diff_status = status ^ priv->prev_status;
@@ -2078,13 +2087,12 @@ static int ftdi_process_packet(struct usb_serial_port *port,
}
/* save if the transmitter is empty or not */
- if (packet[1] & FTDI_RS_TEMT)
+ if (buf[1] & FTDI_RS_TEMT)
priv->transmit_empty = 1;
else
priv->transmit_empty = 0;
- len -= 2;
- if (!len)
+ if (len == 2)
return 0; /* status only */
/*
@@ -2092,40 +2100,41 @@ static int ftdi_process_packet(struct usb_serial_port *port,
* data payload to avoid over-reporting.
*/
flag = TTY_NORMAL;
- if (packet[1] & FTDI_RS_ERR_MASK) {
+ if (buf[1] & FTDI_RS_ERR_MASK) {
/* Break takes precedence over parity, which takes precedence
* over framing errors */
- if (packet[1] & FTDI_RS_BI) {
+ if (buf[1] & FTDI_RS_BI) {
flag = TTY_BREAK;
port->icount.brk++;
usb_serial_handle_break(port);
- } else if (packet[1] & FTDI_RS_PE) {
+ } else if (buf[1] & FTDI_RS_PE) {
flag = TTY_PARITY;
port->icount.parity++;
- } else if (packet[1] & FTDI_RS_FE) {
+ } else if (buf[1] & FTDI_RS_FE) {
flag = TTY_FRAME;
port->icount.frame++;
}
/* Overrun is special, not associated with a char */
- if (packet[1] & FTDI_RS_OE) {
+ if (buf[1] & FTDI_RS_OE) {
port->icount.overrun++;
tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
}
}
- port->icount.rx += len;
- ch = packet + 2;
+ port->icount.rx += len - 2;
if (port->port.console && port->sysrq) {
- for (i = 0; i < len; i++, ch++) {
- if (!usb_serial_handle_sysrq_char(port, *ch))
- tty_insert_flip_char(&port->port, *ch, flag);
+ for (i = 2; i < len; i++) {
+ if (usb_serial_handle_sysrq_char(port, buf[i]))
+ continue;
+ tty_insert_flip_char(&port->port, buf[i], flag);
}
} else {
- tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len);
+ tty_insert_flip_string_fixed_flag(&port->port, buf + 2, flag,
+ len - 2);
}
- return len;
+ return len - 2;
}
static void ftdi_process_read_urb(struct urb *urb)
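
Illustrative sketch (the status bit value is invented): ftdi_process_packet() above now treats the buffer as unsigned char, indexes the payload from offset 2 instead of re-pointing a signed char pointer, and returns len - 2 so the rx count only covers payload bytes. A standalone parser with the same shape:

#include <stdio.h>

#define STAT_TEMT 0x40    /* hypothetical "transmitter empty" bit in byte 1 */

/* Copy the payload of a packet that starts with two status bytes.
 * Returns the number of payload bytes, or 0 for a status-only packet. */
static int process_packet(const unsigned char *buf, int len,
			  unsigned char *out, int *tx_empty)
{
	int i;

	if (len < 2)
		return 0;                        /* malformed */

	*tx_empty = !!(buf[1] & STAT_TEMT);

	if (len == 2)
		return 0;                        /* status only */

	for (i = 2; i < len; i++)
		out[i - 2] = buf[i];

	return len - 2;
}

int main(void)
{
	unsigned char pkt[] = { 0x01, 0x60, 'h', 'i' };
	unsigned char out[8];
	int empty;
	int n = process_packet(pkt, sizeof(pkt), out, &empty);

	printf("%d bytes, temt=%d\n", n, empty);   /* "2 bytes, temt=1" */
	return 0;
}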
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index e8373528264c..d854e04a4286 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -39,6 +39,13 @@
#define FTDI_LUMEL_PD12_PID 0x6002
+/*
+ * Custom USB adapters made by Falconia Partners LLC
+ * for FreeCalypso project, ID codes allocated to Falconia by FTDI.
+ */
+#define FTDI_FALCONIA_JTAG_BUF_PID 0x7150
+#define FTDI_FALCONIA_JTAG_UNBUF_PID 0x7151
+
/* Sienna Serial Interface by Secyourit GmbH */
#define FTDI_SIENNA_PID 0x8348
@@ -160,6 +167,7 @@
#define XSENS_AWINDA_DONGLE_PID 0x0102
#define XSENS_MTW_PID 0x0200 /* Xsens MTw */
#define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */
+#define XSENS_MTIUSBCONVERTER_PID 0x0301 /* MTi USB converter */
#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
/* Xsens devices using FTDI VID */
@@ -1560,6 +1568,13 @@
#define UNJO_ISODEBUG_V1_PID 0x150D
/*
+ * IDS GmbH
+ */
+#define IDS_VID 0x2CAF
+#define IDS_SI31A_PID 0x13A2
+#define IDS_CM31A_PID 0x13A3
+
+/*
* U-Blox products (http://www.u-blox.com).
*/
#define UBLOX_VID 0x1546
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 633550ec3025..f29c3a936a08 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1138,8 +1138,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p,
send it directly to the tty port */
if (garmin_data_p->flags & FLAGS_QUEUING) {
pkt_add(garmin_data_p, data, data_length);
- } else if (bulk_data ||
- getLayerId(data) == GARMIN_LAYERID_APPL) {
+ } else if (bulk_data || (data_length >= sizeof(u32) &&
+ getLayerId(data) == GARMIN_LAYERID_APPL)) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= APP_RESP_SEEN;
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index dab8c18e726f..44c902c7d240 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -3021,26 +3021,32 @@ static int edge_startup(struct usb_serial *serial)
response = -ENODEV;
}
- usb_free_urb(edge_serial->interrupt_read_urb);
- kfree(edge_serial->interrupt_in_buffer);
-
- usb_free_urb(edge_serial->read_urb);
- kfree(edge_serial->bulk_in_buffer);
-
- kfree(edge_serial);
-
- return response;
+ goto error;
}
/* start interrupt read for this edgeport this interrupt will
* continue as long as the edgeport is connected */
response = usb_submit_urb(edge_serial->interrupt_read_urb,
GFP_KERNEL);
- if (response)
+ if (response) {
dev_err(ddev, "%s - Error %d submitting control urb\n",
__func__, response);
+
+ goto error;
+ }
}
return response;
+
+error:
+ usb_free_urb(edge_serial->interrupt_read_urb);
+ kfree(edge_serial->interrupt_in_buffer);
+
+ usb_free_urb(edge_serial->read_urb);
+ kfree(edge_serial->bulk_in_buffer);
+
+ kfree(edge_serial);
+
+ return response;
}
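
Illustrative sketch (plain malloc'd buffers in place of the URBs and driver state): the edge_startup() change above folds two duplicated unwind sequences into one error: label, the usual goto-based cleanup idiom; free() — like kfree() and usb_free_urb() in the driver — being NULL-safe is what lets a single label cover every partial-setup case:

#include <stdio.h>
#include <stdlib.h>

/* Allocate two buffers, bail out through one error label. */
static int setup_demo(int fail_step)
{
	char *intr_buf = NULL, *bulk_buf = NULL;
	int err;

	intr_buf = malloc(64);
	if (!intr_buf) {
		err = -12;              /* stand-in for -ENOMEM */
		goto error;
	}

	bulk_buf = malloc(64);
	if (!bulk_buf) {
		err = -12;
		goto error;
	}

	if (fail_step) {                /* e.g. a failed URB submission */
		err = -19;              /* stand-in for -ENODEV */
		goto error;
	}

	free(bulk_buf);
	free(intr_buf);
	return 0;

error:
	free(bulk_buf);                 /* free(NULL) is a no-op */
	free(intr_buf);
	return err;
}

int main(void)
{
	printf("%d %d\n", setup_demo(0), setup_demo(1));   /* "0 -19" */
	return 0;
}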
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 449e89db9cea..6dc93652afc1 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -353,10 +353,11 @@ static void iuu_led_activity_on(struct urb *urb)
struct usb_serial_port *port = urb->context;
int result;
char *buf_ptr = port->write_urb->transfer_buffer;
- *buf_ptr++ = IUU_SET_LED;
+
if (xmas) {
- get_random_bytes(buf_ptr, 6);
- *(buf_ptr+7) = 1;
+ buf_ptr[0] = IUU_SET_LED;
+ get_random_bytes(buf_ptr + 1, 6);
+ buf_ptr[7] = 1;
} else {
iuu_rgbf_fill_buffer(buf_ptr, 255, 255, 0, 0, 0, 0, 255);
}
@@ -374,13 +375,14 @@ static void iuu_led_activity_off(struct urb *urb)
struct usb_serial_port *port = urb->context;
int result;
char *buf_ptr = port->write_urb->transfer_buffer;
+
if (xmas) {
iuu_rxcmd(urb);
return;
- } else {
- *buf_ptr++ = IUU_SET_LED;
- iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255);
}
+
+ iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255);
+
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
@@ -534,23 +536,29 @@ static int iuu_uart_flush(struct usb_serial_port *port)
struct device *dev = &port->dev;
int i;
int status;
- u8 rxcmd = IUU_UART_RX;
+ u8 *rxcmd;
struct iuu_private *priv = usb_get_serial_port_data(port);
if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0)
return -EIO;
+ rxcmd = kmalloc(1, GFP_KERNEL);
+ if (!rxcmd)
+ return -ENOMEM;
+
+ rxcmd[0] = IUU_UART_RX;
+
for (i = 0; i < 2; i++) {
- status = bulk_immediate(port, &rxcmd, 1);
+ status = bulk_immediate(port, rxcmd, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_write error\n", __func__);
- return status;
+ goto out_free;
}
status = read_immediate(port, &priv->len, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
if (priv->len > 0) {
@@ -558,12 +566,16 @@ static int iuu_uart_flush(struct usb_serial_port *port)
status = read_immediate(port, priv->buf, priv->len);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
}
}
dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__);
iuu_led(port, 0, 0xF000, 0, 0xFF);
+
+out_free:
+ kfree(rxcmd);
+
return status;
}
@@ -697,14 +709,16 @@ static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port,
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
- if (count > 256)
- return -ENOMEM;
-
spin_lock_irqsave(&priv->lock, flags);
+ count = min(count, 256 - priv->writelen);
+ if (count == 0)
+ goto out;
+
/* fill the buffer */
memcpy(priv->writebuf + priv->writelen, buf, count);
priv->writelen += count;
+out:
spin_unlock_irqrestore(&priv->lock, flags);
return count;
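
Illustrative sketch (locking elided; the real function holds priv->lock around the copy): iuu_uart_write() above stops rejecting writes larger than 256 bytes with -ENOMEM and instead clamps count to the free space in the staging buffer, returning how many bytes were actually accepted:

#include <stdio.h>
#include <string.h>

#define WRITE_BUF_SIZE 256

static unsigned char writebuf[WRITE_BUF_SIZE];
static int writelen;                    /* bytes already staged */

/* Accept at most the free space; a full buffer accepts 0 bytes. */
static int stage_write(const unsigned char *buf, int count)
{
	if (count > WRITE_BUF_SIZE - writelen)
		count = WRITE_BUF_SIZE - writelen;
	if (count == 0)
		return 0;

	memcpy(writebuf + writelen, buf, count);
	writelen += count;
	return count;
}

int main(void)
{
	unsigned char chunk[200];

	memset(chunk, 'x', sizeof(chunk));
	printf("%d\n", stage_write(chunk, 200));   /* 200 */
	printf("%d\n", stage_write(chunk, 200));   /* 56  */
	printf("%d\n", stage_write(chunk, 200));   /* 0   */
	return 0;
}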
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 38d43c4b7ce5..766969cf72b2 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -40,11 +40,12 @@
#define DRIVER_AUTHOR "Brian Warner <warner@lothar.com>"
#define DRIVER_DESC "USB Keyspan PDA Converter driver"
+#define KEYSPAN_TX_THRESHOLD 16
+
struct keyspan_pda_private {
int tx_room;
int tx_throttled;
- struct work_struct wakeup_work;
- struct work_struct unthrottle_work;
+ struct work_struct unthrottle_work;
struct usb_serial *serial;
struct usb_serial_port *port;
};
@@ -97,15 +98,6 @@ static const struct usb_device_id id_table_fake_xircom[] = {
};
#endif
-static void keyspan_pda_wakeup_write(struct work_struct *work)
-{
- struct keyspan_pda_private *priv =
- container_of(work, struct keyspan_pda_private, wakeup_work);
- struct usb_serial_port *port = priv->port;
-
- tty_port_tty_wakeup(&port->port);
-}
-
static void keyspan_pda_request_unthrottle(struct work_struct *work)
{
struct keyspan_pda_private *priv =
@@ -120,7 +112,7 @@ static void keyspan_pda_request_unthrottle(struct work_struct *work)
7, /* request_unthrottle */
USB_TYPE_VENDOR | USB_RECIP_INTERFACE
| USB_DIR_OUT,
- 16, /* value: threshold */
+ KEYSPAN_TX_THRESHOLD,
0, /* index */
NULL,
0,
@@ -139,6 +131,8 @@ static void keyspan_pda_rx_interrupt(struct urb *urb)
int retval;
int status = urb->status;
struct keyspan_pda_private *priv;
+ unsigned long flags;
+
priv = usb_get_serial_port_data(port);
switch (status) {
@@ -172,18 +166,21 @@ static void keyspan_pda_rx_interrupt(struct urb *urb)
break;
case 1:
/* status interrupt */
- if (len < 3) {
+ if (len < 2) {
dev_warn(&port->dev, "short interrupt message received\n");
break;
}
- dev_dbg(&port->dev, "rx int, d1=%d, d2=%d\n", data[1], data[2]);
+ dev_dbg(&port->dev, "rx int, d1=%d\n", data[1]);
switch (data[1]) {
case 1: /* modemline change */
break;
case 2: /* tx unthrottle interrupt */
+ spin_lock_irqsave(&port->lock, flags);
priv->tx_throttled = 0;
+ priv->tx_room = max(priv->tx_room, KEYSPAN_TX_THRESHOLD);
+ spin_unlock_irqrestore(&port->lock, flags);
/* queue up a wakeup at scheduler time */
- schedule_work(&priv->wakeup_work);
+ usb_serial_port_softint(port);
break;
default:
break;
@@ -443,6 +440,7 @@ static int keyspan_pda_write(struct tty_struct *tty,
int request_unthrottle = 0;
int rc = 0;
struct keyspan_pda_private *priv;
+ unsigned long flags;
priv = usb_get_serial_port_data(port);
/* guess how much room is left in the device's ring buffer, and if we
@@ -462,13 +460,13 @@ static int keyspan_pda_write(struct tty_struct *tty,
the TX urb is in-flight (wait until it completes)
the device is full (wait until it says there is room)
*/
- spin_lock_bh(&port->lock);
+ spin_lock_irqsave(&port->lock, flags);
if (!test_bit(0, &port->write_urbs_free) || priv->tx_throttled) {
- spin_unlock_bh(&port->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
clear_bit(0, &port->write_urbs_free);
- spin_unlock_bh(&port->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
/* At this point the URB is in our control, nobody else can submit it
again (the only sudden transition was the one from EINPROGRESS to
@@ -514,7 +512,8 @@ static int keyspan_pda_write(struct tty_struct *tty,
goto exit;
}
}
- if (count > priv->tx_room) {
+
+ if (count >= priv->tx_room) {
/* we're about to completely fill the Tx buffer, so
we'll be throttled afterwards. */
count = priv->tx_room;
@@ -547,7 +546,7 @@ static int keyspan_pda_write(struct tty_struct *tty,
rc = count;
exit:
- if (rc < 0)
+ if (rc <= 0)
set_bit(0, &port->write_urbs_free);
return rc;
}
@@ -556,27 +555,28 @@ exit:
static void keyspan_pda_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- struct keyspan_pda_private *priv;
set_bit(0, &port->write_urbs_free);
- priv = usb_get_serial_port_data(port);
/* queue up a wakeup at scheduler time */
- schedule_work(&priv->wakeup_work);
+ usb_serial_port_softint(port);
}
static int keyspan_pda_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct keyspan_pda_private *priv;
- priv = usb_get_serial_port_data(port);
- /* used by n_tty.c for processing of tabs and such. Giving it our
- conservative guess is probably good enough, but needs testing by
- running a console through the device. */
- return priv->tx_room;
-}
+ struct keyspan_pda_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ int room = 0;
+ spin_lock_irqsave(&port->lock, flags);
+ if (test_bit(0, &port->write_urbs_free) && !priv->tx_throttled)
+ room = priv->tx_room;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return room;
+}
static int keyspan_pda_chars_in_buffer(struct tty_struct *tty)
{
@@ -656,8 +656,12 @@ error:
}
static void keyspan_pda_close(struct usb_serial_port *port)
{
+ struct keyspan_pda_private *priv = usb_get_serial_port_data(port);
+
usb_kill_urb(port->write_urb);
usb_kill_urb(port->interrupt_in_urb);
+
+ cancel_work_sync(&priv->unthrottle_work);
}
@@ -715,7 +719,6 @@ static int keyspan_pda_port_probe(struct usb_serial_port *port)
if (!priv)
return -ENOMEM;
- INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write);
INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle);
priv->serial = port->serial;
priv->port = port;
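
Illustrative sketch (locking elided; the driver takes port->lock around both helpers): after the keyspan_pda rework above, write_room() reports zero while the single write URB is busy or the device has throttled us, and the unthrottle interrupt both clears the flag and bumps tx_room to at least the 16-byte threshold passed in the request_unthrottle message:

#include <stdio.h>

#define TX_THRESHOLD 16

struct pda_state {
	int tx_room;        /* device buffer space we believe is free */
	int tx_throttled;   /* device told us to stop */
	int urb_free;       /* our single write URB is idle */
};

static int write_room(const struct pda_state *s)
{
	/* nothing can be queued while the URB is busy or we are throttled */
	if (!s->urb_free || s->tx_throttled)
		return 0;
	return s->tx_room;
}

static void unthrottle_irq(struct pda_state *s)
{
	s->tx_throttled = 0;
	if (s->tx_room < TX_THRESHOLD)
		s->tx_room = TX_THRESHOLD;   /* device has at least this much */
}

int main(void)
{
	struct pda_state s = { .tx_room = 4, .tx_throttled = 1, .urb_free = 1 };

	printf("%d\n", write_room(&s));      /* 0  */
	unthrottle_irq(&s);
	printf("%d\n", write_room(&s));      /* 16 */
	return 0;
}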
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 5ee48b0650c4..5f6b82ebccc5 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -276,12 +276,12 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
priv->cfg.unknown2 = cfg->unknown2;
spin_unlock_irqrestore(&priv->lock, flags);
+ kfree(cfg);
+
/* READ_ON and urb submission */
rc = usb_serial_generic_open(tty, port);
- if (rc) {
- retval = rc;
- goto err_free_cfg;
- }
+ if (rc)
+ return rc;
rc = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
@@ -324,8 +324,6 @@ err_disable_read:
KLSI_TIMEOUT);
err_generic_close:
usb_serial_generic_close(port);
-err_free_cfg:
- kfree(cfg);
return retval;
}
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index c0232b67a40f..1e3ee2bfbcd0 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -638,6 +638,8 @@ static void parport_mos7715_restore_state(struct parport *pp,
spin_unlock(&release_lock);
return;
}
+ mos_parport->shadowDCR = s->u.pc.ctr;
+ mos_parport->shadowECR = s->u.pc.ecr;
write_parport_reg_nonblock(mos_parport, MOS7720_DCR,
mos_parport->shadowDCR);
write_parport_reg_nonblock(mos_parport, MOS7720_ECR,
@@ -1248,8 +1250,10 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
if (urb->transfer_buffer == NULL) {
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
GFP_ATOMIC);
- if (!urb->transfer_buffer)
+ if (!urb->transfer_buffer) {
+ bytes_sent = -ENOMEM;
goto exit;
+ }
}
transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 4a7bd26841af..c78dfb7fd394 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1340,8 +1340,10 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
if (urb->transfer_buffer == NULL) {
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
GFP_ATOMIC);
- if (!urb->transfer_buffer)
+ if (!urb->transfer_buffer) {
+ bytes_sent = -ENOMEM;
goto exit;
+ }
}
transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 8dad374ec81d..5e3430d456f3 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -245,10 +245,12 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125
+#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
+#define QUECTEL_PRODUCT_EC200T 0x6026
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -417,11 +419,14 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_PH8 0x0053
#define CINTERION_PRODUCT_AHXX 0x0055
#define CINTERION_PRODUCT_PLXX 0x0060
+#define CINTERION_PRODUCT_EXS82 0x006c
#define CINTERION_PRODUCT_PH8_2RMNET 0x0082
#define CINTERION_PRODUCT_PH8_AUDIO 0x0083
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
#define CINTERION_PRODUCT_CLS8 0x00b0
+#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
+#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -527,6 +532,7 @@ static void option_instat_callback(struct urb *urb);
/* Cellient products */
#define CELLIENT_VENDOR_ID 0x2692
#define CELLIENT_PRODUCT_MEN200 0x9005
+#define CELLIENT_PRODUCT_MPL200 0x9025
/* Hyundai Petatel Inc. products */
#define PETATEL_VENDOR_ID 0x1ff4
@@ -559,6 +565,9 @@ static void option_instat_callback(struct urb *urb);
/* Device flags */
+/* Highest interface number which can be used with NCTRL() and RSVD() */
+#define FLAG_IFNUM_MAX 7
+
/* Interface does not support modem-control requests */
#define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8)
@@ -1093,10 +1102,15 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
.driver_info = RSVD(1) | RSVD(3) },
/* Quectel products using Quectel vendor ID */
- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
- .driver_info = RSVD(4) },
- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
- .driver_info = RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
+ .driver_info = NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff),
+ .driver_info = NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
+ .driver_info = NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
@@ -1105,10 +1119,13 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@@ -1157,6 +1174,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1031, 0xff), /* Telit LE910C1-EUX */
+ .driver_info = NCTRL(0) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */
+ .driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
.driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
@@ -1175,6 +1196,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff), /* Telit FN980 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff), /* Telit FT980-KS */
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff), /* Telit FN980 (PCIe) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1187,6 +1212,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff), /* Telit LE910Cx (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1201,6 +1228,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1230, 0xff), /* Telit LE910Cx (rmnet) */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
@@ -1209,6 +1240,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff), /* Telit LE910-S1 (RNDIS) */
+ .driver_info = NCTRL(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */
+ .driver_info = NCTRL(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
@@ -1538,7 +1573,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(ZTE_VENDOR_ID, 0x1275), /* ZTE P685M */
+ .driver_info = RSVD(3) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
@@ -1812,6 +1848,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */
.driver_info = RSVD(7) },
+ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */
+ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
@@ -1876,12 +1914,17 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff),
.driver_info = RSVD(0) | RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EXS82, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
+ .driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
+ .driver_info = RSVD(0)},
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
@@ -1969,6 +2012,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200),
+ .driver_info = RSVD(1) | RSVD(4) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
@@ -2018,12 +2063,17 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
- { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
+ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
+ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
+ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
+ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -2067,6 +2117,14 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, option_ids);
+static bool iface_is_reserved(unsigned long device_flags, u8 ifnum)
+{
+ if (ifnum > FLAG_IFNUM_MAX)
+ return false;
+
+ return device_flags & RSVD(ifnum);
+}
+
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
@@ -2083,7 +2141,7 @@ static int option_probe(struct usb_serial *serial,
* the same class/subclass/protocol as the serial interfaces. Look at
* the Windows driver .INF files for reserved interface numbers.
*/
- if (device_flags & RSVD(iface_desc->bInterfaceNumber))
+ if (iface_is_reserved(device_flags, iface_desc->bInterfaceNumber))
return -ENODEV;
/*
@@ -2099,6 +2157,14 @@ static int option_probe(struct usb_serial *serial,
return 0;
}
+static bool iface_no_modem_control(unsigned long device_flags, u8 ifnum)
+{
+ if (ifnum > FLAG_IFNUM_MAX)
+ return false;
+
+ return device_flags & NCTRL(ifnum);
+}
+
static int option_attach(struct usb_serial *serial)
{
struct usb_interface_descriptor *iface_desc;
@@ -2114,7 +2180,7 @@ static int option_attach(struct usb_serial *serial)
iface_desc = &serial->interface->cur_altsetting->desc;
- if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
+ if (!iface_no_modem_control(device_flags, iface_desc->bInterfaceNumber))
data->use_send_setup = 1;
if (device_flags & ZLP)
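
The option entries above all follow one pattern: NCTRL(n) marks interface n as a port that must not receive modem-control (send_setup) requests, while RSVD(n) hides interface n from the serial driver entirely because it belongs to a network function such as RNDIS, ECM or MBIM; the new iface_is_reserved()/iface_no_modem_control() helpers merely keep the flag shift within FLAG_IFNUM_MAX. A minimal sketch of how such an entry is composed (the product ID 0x9999 is hypothetical, not part of this table):

{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x9999, 0xff), /* hypothetical modem */
  .driver_info = NCTRL(0) | RSVD(3) }, /* AT port on interface 0, network function on interface 3 */

With such an entry, option_probe() refuses to bind to interface 3 and option_attach() leaves use_send_setup clear for interface 0.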
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 7751b94ac7f5..7df0fbede21b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -94,6 +94,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD381GC_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
@@ -106,6 +107,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
+ { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) },
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
{ USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
{ } /* Terminating entry */
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index c98db6b650a5..3e5442573fe4 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -121,6 +121,7 @@
/* Hewlett-Packard POS Pole Displays */
#define HP_VENDOR_ID 0x03f0
+#define HP_LD381GC_PRODUCT_ID 0x0183
#define HP_LM920_PRODUCT_ID 0x026b
#define HP_TD620_PRODUCT_ID 0x0956
#define HP_LD960_PRODUCT_ID 0x0b39
@@ -151,6 +152,7 @@
/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
#define ADLINK_VENDOR_ID 0x0b63
#define ADLINK_ND6530_PRODUCT_ID 0x6530
+#define ADLINK_ND6530GC_PRODUCT_ID 0x653a
/* SMART USB Serial Adapter */
#define SMART_VENDOR_ID 0x0b8c
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 613f91add03d..0f60363c1bbc 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
+ {DEVICE_SWI(0x1199, 0x9062)}, /* Sierra Wireless EM7305 QDL */
{DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */
{DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
@@ -173,6 +174,8 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */
+ {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */
{DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index c9201e0a8241..ccd516ea2962 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -37,6 +37,7 @@
/* Vendor and product ids */
#define TI_VENDOR_ID 0x0451
#define IBM_VENDOR_ID 0x04b3
+#define STARTECH_VENDOR_ID 0x14b0
#define TI_3410_PRODUCT_ID 0x3410
#define IBM_4543_PRODUCT_ID 0x4543
#define IBM_454B_PRODUCT_ID 0x454b
@@ -374,6 +375,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
+ { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ } /* terminator */
};
@@ -412,6 +414,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
+ { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ } /* terminator */
};
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 35406cb4a726..997ff88ec04b 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -299,6 +299,10 @@ static void usb_wwan_indat_callback(struct urb *urb)
if (status) {
dev_dbg(dev, "%s: nonzero status: %d on endpoint %02x.\n",
__func__, status, endpoint);
+
+ /* don't resubmit on fatal errors */
+ if (status == -ESHUTDOWN || status == -ENOENT)
+ return;
} else {
if (urb->actual_length) {
tty_insert_flip_string(&port->port, data,
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 96cb0409dd89..737b765d0f6e 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -651,6 +651,13 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
need_auto_sense = 1;
}
+ /* Some devices (Kindle) require another command after SYNC CACHE */
+ if ((us->fflags & US_FL_SENSE_AFTER_SYNC) &&
+ srb->cmnd[0] == SYNCHRONIZE_CACHE) {
+ usb_stor_dbg(us, "-- sense after SYNC CACHE\n");
+ need_auto_sense = 1;
+ }
+
/*
* If we have a failure, we're going to do a REQUEST_SENSE
* automatically. Note that we differentiate between a command
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 62ca8e29da48..1fc7143c35a3 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -81,6 +81,19 @@ static void uas_free_streams(struct uas_dev_info *devinfo);
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
int status);
+/*
+ * This driver needs its own workqueue, as we need to control memory allocation.
+ *
+ * In the course of error handling and power management, uas_wait_for_pending_cmnds()
+ * needs to flush pending work items. In these contexts we cannot allocate memory
+ * by doing block IO as we would deadlock. For the same reason we cannot wait
+ * for anything allocating memory not heeding these constraints.
+ *
+ * So we have to control all work items that can be on the workqueue we flush.
+ * Hence we cannot share a queue and need our own.
+ */
+static struct workqueue_struct *workqueue;
+
static void uas_do_work(struct work_struct *work)
{
struct uas_dev_info *devinfo =
@@ -109,7 +122,7 @@ static void uas_do_work(struct work_struct *work)
if (!err)
cmdinfo->state &= ~IS_IN_WORK_LIST;
else
- schedule_work(&devinfo->work);
+ queue_work(workqueue, &devinfo->work);
}
out:
spin_unlock_irqrestore(&devinfo->lock, flags);
@@ -134,7 +147,7 @@ static void uas_add_work(struct uas_cmd_info *cmdinfo)
lockdep_assert_held(&devinfo->lock);
cmdinfo->state |= IS_IN_WORK_LIST;
- schedule_work(&devinfo->work);
+ queue_work(workqueue, &devinfo->work);
}
static void uas_zap_pending(struct uas_dev_info *devinfo, int result)
@@ -190,6 +203,9 @@ static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
struct uas_cmd_info *ci = (void *)&cmnd->SCp;
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ if (status == -ENODEV) /* too late */
+ return;
+
scmd_printk(KERN_INFO, cmnd,
"%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ",
prefix, status, cmdinfo->uas_tag,
@@ -653,8 +669,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
if (devinfo->resetting) {
cmnd->result = DID_ERROR << 16;
cmnd->scsi_done(cmnd);
- spin_unlock_irqrestore(&devinfo->lock, flags);
- return 0;
+ goto zombie;
}
/* Find a free uas-tag */
@@ -690,6 +705,16 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB);
err = uas_submit_urbs(cmnd, devinfo);
+ /*
+ * In case of fatal errors the SCSI layer is peculiar:
+ * a command that has finished counts as a success for the
+ * purpose of queueing, no matter how fatal the error.
+ */
+ if (err == -ENODEV) {
+ cmnd->result = DID_ERROR << 16;
+ cmnd->scsi_done(cmnd);
+ goto zombie;
+ }
if (err) {
/* If we did nothing, give up now */
if (cmdinfo->state & SUBMIT_STATUS_URB) {
@@ -700,6 +725,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
}
devinfo->cmnd[idx] = cmnd;
+zombie:
spin_unlock_irqrestore(&devinfo->lock, flags);
return 0;
}
@@ -848,6 +874,9 @@ static int uas_slave_configure(struct scsi_device *sdev)
if (devinfo->flags & US_FL_NO_READ_CAPACITY_16)
sdev->no_read_capacity_16 = 1;
+ /* Some disks cannot handle WRITE_SAME */
+ if (devinfo->flags & US_FL_NO_SAME)
+ sdev->no_write_same = 1;
/*
* Some disks return the total number of blocks in response
* to READ CAPACITY rather than the highest block number.
@@ -1233,7 +1262,31 @@ static struct usb_driver uas_driver = {
.id_table = uas_usb_ids,
};
-module_usb_driver(uas_driver);
+static int __init uas_init(void)
+{
+ int rv;
+
+ workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM, 0);
+ if (!workqueue)
+ return -ENOMEM;
+
+ rv = usb_register(&uas_driver);
+ if (rv) {
+ destroy_workqueue(workqueue);
+ return rv;
+ }
+
+ return 0;
+}
+
+static void __exit uas_exit(void)
+{
+ usb_deregister(&uas_driver);
+ destroy_workqueue(workqueue);
+}
+
+module_init(uas_init);
+module_exit(uas_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(
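
The comment block introduced above explains why uas now allocates its own queue: work items queued there may be flushed from error-handling and runtime-PM paths, where block I/O (and therefore unbounded memory allocation) must not happen, and WQ_MEM_RECLAIM gives the queue a dedicated rescuer thread so it can always make forward progress. A minimal, self-contained sketch of the same pattern with illustrative names (my_wq, my_init and my_exit are not part of the patch):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* illustrative name */

static int __init my_init(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer thread, so flushing this
	 * queue from memory-reclaim or PM contexts cannot get stuck
	 * behind an allocation. */
	my_wq = alloc_workqueue("my_drv", WQ_MEM_RECLAIM, 0);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);	/* drains any remaining work items */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Work is then queued with queue_work(my_wq, &work) rather than schedule_work(&work), so every item that can land on the flushed queue stays under the driver's control.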
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1880f3e13f57..861153d294b6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2212,6 +2212,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
US_FL_NO_READ_DISC_INFO ),
/*
+ * Reported by Matthias Schwarzott <zzam@gentoo.org>
+ * The Amazon Kindle treats SYNCHRONIZE CACHE as an indication that
+ * the host may be finished with it, and automatically ejects its
+ * emulated media unless it receives another command within one second.
+ */
+UNUSUAL_DEV( 0x1949, 0x0004, 0x0000, 0x9999,
+ "Amazon",
+ "Kindle",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_SENSE_AFTER_SYNC ),
+
+/*
* Reported by Oliver Neukum <oneukum@suse.com>
* This device morphes spontaneously into another device if the access
* pattern of Windows isn't followed. Thus writable media would be dirty
@@ -2323,6 +2335,13 @@ UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000,
USB_SC_DEVICE,USB_PR_DEVICE,NULL,
US_FL_MAX_SECTORS_64 ),
+/* Reported by Cyril Roelandt <tipecaml@gmail.com> */
+UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA | US_FL_IGNORE_UAS ),
+
/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
"iRiver",
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 1b23741036ee..cb7b15ecb7ab 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -28,6 +28,23 @@
* and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
*/
+/* Reported-by: Till Dörges <doerges@pre-sense.de> */
+UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999,
+ "Sony",
+ "PSZ-HA*",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
+/*
+ * Initially Reported-by: Julian Groß <julian.g@posteo.de>
+ * Further reports David C. Partridge <david.partridge@perdrix.co.uk>
+ */
+UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
+ "LaCie",
+ "2Big Quadra USB3",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
* commands in UAS mode. Observed with the 1.28 firmware; are there others?
@@ -73,6 +90,20 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA),
+/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
+ "PNY",
+ "Pro Elite SSD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
+/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
+ "PNY",
+ "Pro Elite SSD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 9a79cd9762f3..2349dfa3b176 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -541,6 +541,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
case 'j':
f |= US_FL_NO_REPORT_LUNS;
break;
+ case 'k':
+ f |= US_FL_NO_SAME;
+ break;
case 'l':
f |= US_FL_NOT_LOCKABLE;
break;
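
With the new 'k' letter, US_FL_NO_SAME can also be requested at runtime through the usual usb-storage quirks syntax (VendorID:ProductID:Flags) instead of patching unusual_devs.h. For example, booting with

    usb-storage.quirks=vvvv:pppp:k

(where vvvv:pppp stand for the device's vendor and product IDs in hex) disables WRITE SAME for that device; on kernels where the parameter is writable, the same string can be written to /sys/module/usb_storage/parameters/quirks before the device is plugged in.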
diff --git a/drivers/usb/typec/tcpci.c b/drivers/usb/typec/tcpci.c
index dfae41fe1331..2c34add37708 100644
--- a/drivers/usb/typec/tcpci.c
+++ b/drivers/usb/typec/tcpci.c
@@ -20,6 +20,15 @@
#define PD_RETRY_COUNT 3
+#define tcpc_presenting_cc1_rd(reg) \
+ (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
+ (((reg) & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) == \
+ (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT)))
+#define tcpc_presenting_cc2_rd(reg) \
+ (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
+ (((reg) & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) == \
+ (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT)))
+
struct tcpci {
struct device *dev;
@@ -168,19 +177,25 @@ static int tcpci_get_cc(struct tcpc_dev *tcpc,
enum typec_cc_status *cc1, enum typec_cc_status *cc2)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
- unsigned int reg;
+ unsigned int reg, role_control;
int ret;
+ ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &role_control);
+ if (ret < 0)
+ return ret;
+
ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, &reg);
if (ret < 0)
return ret;
*cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
TCPC_CC_STATUS_CC1_MASK,
- reg & TCPC_CC_STATUS_TERM);
+ reg & TCPC_CC_STATUS_TERM ||
+ tcpc_presenting_cc1_rd(role_control));
*cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
TCPC_CC_STATUS_CC2_MASK,
- reg & TCPC_CC_STATUS_TERM);
+ reg & TCPC_CC_STATUS_TERM ||
+ tcpc_presenting_cc2_rd(role_control));
return 0;
}
diff --git a/drivers/usb/typec/tcpci_rt1711h.c b/drivers/usb/typec/tcpci_rt1711h.c
index 017389021b96..b56a0880a044 100644
--- a/drivers/usb/typec/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpci_rt1711h.c
@@ -179,26 +179,6 @@ out:
return tcpci_irq(chip->tcpci);
}
-static int rt1711h_init_alert(struct rt1711h_chip *chip,
- struct i2c_client *client)
-{
- int ret;
-
- /* Disable chip interrupts before requesting irq */
- ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
- if (ret < 0)
- return ret;
-
- ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
- rt1711h_irq,
- IRQF_ONESHOT | IRQF_TRIGGER_LOW,
- dev_name(chip->dev), chip);
- if (ret < 0)
- return ret;
- enable_irq_wake(client->irq);
- return 0;
-}
-
static int rt1711h_sw_reset(struct rt1711h_chip *chip)
{
int ret;
@@ -260,7 +240,8 @@ static int rt1711h_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- ret = rt1711h_init_alert(chip, client);
+ /* Disable chip interrupts before requesting irq */
+ ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
if (ret < 0)
return ret;
@@ -271,6 +252,14 @@ static int rt1711h_probe(struct i2c_client *client,
if (IS_ERR_OR_NULL(chip->tcpci))
return PTR_ERR(chip->tcpci);
+ ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
+ rt1711h_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ dev_name(chip->dev), chip);
+ if (ret < 0)
+ return ret;
+ enable_irq_wake(client->irq);
+
return 0;
}
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 29d72e9b0f01..af41d4dce3ad 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -2727,12 +2727,12 @@ static void tcpm_reset_port(struct tcpm_port *port)
static void tcpm_detach(struct tcpm_port *port)
{
- if (!port->attached)
- return;
-
if (tcpm_port_is_disconnected(port))
port->hard_reset_count = 0;
+ if (!port->attached)
+ return;
+
tcpm_reset_port(port);
}
@@ -3486,7 +3486,7 @@ static void run_state_machine(struct tcpm_port *port)
*/
tcpm_set_pwr_role(port, TYPEC_SOURCE);
tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
- tcpm_set_state(port, SRC_STARTUP, 0);
+ tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
break;
case VCONN_SWAP_ACCEPT:
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index a18112a83fae..dda8bd39c918 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -64,11 +64,15 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
static int ucsi_acpi_probe(struct platform_device *pdev)
{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
struct ucsi_acpi *ua;
struct resource *res;
acpi_status status;
int ret;
+ if (adev->dep_unmet)
+ return -EPROBE_DEFER;
+
ua = devm_kzalloc(&pdev->dev, sizeof(*ua), GFP_KERNEL);
if (!ua)
return -ENOMEM;
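
ACPI expresses ordering requirements between devices with the _DEP object; until those dependencies are satisfied, the companion acpi_device keeps a non-zero dep_unmet count, and touching the UCSI controller too early can fail. Returning -EPROBE_DEFER asks the driver core to retry the probe later. A minimal sketch of the check as it might appear in a platform driver's probe, assuming the usual <linux/acpi.h> and <linux/platform_device.h> includes (example_probe is an illustrative name):

static int example_probe(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

	if (!adev)
		return -ENODEV;
	if (adev->dep_unmet)		/* _DEP dependencies not yet satisfied */
		return -EPROBE_DEFER;	/* driver core retries the probe later */

	/* normal initialization would follow here */
	return 0;
}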
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index 7931e6cecc70..0081c1073b08 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -46,6 +46,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
int sockfd = 0;
struct socket *socket;
int rv;
+ struct task_struct *tcp_rx = NULL;
+ struct task_struct *tcp_tx = NULL;
if (!sdev) {
dev_err(dev, "sdev is null\n");
@@ -61,6 +63,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
dev_info(dev, "stub up\n");
+ mutex_lock(&sdev->ud.sysfs_lock);
spin_lock_irq(&sdev->ud.lock);
if (sdev->ud.status != SDEV_ST_AVAILABLE) {
@@ -69,23 +72,49 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
}
socket = sockfd_lookup(sockfd, &err);
- if (!socket)
+ if (!socket) {
+ dev_err(dev, "failed to lookup sock");
goto err;
+ }
- sdev->ud.tcp_socket = socket;
- sdev->ud.sockfd = sockfd;
+ if (socket->type != SOCK_STREAM) {
+ dev_err(dev, "Expecting SOCK_STREAM - found %d",
+ socket->type);
+ goto sock_err;
+ }
+ /* unlock and create threads and get tasks */
spin_unlock_irq(&sdev->ud.lock);
+ tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx");
+ if (IS_ERR(tcp_rx)) {
+ sockfd_put(socket);
+ goto unlock_mutex;
+ }
+ tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx");
+ if (IS_ERR(tcp_tx)) {
+ kthread_stop(tcp_rx);
+ sockfd_put(socket);
+ goto unlock_mutex;
+ }
- sdev->ud.tcp_rx = kthread_get_run(stub_rx_loop, &sdev->ud,
- "stub_rx");
- sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud,
- "stub_tx");
+ /* get task structs now */
+ get_task_struct(tcp_rx);
+ get_task_struct(tcp_tx);
+ /* lock and update sdev->ud state */
spin_lock_irq(&sdev->ud.lock);
+ sdev->ud.tcp_socket = socket;
+ sdev->ud.sockfd = sockfd;
+ sdev->ud.tcp_rx = tcp_rx;
+ sdev->ud.tcp_tx = tcp_tx;
sdev->ud.status = SDEV_ST_USED;
spin_unlock_irq(&sdev->ud.lock);
+ wake_up_process(sdev->ud.tcp_rx);
+ wake_up_process(sdev->ud.tcp_tx);
+
+ mutex_unlock(&sdev->ud.sysfs_lock);
+
} else {
dev_info(dev, "stub down\n");
@@ -96,12 +125,17 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
spin_unlock_irq(&sdev->ud.lock);
usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN);
+ mutex_unlock(&sdev->ud.sysfs_lock);
}
return count;
+sock_err:
+ sockfd_put(socket);
err:
spin_unlock_irq(&sdev->ud.lock);
+unlock_mutex:
+ mutex_unlock(&sdev->ud.sysfs_lock);
return -EINVAL;
}
static DEVICE_ATTR_WO(usbip_sockfd);
@@ -267,6 +301,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
sdev->ud.side = USBIP_STUB;
sdev->ud.status = SDEV_ST_AVAILABLE;
spin_lock_init(&sdev->ud.lock);
+ mutex_init(&sdev->ud.sysfs_lock);
sdev->ud.tcp_socket = NULL;
sdev->ud.sockfd = -1;
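
The reworked usbip_sockfd_store() follows a pattern repeated in the vhci and vudc changes below: validate that the fd really is a SOCK_STREAM socket, create the rx/tx kthreads while no spinlock is held (kthread_create() may sleep), then publish the socket and task pointers and flip the status in one locked section, and only afterwards wake the threads; the new sysfs_lock mutex serializes all of this against the event handler tearing the device down. A sketch of the ordering only, with illustrative names (start_device, rx_loop, tx_loop) and error codes simplified:

/* Caller is assumed to hold ud->sysfs_lock for the whole sequence. */
static int start_device(struct usbip_device *ud, struct socket *sock)
{
	struct task_struct *rx, *tx;

	/* kthread_create() may sleep, so no spinlock is held here */
	rx = kthread_create(rx_loop, ud, "usbip_rx");
	if (IS_ERR(rx))
		return PTR_ERR(rx);
	tx = kthread_create(tx_loop, ud, "usbip_tx");
	if (IS_ERR(tx)) {
		kthread_stop(rx);
		return PTR_ERR(tx);
	}
	get_task_struct(rx);
	get_task_struct(tx);

	spin_lock_irq(&ud->lock);	/* publish everything atomically */
	ud->tcp_socket = sock;
	ud->tcp_rx = rx;
	ud->tcp_tx = tx;
	ud->status = SDEV_ST_USED;
	spin_unlock_irq(&ud->lock);

	/* wake the threads only once the state they will read is visible */
	wake_up_process(ud->tcp_rx);
	wake_up_process(ud->tcp_tx);
	return 0;
}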
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index 8be857a4fa13..a7e6ce96f62c 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -263,6 +263,9 @@ struct usbip_device {
/* lock for status */
spinlock_t lock;
+ /* mutex for synchronizing sysfs store paths */
+ struct mutex sysfs_lock;
+
int sockfd;
struct socket *tcp_socket;
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index 5d88917c9631..086ca76dd053 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -70,6 +70,7 @@ static void event_handler(struct work_struct *work)
while ((ud = get_event()) != NULL) {
usbip_dbg_eh("pending event %lx\n", ud->event);
+ mutex_lock(&ud->sysfs_lock);
/*
* NOTE: shutdown must come first.
* Shutdown the device.
@@ -90,6 +91,7 @@ static void event_handler(struct work_struct *work)
ud->eh_ops.unusable(ud);
unset_event(ud, USBIP_EH_UNUSABLE);
}
+ mutex_unlock(&ud->sysfs_lock);
wake_up(&ud->eh_waitq);
}
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index d5a036bf904b..b13ed5a7618d 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -396,6 +396,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
wValue);
+ if (wValue >= 32)
+ goto error;
vhci_hcd->port_status[rhport] &= ~(1 << wValue);
break;
}
@@ -592,6 +594,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
pr_err("invalid port number %d\n", wIndex);
goto error;
}
+ if (wValue >= 32)
+ goto error;
if (hcd->speed == HCD_USB3) {
if ((vhci_hcd->port_status[rhport] &
USB_SS_PORT_STAT_POWER) != 0) {
@@ -1097,6 +1101,7 @@ static void vhci_device_init(struct vhci_device *vdev)
vdev->ud.side = USBIP_VHCI;
vdev->ud.status = VDEV_ST_NULL;
spin_lock_init(&vdev->ud.lock);
+ mutex_init(&vdev->ud.sysfs_lock);
INIT_LIST_HEAD(&vdev->priv_rx);
INIT_LIST_HEAD(&vdev->priv_tx);
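
The two new wValue checks matter because the port_status[] entries are 32-bit: C leaves a shift undefined when the shift count reaches the width of the promoted type, so a feature selector of 32 or more arriving in a hub control request could otherwise compute garbage instead of being rejected. A tiny sketch of the guarded update, using the variables from vhci_hub_control():

	if (wValue >= 32)	/* 1 << 32 is undefined for a 32-bit value */
		goto error;
	vhci_hcd->port_status[rhport] &= ~(1 << wValue);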
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index be37aec250c2..ebc7be1d9820 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -185,6 +185,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
usbip_dbg_vhci_sysfs("enter\n");
+ mutex_lock(&vdev->ud.sysfs_lock);
+
/* lock */
spin_lock_irqsave(&vhci->lock, flags);
spin_lock(&vdev->ud.lock);
@@ -195,6 +197,7 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
/* unlock */
spin_unlock(&vdev->ud.lock);
spin_unlock_irqrestore(&vhci->lock, flags);
+ mutex_unlock(&vdev->ud.sysfs_lock);
return -EINVAL;
}
@@ -205,6 +208,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
+ mutex_unlock(&vdev->ud.sysfs_lock);
+
return 0;
}
@@ -312,6 +317,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
struct vhci *vhci;
int err;
unsigned long flags;
+ struct task_struct *tcp_rx = NULL;
+ struct task_struct *tcp_tx = NULL;
/*
* @rhport: port number of vhci_hcd
@@ -347,14 +354,43 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
else
vdev = &vhci->vhci_hcd_hs->vdev[rhport];
+ mutex_lock(&vdev->ud.sysfs_lock);
+
/* Extract socket from fd. */
socket = sockfd_lookup(sockfd, &err);
- if (!socket)
- return -EINVAL;
+ if (!socket) {
+ dev_err(dev, "failed to lookup sock");
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
+ if (socket->type != SOCK_STREAM) {
+ dev_err(dev, "Expecting SOCK_STREAM - found %d",
+ socket->type);
+ sockfd_put(socket);
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
- /* now need lock until setting vdev status as used */
+ /* create threads before locking */
+ tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
+ if (IS_ERR(tcp_rx)) {
+ sockfd_put(socket);
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
+ tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx");
+ if (IS_ERR(tcp_tx)) {
+ kthread_stop(tcp_rx);
+ sockfd_put(socket);
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ /* get task structs now */
+ get_task_struct(tcp_rx);
+ get_task_struct(tcp_tx);
- /* begin a lock */
+ /* now take the locks until the vdev status is set */
spin_lock_irqsave(&vhci->lock, flags);
spin_lock(&vdev->ud.lock);
@@ -364,13 +400,16 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
spin_unlock_irqrestore(&vhci->lock, flags);
sockfd_put(socket);
+ kthread_stop_put(tcp_rx);
+ kthread_stop_put(tcp_tx);
dev_err(dev, "port %d already used\n", rhport);
/*
* Will be retried from userspace
* if there's another free port.
*/
- return -EBUSY;
+ err = -EBUSY;
+ goto unlock_mutex;
}
dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n",
@@ -382,18 +421,28 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
vdev->speed = speed;
vdev->ud.sockfd = sockfd;
vdev->ud.tcp_socket = socket;
+ vdev->ud.tcp_rx = tcp_rx;
+ vdev->ud.tcp_tx = tcp_tx;
vdev->ud.status = VDEV_ST_NOTASSIGNED;
spin_unlock(&vdev->ud.lock);
spin_unlock_irqrestore(&vhci->lock, flags);
/* end the lock */
- vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
- vdev->ud.tcp_tx = kthread_get_run(vhci_tx_loop, &vdev->ud, "vhci_tx");
+ wake_up_process(vdev->ud.tcp_rx);
+ wake_up_process(vdev->ud.tcp_tx);
rh_port_connect(vdev, speed);
+ dev_info(dev, "Device attached\n");
+
+ mutex_unlock(&vdev->ud.sysfs_lock);
+
return count;
+
+unlock_mutex:
+ mutex_unlock(&vdev->ud.sysfs_lock);
+ return err;
}
static DEVICE_ATTR_WO(attach);
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index 1634d8698e15..74568325d8c0 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -571,6 +571,7 @@ static int init_vudc_hw(struct vudc *udc)
init_waitqueue_head(&udc->tx_waitq);
spin_lock_init(&ud->lock);
+ mutex_init(&ud->sysfs_lock);
ud->status = SDEV_ST_AVAILABLE;
ud->side = USBIP_VUDC;
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index 6dcd3ff655c3..7b1437a51304 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -12,6 +12,7 @@
#include <linux/usb/ch9.h>
#include <linux/sysfs.h>
#include <linux/kthread.h>
+#include <linux/file.h>
#include <linux/byteorder/generic.h>
#include "usbip_common.h"
@@ -90,8 +91,9 @@ unlock:
}
static BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor));
-static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *attr,
- const char *in, size_t count)
+static ssize_t usbip_sockfd_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *in, size_t count)
{
struct vudc *udc = (struct vudc *) dev_get_drvdata(dev);
int rv;
@@ -100,6 +102,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
struct socket *socket;
unsigned long flags;
int ret;
+ struct task_struct *tcp_rx = NULL;
+ struct task_struct *tcp_tx = NULL;
rv = kstrtoint(in, 0, &sockfd);
if (rv != 0)
@@ -109,6 +113,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
dev_err(dev, "no device");
return -ENODEV;
}
+ mutex_lock(&udc->ud.sysfs_lock);
spin_lock_irqsave(&udc->lock, flags);
/* Don't export what we don't have */
if (!udc->driver || !udc->pullup) {
@@ -138,24 +143,58 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
goto unlock_ud;
}
- udc->ud.tcp_socket = socket;
+ if (socket->type != SOCK_STREAM) {
+ dev_err(dev, "Expecting SOCK_STREAM - found %d",
+ socket->type);
+ ret = -EINVAL;
+ goto sock_err;
+ }
+ /* unlock and create threads and get tasks */
spin_unlock_irq(&udc->ud.lock);
spin_unlock_irqrestore(&udc->lock, flags);
- udc->ud.tcp_rx = kthread_get_run(&v_rx_loop,
- &udc->ud, "vudc_rx");
- udc->ud.tcp_tx = kthread_get_run(&v_tx_loop,
- &udc->ud, "vudc_tx");
+ tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx");
+ if (IS_ERR(tcp_rx)) {
+ sockfd_put(socket);
+ mutex_unlock(&udc->ud.sysfs_lock);
+ return -EINVAL;
+ }
+ tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx");
+ if (IS_ERR(tcp_tx)) {
+ kthread_stop(tcp_rx);
+ sockfd_put(socket);
+ mutex_unlock(&udc->ud.sysfs_lock);
+ return -EINVAL;
+ }
+
+ /* get task structs now */
+ get_task_struct(tcp_rx);
+ get_task_struct(tcp_tx);
+ /* lock and update udc->ud state */
spin_lock_irqsave(&udc->lock, flags);
spin_lock_irq(&udc->ud.lock);
+
+ udc->ud.tcp_socket = socket;
+ udc->ud.tcp_rx = tcp_rx;
+ udc->ud.tcp_tx = tcp_tx;
udc->ud.status = SDEV_ST_USED;
+
spin_unlock_irq(&udc->ud.lock);
ktime_get_ts64(&udc->start_time);
v_start_timer(udc);
udc->connected = 1;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ wake_up_process(udc->ud.tcp_rx);
+ wake_up_process(udc->ud.tcp_tx);
+
+ mutex_unlock(&udc->ud.sysfs_lock);
+ return count;
+
} else {
if (!udc->connected) {
dev_err(dev, "Device not connected");
@@ -174,13 +213,17 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
}
spin_unlock_irqrestore(&udc->lock, flags);
+ mutex_unlock(&udc->ud.sysfs_lock);
return count;
+sock_err:
+ sockfd_put(socket);
unlock_ud:
spin_unlock_irq(&udc->ud.lock);
unlock:
spin_unlock_irqrestore(&udc->lock, flags);
+ mutex_unlock(&udc->ud.sysfs_lock);
return ret;
}
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index e7770b511d03..c99fcc6c2eba 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -108,12 +108,13 @@ struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
return ERR_PTR(-ENOMEM);
type->kobj.kset = parent->mdev_types_kset;
+ type->parent = parent;
ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL,
"%s-%s", dev_driver_string(parent->dev),
group->name);
if (ret) {
- kfree(type);
+ kobject_put(&type->kobj);
return ERR_PTR(ret);
}
@@ -135,7 +136,6 @@ struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
}
type->group = group;
- type->parent = parent;
return type;
attrs_failed:
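
The two mdev changes encode a general kobject rule: once kobject_init_and_add() has been called, the object holds a reference and must be cleaned up with kobject_put(), which runs the ktype's release callback, never with a bare kfree(); and any field that release callback might dereference (here type->parent) has to be valid before the call. A minimal sketch with illustrative names (struct foo, foo_ktype and foo_create are not mdev code):

#include <linux/err.h>
#include <linux/kobject.h>
#include <linux/slab.h>

struct foo {
	struct kobject kobj;
	void *parent;			/* whatever the release callback needs */
};

static void foo_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct foo, kobj));
}

static struct kobj_type foo_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.release = foo_release,
};

static struct foo *foo_create(struct kset *kset, void *parent, int id)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	int ret;

	if (!foo)
		return ERR_PTR(-ENOMEM);

	foo->kobj.kset = kset;
	foo->parent = parent;	/* set before init_and_add: release() may run */
	ret = kobject_init_and_add(&foo->kobj, &foo_ktype, NULL, "foo-%d", id);
	if (ret) {
		kobject_put(&foo->kobj);	/* foo_release() frees foo */
		return ERR_PTR(ret);
	}
	return foo;
}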
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 66783a37f450..c48e1d84efb6 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -29,6 +29,7 @@
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>
+#include <linux/sched/mm.h>
#include "vfio_pci_private.h"
@@ -117,8 +118,6 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
int bar;
struct vfio_pci_dummy_resource *dummy_res;
- INIT_LIST_HEAD(&vdev->dummy_resources_list);
-
for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
res = vdev->pdev->resource + bar;
@@ -181,6 +180,7 @@ no_mmap:
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);
/*
* INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
@@ -407,6 +407,19 @@ static void vfio_pci_release(void *device_data)
if (!(--vdev->refcnt)) {
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
+ mutex_lock(&vdev->igate);
+ if (vdev->err_trigger) {
+ eventfd_ctx_put(vdev->err_trigger);
+ vdev->err_trigger = NULL;
+ }
+ mutex_unlock(&vdev->igate);
+
+ mutex_lock(&vdev->igate);
+ if (vdev->req_trigger) {
+ eventfd_ctx_put(vdev->req_trigger);
+ vdev->req_trigger = NULL;
+ }
+ mutex_unlock(&vdev->igate);
}
mutex_unlock(&driver_lock);
@@ -623,6 +636,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
return 0;
}
+struct vfio_devices {
+ struct vfio_device **devices;
+ int cur_index;
+ int max_index;
+};
+
static long vfio_pci_ioctl(void *device_data,
unsigned int cmd, unsigned long arg)
{
@@ -696,7 +715,7 @@ static long vfio_pci_ioctl(void *device_data,
{
void __iomem *io;
size_t size;
- u16 orig_cmd;
+ u16 cmd;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
@@ -716,10 +735,7 @@ static long vfio_pci_ioctl(void *device_data,
* Is it really there? Enable memory decode for
* implicit access in pci_map_rom().
*/
- pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
- pci_write_config_word(pdev, PCI_COMMAND,
- orig_cmd | PCI_COMMAND_MEMORY);
-
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
io = pci_map_rom(pdev, &size);
if (io) {
info.flags = VFIO_REGION_INFO_FLAG_READ;
@@ -727,8 +743,8 @@ static long vfio_pci_ioctl(void *device_data,
} else {
info.size = 0;
}
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
- pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
break;
}
case VFIO_PCI_VGA_REGION_INDEX:
@@ -865,8 +881,16 @@ static long vfio_pci_ioctl(void *device_data,
return ret;
} else if (cmd == VFIO_DEVICE_RESET) {
- return vdev->reset_works ?
- pci_try_reset_function(vdev->pdev) : -EINVAL;
+ int ret;
+
+ if (!vdev->reset_works)
+ return -EINVAL;
+
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ ret = pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+
+ return ret;
} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
struct vfio_pci_hot_reset_info hdr;
@@ -946,8 +970,9 @@ reset_info_exit:
int32_t *group_fds;
struct vfio_pci_group_entry *groups;
struct vfio_pci_group_info info;
+ struct vfio_devices devs = { .cur_index = 0 };
bool slot = false;
- int i, count = 0, ret = 0;
+ int i, group_idx, mem_idx = 0, count = 0, ret = 0;
minsz = offsetofend(struct vfio_pci_hot_reset, count);
@@ -999,9 +1024,9 @@ reset_info_exit:
* user interface and store the group and iommu ID. This
* ensures the group is held across the reset.
*/
- for (i = 0; i < hdr.count; i++) {
+ for (group_idx = 0; group_idx < hdr.count; group_idx++) {
struct vfio_group *group;
- struct fd f = fdget(group_fds[i]);
+ struct fd f = fdget(group_fds[group_idx]);
if (!f.file) {
ret = -EBADF;
break;
@@ -1014,8 +1039,9 @@ reset_info_exit:
break;
}
- groups[i].group = group;
- groups[i].id = vfio_external_user_iommu_id(group);
+ groups[group_idx].group = group;
+ groups[group_idx].id =
+ vfio_external_user_iommu_id(group);
}
kfree(group_fds);
@@ -1034,13 +1060,63 @@ reset_info_exit:
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_validate_devs,
&info, slot);
- if (!ret)
- /* User has access, do the reset */
- ret = pci_reset_bus(vdev->pdev);
+ if (ret)
+ goto hot_reset_release;
+
+ devs.max_index = count;
+ devs.devices = kcalloc(count, sizeof(struct vfio_device *),
+ GFP_KERNEL);
+ if (!devs.devices) {
+ ret = -ENOMEM;
+ goto hot_reset_release;
+ }
+
+ /*
+ * We need to get memory_lock for each device, but devices
+ * can share mmap_sem; therefore we need to zap and hold
+ * the vma_lock for each device, and only then get each
+ * memory_lock.
+ */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_try_zap_and_vma_lock_cb,
+ &devs, slot);
+ if (ret)
+ goto hot_reset_release;
+
+ for (; mem_idx < devs.cur_index; mem_idx++) {
+ struct vfio_pci_device *tmp;
+
+ tmp = vfio_device_data(devs.devices[mem_idx]);
+
+ ret = down_write_trylock(&tmp->memory_lock);
+ if (!ret) {
+ ret = -EBUSY;
+ goto hot_reset_release;
+ }
+ mutex_unlock(&tmp->vma_lock);
+ }
+
+ /* User has access, do the reset */
+ ret = pci_reset_bus(vdev->pdev);
hot_reset_release:
- for (i--; i >= 0; i--)
- vfio_group_put_external_user(groups[i].group);
+ for (i = 0; i < devs.cur_index; i++) {
+ struct vfio_device *device;
+ struct vfio_pci_device *tmp;
+
+ device = devs.devices[i];
+ tmp = vfio_device_data(device);
+
+ if (i < mem_idx)
+ up_write(&tmp->memory_lock);
+ else
+ mutex_unlock(&tmp->vma_lock);
+ vfio_device_put(device);
+ }
+ kfree(devs.devices);
+
+ for (group_idx--; group_idx >= 0; group_idx--)
+ vfio_group_put_external_user(groups[group_idx].group);
kfree(groups);
return ret;
@@ -1121,6 +1197,202 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}
+/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
+static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
+{
+ struct vfio_pci_mmap_vma *mmap_vma, *tmp;
+
+ /*
+ * Lock ordering:
+ * vma_lock is nested under mmap_sem for vm_ops callback paths.
+ * The memory_lock semaphore is used by both code paths calling
+ * into this function to zap vmas and the vm_ops.fault callback
+ * to protect the memory enable state of the device.
+ *
+ * When zapping vmas we need to maintain the mmap_sem => vma_lock
+ * ordering, which requires using vma_lock to walk vma_list to
+ * acquire an mm, then dropping vma_lock to get the mmap_sem and
+ * reacquiring vma_lock. This logic is derived from similar
+ * requirements in uverbs_user_mmap_disassociate().
+ *
+ * mmap_sem must always be the top-level lock when it is taken.
+ * Therefore we can only hold the memory_lock write lock when
+ * vma_list is empty, as we'd need to take mmap_sem to clear
+ * entries. vma_list can only be guaranteed empty when holding
+ * vma_lock, thus memory_lock is nested under vma_lock.
+ *
+ * This enables the vm_ops.fault callback to acquire vma_lock,
+ * followed by memory_lock read lock, while already holding
+ * mmap_sem without risk of deadlock.
+ */
+ while (1) {
+ struct mm_struct *mm = NULL;
+
+ if (try) {
+ if (!mutex_trylock(&vdev->vma_lock))
+ return 0;
+ } else {
+ mutex_lock(&vdev->vma_lock);
+ }
+ while (!list_empty(&vdev->vma_list)) {
+ mmap_vma = list_first_entry(&vdev->vma_list,
+ struct vfio_pci_mmap_vma,
+ vma_next);
+ mm = mmap_vma->vma->vm_mm;
+ if (mmget_not_zero(mm))
+ break;
+
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+ mm = NULL;
+ }
+ if (!mm)
+ return 1;
+ mutex_unlock(&vdev->vma_lock);
+
+ if (try) {
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ mmput(mm);
+ return 0;
+ }
+ } else {
+ down_read(&mm->mmap_sem);
+ }
+ if (mmget_still_valid(mm)) {
+ if (try) {
+ if (!mutex_trylock(&vdev->vma_lock)) {
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ return 0;
+ }
+ } else {
+ mutex_lock(&vdev->vma_lock);
+ }
+ list_for_each_entry_safe(mmap_vma, tmp,
+ &vdev->vma_list, vma_next) {
+ struct vm_area_struct *vma = mmap_vma->vma;
+
+ if (vma->vm_mm != mm)
+ continue;
+
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+
+ zap_vma_ptes(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start);
+ }
+ mutex_unlock(&vdev->vma_lock);
+ }
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ }
+}
+
+void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
+{
+ vfio_pci_zap_and_vma_lock(vdev, false);
+ down_write(&vdev->memory_lock);
+ mutex_unlock(&vdev->vma_lock);
+}
+
+u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
+{
+ u16 cmd;
+
+ down_write(&vdev->memory_lock);
+ pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_MEMORY))
+ pci_write_config_word(vdev->pdev, PCI_COMMAND,
+ cmd | PCI_COMMAND_MEMORY);
+
+ return cmd;
+}
+
+void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
+{
+ pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
+ up_write(&vdev->memory_lock);
+}
+
+/* Caller holds vma_lock */
+static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
+ struct vm_area_struct *vma)
+{
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+ mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
+ if (!mmap_vma)
+ return -ENOMEM;
+
+ mmap_vma->vma = vma;
+ list_add(&mmap_vma->vma_next, &vdev->vma_list);
+
+ return 0;
+}
+
+/*
+ * Zap mmaps on open so that we can fault them in on access and therefore
+ * our vma_list only tracks mappings accessed since last zap.
+ */
+static void vfio_pci_mmap_open(struct vm_area_struct *vma)
+{
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
+static void vfio_pci_mmap_close(struct vm_area_struct *vma)
+{
+ struct vfio_pci_device *vdev = vma->vm_private_data;
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+ mutex_lock(&vdev->vma_lock);
+ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+ if (mmap_vma->vma == vma) {
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+ break;
+ }
+ }
+ mutex_unlock(&vdev->vma_lock);
+}
+
+static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct vfio_pci_device *vdev = vma->vm_private_data;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+
+ mutex_lock(&vdev->vma_lock);
+ down_read(&vdev->memory_lock);
+
+ if (!__vfio_pci_memory_enabled(vdev)) {
+ ret = VM_FAULT_SIGBUS;
+ mutex_unlock(&vdev->vma_lock);
+ goto up_out;
+ }
+
+ if (__vfio_pci_add_vma(vdev, vma)) {
+ ret = VM_FAULT_OOM;
+ mutex_unlock(&vdev->vma_lock);
+ goto up_out;
+ }
+
+ mutex_unlock(&vdev->vma_lock);
+
+ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ ret = VM_FAULT_SIGBUS;
+
+up_out:
+ up_read(&vdev->memory_lock);
+ return ret;
+}
+
+static const struct vm_operations_struct vfio_pci_mmap_ops = {
+ .open = vfio_pci_mmap_open,
+ .close = vfio_pci_mmap_close,
+ .fault = vfio_pci_mmap_fault,
+};
+
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
struct vfio_pci_device *vdev = device_data;
@@ -1170,8 +1442,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- req_len, vma->vm_page_prot);
+ /*
+ * See remap_pfn_range(), called from vfio_pci_mmap_fault(), but we can't
+ * change vm_flags within the fault handler. Set them now.
+ */
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &vfio_pci_mmap_ops;
+
+ return 0;
}
static void vfio_pci_request(void *device_data, unsigned int count)
@@ -1242,7 +1520,11 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&vdev->igate);
spin_lock_init(&vdev->irqlock);
mutex_init(&vdev->ioeventfds_lock);
+ INIT_LIST_HEAD(&vdev->dummy_resources_list);
INIT_LIST_HEAD(&vdev->ioeventfds_list);
+ mutex_init(&vdev->vma_lock);
+ INIT_LIST_HEAD(&vdev->vma_list);
+ init_rwsem(&vdev->memory_lock);
ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
if (ret) {
@@ -1338,12 +1620,6 @@ static struct pci_driver vfio_pci_driver = {
.err_handler = &vfio_err_handlers,
};
-struct vfio_devices {
- struct vfio_device **devices;
- int cur_index;
- int max_index;
-};
-
static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
struct vfio_devices *devs = data;
@@ -1365,6 +1641,39 @@ static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
return 0;
}
+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
+{
+ struct vfio_devices *devs = data;
+ struct vfio_device *device;
+ struct vfio_pci_device *vdev;
+
+ if (devs->cur_index == devs->max_index)
+ return -ENOSPC;
+
+ device = vfio_device_get_from_dev(&pdev->dev);
+ if (!device)
+ return -EINVAL;
+
+ if (pci_dev_driver(pdev) != &vfio_pci_driver) {
+ vfio_device_put(device);
+ return -EBUSY;
+ }
+
+ vdev = vfio_device_data(device);
+
+ /*
+ * Locking multiple devices is prone to deadlock, so back off and
+ * unwind if we hit contention.
+ */
+ if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
+ vfio_device_put(device);
+ return -EBUSY;
+ }
+
+ devs->devices[devs->cur_index++] = device;
+ return 0;
+}
+
/*
* Attempt to do a bus/slot reset if there are devices affected by a reset for
* this device that are needs_reset and all of the affected devices are unused
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 423ea1f98441..a1a26465d224 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -398,6 +398,20 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
*(__le32 *)(&p->write[off]) = cpu_to_le32(write);
}
+/* Caller should hold memory_lock semaphore */
+bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
+
+ /*
+ * SR-IOV VF memory enable is handled by the MSE bit in the
+ * PF SR-IOV capability; there is therefore no need to trigger
+ * faults based on the virtual value.
+ */
+ return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
+}
+
/*
* Restore the *real* BARs after we detect a FLR or backdoor reset.
* (backdoor = some device specific technique that we didn't catch)
@@ -558,13 +572,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
new_cmd = le32_to_cpu(val);
+ phys_io = !!(phys_cmd & PCI_COMMAND_IO);
+ virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
+ new_io = !!(new_cmd & PCI_COMMAND_IO);
+
phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
- phys_io = !!(phys_cmd & PCI_COMMAND_IO);
- virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
- new_io = !!(new_cmd & PCI_COMMAND_IO);
+ if (!new_mem)
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ else
+ down_write(&vdev->memory_lock);
/*
* If the user is writing mem/io enable (new_mem/io) and we
@@ -581,8 +600,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
}
count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
- if (count < 0)
+ if (count < 0) {
+ if (offset == PCI_COMMAND)
+ up_write(&vdev->memory_lock);
return count;
+ }
/*
* Save current memory/io enable bits in vconfig to allow for
@@ -593,6 +615,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
*virt_cmd &= cpu_to_le16(~mask);
*virt_cmd |= cpu_to_le16(new_cmd & mask);
+
+ up_write(&vdev->memory_lock);
}
/* Emulate INTx disable */
@@ -830,8 +854,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
pos - offset + PCI_EXP_DEVCAP,
&cap);
- if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
+ if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+ }
}
/*
@@ -909,8 +936,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
pos - offset + PCI_AF_CAP,
&cap);
- if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
+ if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+ }
}
return count;
@@ -1464,7 +1494,12 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
if (ret)
return ret;
- if (cap <= PCI_CAP_ID_MAX) {
+ /*
+ * ID 0 is a NULL capability, conflicting with our fake
+ * PCI_CAP_ID_BASIC. As it has no content, consider it
+ * hidden for now.
+ */
+ if (cap && cap <= PCI_CAP_ID_MAX) {
len = pci_cap_length[cap];
if (len == 0xFF) { /* Variable length */
len = vfio_cap_len(vdev, cap, pos);
@@ -1703,6 +1738,15 @@ int vfio_config_init(struct vfio_pci_device *vdev)
vconfig[PCI_INTERRUPT_PIN]);
vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
+
+ /*
+ * VFs do not implement the memory enable bit of the COMMAND
+ * register, therefore we'll not have it set in our initial
+ * copy of config space after pci_enable_device(). For
+ * consistency with PFs, set the virtual enable bit here.
+ */
+ *(__le16 *)&vconfig[PCI_COMMAND] |=
+ cpu_to_le16(PCI_COMMAND_MEMORY);
}
if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
@@ -1732,8 +1776,11 @@ void vfio_config_free(struct vfio_pci_device *vdev)
vdev->vconfig = NULL;
kfree(vdev->pci_config_map);
vdev->pci_config_map = NULL;
- kfree(vdev->msi_perm);
- vdev->msi_perm = NULL;
+ if (vdev->msi_perm) {
+ free_perm_bits(vdev->msi_perm);
+ kfree(vdev->msi_perm);
+ vdev->msi_perm = NULL;
+ }
}
/*
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 94594dc63c41..c989f777bf77 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -252,6 +252,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
struct pci_dev *pdev = vdev->pdev;
unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
int ret;
+ u16 cmd;
if (!is_irq_none(vdev))
return -EINVAL;
@@ -261,13 +262,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
return -ENOMEM;
/* return the number of supported vectors if we can't get all: */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
if (ret < nvec) {
if (ret > 0)
pci_free_irq_vectors(pdev);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
kfree(vdev->ctx);
return ret;
}
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
vdev->num_ctx = nvec;
vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
@@ -290,6 +294,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
struct pci_dev *pdev = vdev->pdev;
struct eventfd_ctx *trigger;
int irq, ret;
+ u16 cmd;
if (vector < 0 || vector >= vdev->num_ctx)
return -EINVAL;
@@ -298,7 +303,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
if (vdev->ctx[vector].trigger) {
irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
+
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
free_irq(irq, vdev->ctx[vector].trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
kfree(vdev->ctx[vector].name);
eventfd_ctx_put(vdev->ctx[vector].trigger);
vdev->ctx[vector].trigger = NULL;
@@ -326,6 +335,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
* such a reset it would be unsuccessful. To avoid this, restore the
* cached value of the message prior to enabling.
*/
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
if (msix) {
struct msi_msg msg;
@@ -335,6 +345,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
ret = request_irq(irq, vfio_msihandler, 0,
vdev->ctx[vector].name, trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
if (ret) {
kfree(vdev->ctx[vector].name);
eventfd_ctx_put(trigger);
@@ -344,11 +355,13 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
vdev->ctx[vector].producer.token = trigger;
vdev->ctx[vector].producer.irq = irq;
ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
- if (unlikely(ret))
+ if (unlikely(ret)) {
dev_info(&pdev->dev,
"irq bypass producer (token %p) registration fails: %d\n",
vdev->ctx[vector].producer.token, ret);
+ vdev->ctx[vector].producer.token = NULL;
+ }
vdev->ctx[vector].trigger = trigger;
return 0;
@@ -379,6 +392,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
struct pci_dev *pdev = vdev->pdev;
int i;
+ u16 cmd;
for (i = 0; i < vdev->num_ctx; i++) {
vfio_virqfd_disable(&vdev->ctx[i].unmask);
@@ -387,7 +401,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
pci_free_irq_vectors(pdev);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
/*
* Both disable paths above use pci_intx_for_msi() to clear DisINTx
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index cde3b5d3441a..17d2bae5b013 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -76,6 +76,11 @@ struct vfio_pci_dummy_resource {
struct list_head res_next;
};
+struct vfio_pci_mmap_vma {
+ struct vm_area_struct *vma;
+ struct list_head vma_next;
+};
+
struct vfio_pci_device {
struct pci_dev *pdev;
void __iomem *barmap[PCI_STD_RESOURCE_END + 1];
@@ -111,6 +116,9 @@ struct vfio_pci_device {
struct list_head dummy_resources_list;
struct mutex ioeventfds_lock;
struct list_head ioeventfds_list;
+ struct mutex vma_lock;
+ struct list_head vma_list;
+ struct rw_semaphore memory_lock;
};
#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
@@ -149,6 +157,14 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
unsigned int type, unsigned int subtype,
const struct vfio_pci_regops *ops,
size_t size, u32 flags, void *data);
+
+extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
+extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
+ *vdev);
+extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev);
+extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev,
+ u16 cmd);
+
#ifdef CONFIG_VFIO_PCI_IGD
extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
#else
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index a6029d0a5524..3d0ec2bbe131 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -165,6 +165,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
size_t x_start = 0, x_end = 0;
resource_size_t end;
void __iomem *io;
+ struct resource *res = &vdev->pdev->resource[bar];
ssize_t done;
if (pci_resource_start(pdev, bar))
@@ -180,6 +181,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
count = min(count, (size_t)(end - pos));
+ if (res->flags & IORESOURCE_MEM) {
+ down_read(&vdev->memory_lock);
+ if (!__vfio_pci_memory_enabled(vdev)) {
+ up_read(&vdev->memory_lock);
+ return -EIO;
+ }
+ }
+
if (bar == PCI_ROM_RESOURCE) {
/*
* The ROM can fill less space than the BAR, so we start the
@@ -187,13 +196,17 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
* filling large ROM BARs much faster.
*/
io = pci_map_rom(pdev, &x_start);
- if (!io)
- return -ENOMEM;
+ if (!io) {
+ done = -ENOMEM;
+ goto out;
+ }
x_end = end;
} else {
int ret = vfio_pci_setup_barmap(vdev, bar);
- if (ret)
- return ret;
+ if (ret) {
+ done = ret;
+ goto out;
+ }
io = vdev->barmap[bar];
}
@@ -210,6 +223,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
if (bar == PCI_ROM_RESOURCE)
pci_unmap_rom(pdev, io);
+out:
+ if (res->flags & IORESOURCE_MEM)
+ up_read(&vdev->memory_lock);
return done;
}
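
vfio_pci_bar_rw() above now takes memory_lock for read around the whole access, returns -EIO if decoding is off, and funnels the former early returns through an out: label so the lock is always dropped. A minimal sketch of that shape under the same assumptions (hypothetical names, a plain flag instead of the real command-register test):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_rwlock_t memory_lock = PTHREAD_RWLOCK_INITIALIZER;
    static bool memory_enabled = true;

    /* Read from a "BAR": every exit path after the check goes through one unlock. */
    static long bar_read(char *dst, long count)
    {
        long done;

        pthread_rwlock_rdlock(&memory_lock);
        if (!memory_enabled) {
            pthread_rwlock_unlock(&memory_lock);
            return -EIO;                      /* decoding disabled: refuse the access */
        }

        if (!dst) {                           /* former early return, now a goto */
            done = -EINVAL;
            goto out;
        }

        for (done = 0; done < count; done++)
            dst[done] = 0;                    /* pretend MMIO read */
    out:
        pthread_rwlock_unlock(&memory_lock);
        return done;
    }

    int main(void)
    {
        char buf[16];
        printf("read: %ld\n", bar_read(buf, sizeof(buf)));
        printf("bad:  %ld\n", bar_read(NULL, 4));
        return 0;
    }
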
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index c0cd824be2b7..460760d0becf 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -273,7 +273,7 @@ static int vfio_platform_open(void *device_data)
ret = pm_runtime_get_sync(vdev->device);
if (ret < 0)
- goto err_pm;
+ goto err_rst;
ret = vfio_platform_call_reset(vdev, &extra_dbg);
if (ret && vdev->reset_required) {
@@ -290,7 +290,6 @@ static int vfio_platform_open(void *device_data)
err_rst:
pm_runtime_put(vdev->device);
-err_pm:
vfio_platform_irq_cleanup(vdev);
err_irq:
vfio_platform_regions_cleanup(vdev);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c36275754086..95ce167a8ad9 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -343,6 +343,32 @@ static int put_pfn(unsigned long pfn, int prot)
return 0;
}
+static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
+ unsigned long vaddr, unsigned long *pfn,
+ bool write_fault)
+{
+ int ret;
+
+ ret = follow_pfn(vma, vaddr, pfn);
+ if (ret) {
+ bool unlocked = false;
+
+ ret = fixup_user_fault(NULL, mm, vaddr,
+ FAULT_FLAG_REMOTE |
+ (write_fault ? FAULT_FLAG_WRITE : 0),
+ &unlocked);
+ if (unlocked)
+ return -EAGAIN;
+
+ if (ret)
+ return ret;
+
+ ret = follow_pfn(vma, vaddr, pfn);
+ }
+
+ return ret;
+}
+
static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
int prot, unsigned long *pfn)
{
@@ -382,12 +408,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
down_read(&mm->mmap_sem);
+retry:
vma = find_vma_intersection(mm, vaddr, vaddr + 1);
if (vma && vma->vm_flags & VM_PFNMAP) {
- *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- if (is_invalid_reserved_pfn(*pfn))
- ret = 0;
+ ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
+ if (ret == -EAGAIN)
+ goto retry;
+
+ if (!ret && !is_invalid_reserved_pfn(*pfn))
+ ret = -EFAULT;
}
up_read(&mm->mmap_sem);
@@ -598,7 +628,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
continue;
}
- remote_vaddr = dma->vaddr + iova - dma->iova;
+ remote_vaddr = dma->vaddr + (iova - dma->iova);
ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
do_accounting);
if (ret)
@@ -606,7 +636,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
if (ret) {
- vfio_unpin_page_external(dma, iova, do_accounting);
+ if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
+ vfio_lock_acct(dma, -1, true);
goto pin_unwind;
}
}
@@ -1193,13 +1224,16 @@ static int vfio_bus_type(struct device *dev, void *data)
static int vfio_iommu_replay(struct vfio_iommu *iommu,
struct vfio_domain *domain)
{
- struct vfio_domain *d;
+ struct vfio_domain *d = NULL;
struct rb_node *n;
unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
int ret;
/* Arbitrarily pick the first domain in the list for lookups */
- d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
+ if (!list_empty(&iommu->domain_list))
+ d = list_first_entry(&iommu->domain_list,
+ struct vfio_domain, next);
+
n = rb_first(&iommu->dma_list);
for (; n; n = rb_next(n)) {
@@ -1217,6 +1251,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
phys_addr_t p;
dma_addr_t i;
+ if (WARN_ON(!d)) { /* mapped w/o a domain?! */
+ ret = -EINVAL;
+ goto unwind;
+ }
+
phys = iommu_iova_to_phys(d->domain, iova);
if (WARN_ON(!phys)) {
@@ -1246,7 +1285,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
- return ret;
+ goto unwind;
}
phys = pfn << PAGE_SHIFT;
@@ -1255,14 +1294,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
ret = iommu_map(domain->domain, iova, phys,
size, dma->prot | domain->prot);
- if (ret)
- return ret;
+ if (ret) {
+ if (!dma->iommu_mapped)
+ vfio_unpin_pages_remote(dma, iova,
+ phys >> PAGE_SHIFT,
+ size >> PAGE_SHIFT,
+ true);
+ goto unwind;
+ }
iova += size;
}
+ }
+
+ /* All dmas are now mapped, defer to second tree walk for unwind */
+ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+
dma->iommu_mapped = true;
}
+
return 0;
+
+unwind:
+ for (; n; n = rb_prev(n)) {
+ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+ dma_addr_t iova;
+
+ if (dma->iommu_mapped) {
+ iommu_unmap(domain->domain, dma->iova, dma->size);
+ continue;
+ }
+
+ iova = dma->iova;
+ while (iova < dma->iova + dma->size) {
+ phys_addr_t phys, p;
+ size_t size;
+ dma_addr_t i;
+
+ phys = iommu_iova_to_phys(domain->domain, iova);
+ if (!phys) {
+ iova += PAGE_SIZE;
+ continue;
+ }
+
+ size = PAGE_SIZE;
+ p = phys + size;
+ i = iova + size;
+ while (i < dma->iova + dma->size &&
+ p == iommu_iova_to_phys(domain->domain, i)) {
+ size += PAGE_SIZE;
+ p += PAGE_SIZE;
+ i += PAGE_SIZE;
+ }
+
+ iommu_unmap(domain->domain, iova, size);
+ vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
+ size >> PAGE_SHIFT, true);
+ }
+ }
+
+ return ret;
}
/*
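
Two patterns show up in the vfio_iommu_type1.c hunks: follow_fault_pfn() retries the PFN walk after fixup_user_fault(), returning -EAGAIN so the caller restarts from the retry: label, and vfio_iommu_replay() gains an unwind: path that, when iommu_map() fails part way through, walks back over the tree and unmaps/unpins whatever was already installed. A hedged standalone sketch of the second, map-then-roll-back-in-reverse, shape:

    #include <stdio.h>

    #define N 5
    static int mapped[N];

    static int map_one(int i)
    {
        if (i == 3)
            return -1;                        /* simulate iommu_map() failing part way */
        mapped[i] = 1;
        return 0;
    }

    /* Map everything; on failure, unwind the entries that did succeed, newest first. */
    static int replay_all(void)
    {
        int i, ret = 0;

        for (i = 0; i < N; i++) {
            ret = map_one(i);
            if (ret)
                goto unwind;
        }
        return 0;

    unwind:
        while (--i >= 0) {                    /* walk back over what was mapped */
            mapped[i] = 0;
            printf("unmapped %d\n", i);
        }
        return ret;
    }

    int main(void)
    {
        printf("replay: %d\n", replay_all());
        return 0;
    }
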
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 88c8c158ec25..0c7bbc92b22a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -613,6 +613,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
size_t len, total_len = 0;
int err;
struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
+ struct ubuf_info *ubuf;
bool zcopy_used;
int sent_pkts = 0;
@@ -645,9 +646,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
- struct ubuf_info *ubuf;
ubuf = nvq->ubuf_info + nvq->upend_idx;
-
vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
ubuf->callback = vhost_zerocopy_callback;
@@ -675,7 +674,8 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
if (zcopy_used) {
- vhost_net_ubuf_put(ubufs);
+ if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
+ vhost_net_ubuf_put(ubufs);
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 98b6eb902df9..732327756ee1 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -322,8 +322,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->kick = NULL;
vq->call_ctx = NULL;
vq->log_ctx = NULL;
- vhost_reset_is_le(vq);
vhost_disable_cross_endian(vq);
+ vhost_reset_is_le(vq);
vq->busyloop_timeout = 0;
vq->umem = NULL;
vq->iotlb = NULL;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index a94d700a4503..59c61744dcc1 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -273,13 +273,14 @@ __vringh_iov(struct vringh *vrh, u16 i,
desc_max = vrh->vring.num;
up_next = -1;
+ /* You must want something! */
+ if (WARN_ON(!riov && !wiov))
+ return -EINVAL;
+
if (riov)
riov->i = riov->used = 0;
- else if (wiov)
+ if (wiov)
wiov->i = wiov->used = 0;
- else
- /* You must want something! */
- BUG();
for (;;) {
void *addr;
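
The vringh change turns a caller bug (neither riov nor wiov supplied) from a hard BUG() into WARN_ON() plus -EINVAL, so a bad request fails instead of taking the machine down, and it resets both iovs when both are given rather than only one. A small sketch of the validate-and-return pattern; the WARN_ON macro here is a crude stand-in, not the kernel's:

    #include <errno.h>
    #include <stdio.h>

    /* Cheap stand-in for WARN_ON(): complain loudly, but keep running. */
    #define WARN_ON(cond) \
        ((cond) ? (fprintf(stderr, "warning: %s\n", #cond), 1) : 0)

    static int fill_iov(int *riov, int *wiov)
    {
        /* You must want something! */
        if (WARN_ON(!riov && !wiov))
            return -EINVAL;

        if (riov)
            *riov = 0;
        if (wiov)
            *wiov = 0;
        return 0;
    }

    int main(void)
    {
        int r, w;
        printf("%d\n", fill_iov(&r, &w));     /* 0 */
        printf("%d\n", fill_iov(NULL, NULL)); /* -EINVAL, after a warning */
        return 0;
    }
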
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 5f5c5de31f10..6ee320259e4f 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -182,14 +182,14 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
break;
}
- vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
- added = true;
-
- /* Deliver to monitoring devices all correctly transmitted
- * packets.
+ /* Deliver to monitoring devices all packets that we
+ * will transmit.
*/
virtio_transport_deliver_tap_pkt(pkt);
+ vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
+ added = true;
+
pkt->off += payload_len;
total_len += payload_len;
@@ -383,6 +383,52 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
return val < vq->num;
}
+static struct virtio_transport vhost_transport = {
+ .transport = {
+ .get_local_cid = vhost_transport_get_local_cid,
+
+ .init = virtio_transport_do_socket_init,
+ .destruct = virtio_transport_destruct,
+ .release = virtio_transport_release,
+ .connect = virtio_transport_connect,
+ .shutdown = virtio_transport_shutdown,
+ .cancel_pkt = vhost_transport_cancel_pkt,
+
+ .dgram_enqueue = virtio_transport_dgram_enqueue,
+ .dgram_dequeue = virtio_transport_dgram_dequeue,
+ .dgram_bind = virtio_transport_dgram_bind,
+ .dgram_allow = virtio_transport_dgram_allow,
+
+ .stream_enqueue = virtio_transport_stream_enqueue,
+ .stream_dequeue = virtio_transport_stream_dequeue,
+ .stream_has_data = virtio_transport_stream_has_data,
+ .stream_has_space = virtio_transport_stream_has_space,
+ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
+ .stream_is_active = virtio_transport_stream_is_active,
+ .stream_allow = virtio_transport_stream_allow,
+
+ .notify_poll_in = virtio_transport_notify_poll_in,
+ .notify_poll_out = virtio_transport_notify_poll_out,
+ .notify_recv_init = virtio_transport_notify_recv_init,
+ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
+ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
+ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+ .notify_send_init = virtio_transport_notify_send_init,
+ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+
+ .set_buffer_size = virtio_transport_set_buffer_size,
+ .set_min_buffer_size = virtio_transport_set_min_buffer_size,
+ .set_max_buffer_size = virtio_transport_set_max_buffer_size,
+ .get_buffer_size = virtio_transport_get_buffer_size,
+ .get_min_buffer_size = virtio_transport_get_min_buffer_size,
+ .get_max_buffer_size = virtio_transport_get_max_buffer_size,
+ },
+
+ .send_pkt = vhost_transport_send_pkt,
+};
+
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -439,7 +485,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
le64_to_cpu(pkt->hdr.dst_cid) ==
vhost_transport_get_local_cid())
- virtio_transport_recv_pkt(pkt);
+ virtio_transport_recv_pkt(&vhost_transport, pkt);
else
virtio_transport_free_pkt(pkt);
@@ -499,6 +545,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
mutex_unlock(&vq->mutex);
}
+ /* Some packets may have been queued before the device was started,
+ * let's kick the send worker to send them.
+ */
+ vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
mutex_unlock(&vsock->dev.mutex);
return 0;
@@ -787,52 +838,6 @@ static struct miscdevice vhost_vsock_misc = {
.fops = &vhost_vsock_fops,
};
-static struct virtio_transport vhost_transport = {
- .transport = {
- .get_local_cid = vhost_transport_get_local_cid,
-
- .init = virtio_transport_do_socket_init,
- .destruct = virtio_transport_destruct,
- .release = virtio_transport_release,
- .connect = virtio_transport_connect,
- .shutdown = virtio_transport_shutdown,
- .cancel_pkt = vhost_transport_cancel_pkt,
-
- .dgram_enqueue = virtio_transport_dgram_enqueue,
- .dgram_dequeue = virtio_transport_dgram_dequeue,
- .dgram_bind = virtio_transport_dgram_bind,
- .dgram_allow = virtio_transport_dgram_allow,
-
- .stream_enqueue = virtio_transport_stream_enqueue,
- .stream_dequeue = virtio_transport_stream_dequeue,
- .stream_has_data = virtio_transport_stream_has_data,
- .stream_has_space = virtio_transport_stream_has_space,
- .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
- .stream_is_active = virtio_transport_stream_is_active,
- .stream_allow = virtio_transport_stream_allow,
-
- .notify_poll_in = virtio_transport_notify_poll_in,
- .notify_poll_out = virtio_transport_notify_poll_out,
- .notify_recv_init = virtio_transport_notify_recv_init,
- .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
- .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
- .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
- .notify_send_init = virtio_transport_notify_send_init,
- .notify_send_pre_block = virtio_transport_notify_send_pre_block,
- .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
- .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
-
- .set_buffer_size = virtio_transport_set_buffer_size,
- .set_min_buffer_size = virtio_transport_set_min_buffer_size,
- .set_max_buffer_size = virtio_transport_set_max_buffer_size,
- .get_buffer_size = virtio_transport_get_buffer_size,
- .get_min_buffer_size = virtio_transport_get_min_buffer_size,
- .get_max_buffer_size = virtio_transport_get_max_buffer_size,
- },
-
- .send_pkt = vhost_transport_send_pkt,
-};
-
static int __init vhost_vsock_init(void)
{
int ret;
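
In the vsock hunks, virtio_transport_recv_pkt() now takes &vhost_transport as an argument, so the static definition is moved above its first user instead of sitting at the bottom of the file, and vhost_vsock_start() kicks send_pkt_work so packets queued before the device was started still go out. A tiny sketch of the define-before-use choice with a static ops table (names are made up for illustration):

    #include <stdio.h>

    struct ops {
        int (*send)(int);
    };

    static int my_send(int x) { return x + 1; }

    /* Defined before the function that takes its address, so no separate
     * forward declaration of the table is needed. */
    static const struct ops my_ops = {
        .send = my_send,
    };

    static int deliver(const struct ops *o, int pkt)
    {
        return o->send(pkt);
    }

    int main(void)
    {
        printf("%d\n", deliver(&my_ops, 41));
        return 0;
    }
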
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 73612485ed07..bd43d8cff389 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -460,7 +460,7 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
ret = regulator_enable(lp->enable);
if (ret < 0) {
dev_err(lp->dev, "failed to enable vddio: %d\n", ret);
- return ret;
+ goto disable_supply;
}
/*
@@ -475,24 +475,34 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
ret = lp855x_configure(lp);
if (ret) {
dev_err(lp->dev, "device config err: %d", ret);
- return ret;
+ goto disable_vddio;
}
ret = lp855x_backlight_register(lp);
if (ret) {
dev_err(lp->dev,
"failed to register backlight. err: %d\n", ret);
- return ret;
+ goto disable_vddio;
}
ret = sysfs_create_group(&lp->dev->kobj, &lp855x_attr_group);
if (ret) {
dev_err(lp->dev, "failed to register sysfs. err: %d\n", ret);
- return ret;
+ goto disable_vddio;
}
backlight_update_status(lp->bl);
+
return 0;
+
+disable_vddio:
+ if (lp->enable)
+ regulator_disable(lp->enable);
+disable_supply:
+ if (lp->supply)
+ regulator_disable(lp->supply);
+
+ return ret;
}
static int lp855x_remove(struct i2c_client *cl)
@@ -501,6 +511,8 @@ static int lp855x_remove(struct i2c_client *cl)
lp->bl->props.brightness = 0;
backlight_update_status(lp->bl);
+ if (lp->enable)
+ regulator_disable(lp->enable);
if (lp->supply)
regulator_disable(lp->supply);
sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group);
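
lp855x_probe() now unwinds what it acquired on every failure path: once a regulator has been enabled, later errors jump to labels that disable the regulators in reverse order, and remove() gained the matching regulator_disable(). A hedged standalone sketch of the cascading-label pattern (the helpers stand in for regulator_enable()/regulator_disable()):

    #include <stdio.h>

    static int enable_supply(void)   { puts("supply on");    return 0; }
    static int enable_vddio(void)    { puts("vddio on");     return 0; }
    static void disable_supply(void) { puts("supply off"); }
    static void disable_vddio(void)  { puts("vddio off"); }
    static int configure(void)       { puts("config fails"); return -1; }

    static int probe(void)
    {
        int ret;

        ret = enable_supply();
        if (ret)
            return ret;

        ret = enable_vddio();
        if (ret)
            goto err_supply;                  /* undo only what succeeded so far */

        ret = configure();
        if (ret)
            goto err_vddio;

        return 0;

    err_vddio:
        disable_vddio();
    err_supply:
        disable_supply();
        return ret;
    }

    int main(void)
    {
        printf("probe: %d\n", probe());
        return 0;
    }
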
diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
index d414c7a3acf5..a2f77625b717 100644
--- a/drivers/video/backlight/sky81452-backlight.c
+++ b/drivers/video/backlight/sky81452-backlight.c
@@ -207,6 +207,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
num_entry);
if (ret < 0) {
dev_err(dev, "led-sources node is invalid.\n");
+ of_node_put(np);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 787792c3d08d..40d5fea8513c 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -21,52 +21,6 @@ config VGA_CONSOLE
Say Y.
-config VGACON_SOFT_SCROLLBACK
- bool "Enable Scrollback Buffer in System RAM"
- depends on VGA_CONSOLE
- default n
- help
- The scrollback buffer of the standard VGA console is located in
- the VGA RAM. The size of this RAM is fixed and is quite small.
- If you require a larger scrollback buffer, this can be placed in
- System RAM which is dynamically allocated during initialization.
- Placing the scrollback buffer in System RAM will slightly slow
- down the console.
-
- If you want this feature, say 'Y' here and enter the amount of
- RAM to allocate for this buffer. If unsure, say 'N'.
-
-config VGACON_SOFT_SCROLLBACK_SIZE
- int "Scrollback Buffer Size (in KB)"
- depends on VGACON_SOFT_SCROLLBACK
- range 1 1024
- default "64"
- help
- Enter the amount of System RAM to allocate for scrollback
- buffers of VGA consoles. Each 64KB will give you approximately
- 16 80x25 screenfuls of scrollback buffer.
-
-config VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT
- bool "Persistent Scrollback History for each console by default"
- depends on VGACON_SOFT_SCROLLBACK
- default n
- help
- Say Y here if the scrollback history should persist by default when
- switching between consoles. Otherwise, the scrollback history will be
- flushed each time the console is switched. This feature can also be
- enabled using the boot command line parameter
- 'vgacon.scrollback_persistent=1'.
-
- This feature might break your tool of choice to flush the scrollback
- buffer, e.g. clear(1) will work fine but Debian's clear_console(1)
- will be broken, which might cause security issues.
- You can use the escape sequence \e[3J instead if this feature is
- activated.
-
- Note that a buffer of VGACON_SOFT_SCROLLBACK_SIZE is taken for each
- created tty device.
- So if you use a RAM-constrained system, say N here.
-
config MDA_CONSOLE
depends on !M68K && !PARISC && ISA
tristate "MDA text console (dual-headed)"
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index 7f2526b43b33..02b24ae8b9cb 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -31,17 +31,14 @@
#include <linux/linux_logo.h>
#include <linux/font.h>
-#define FONT_DATA ((unsigned char *)font_vga_8x16.data)
+#define NEWPORT_LEN 0x10000
-/* borrowed from fbcon.c */
-#define REFCOUNT(fd) (((int *)(fd))[-1])
-#define FNTSIZE(fd) (((int *)(fd))[-2])
-#define FNTCHARCNT(fd) (((int *)(fd))[-3])
-#define FONT_EXTRA_WORDS 3
+#define FONT_DATA ((unsigned char *)font_vga_8x16.data)
static unsigned char *font_data[MAX_NR_CONSOLES];
static struct newport_regs *npregs;
+static unsigned long newport_addr;
static int logo_active;
static int topscan;
@@ -519,6 +516,7 @@ static int newport_set_font(int unit, struct console_font *op)
FNTSIZE(new_data) = size;
FNTCHARCNT(new_data) = op->charcount;
REFCOUNT(new_data) = 0; /* usage counter */
+ FNTSUM(new_data) = 0;
p = new_data;
for (i = 0; i < op->charcount; i++) {
@@ -701,7 +699,6 @@ const struct consw newport_con = {
static int newport_probe(struct gio_device *dev,
const struct gio_device_id *id)
{
- unsigned long newport_addr;
int err;
if (!dev->resource.start)
@@ -711,7 +708,7 @@ static int newport_probe(struct gio_device *dev,
return -EBUSY; /* we only support one Newport as console */
newport_addr = dev->resource.start + 0xF0000;
- if (!request_mem_region(newport_addr, 0x10000, "Newport"))
+ if (!request_mem_region(newport_addr, NEWPORT_LEN, "Newport"))
return -ENODEV;
npregs = (struct newport_regs *)/* ioremap cannot fail */
@@ -719,6 +716,11 @@ static int newport_probe(struct gio_device *dev,
console_lock();
err = do_take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1);
console_unlock();
+
+ if (err) {
+ iounmap((void *)npregs);
+ release_mem_region(newport_addr, NEWPORT_LEN);
+ }
return err;
}
@@ -726,6 +728,7 @@ static void newport_remove(struct gio_device *dev)
{
give_up_console(&newport_con);
iounmap((void *)npregs);
+ release_mem_region(newport_addr, NEWPORT_LEN);
}
static struct gio_device_id newport_ids[] = {
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index bfaa9ec4bc1f..a992d922b3a7 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -165,210 +165,6 @@ static inline void vga_set_mem_top(struct vc_data *c)
write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2);
}
-#ifdef CONFIG_VGACON_SOFT_SCROLLBACK
-/* software scrollback */
-struct vgacon_scrollback_info {
- void *data;
- int tail;
- int size;
- int rows;
- int cnt;
- int cur;
- int save;
- int restore;
-};
-
-static struct vgacon_scrollback_info *vgacon_scrollback_cur;
-static struct vgacon_scrollback_info vgacon_scrollbacks[MAX_NR_CONSOLES];
-static bool scrollback_persistent = \
- IS_ENABLED(CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT);
-module_param_named(scrollback_persistent, scrollback_persistent, bool, 0000);
-MODULE_PARM_DESC(scrollback_persistent, "Enable persistent scrollback for all vga consoles");
-
-static void vgacon_scrollback_reset(int vc_num, size_t reset_size)
-{
- struct vgacon_scrollback_info *scrollback = &vgacon_scrollbacks[vc_num];
-
- if (scrollback->data && reset_size > 0)
- memset(scrollback->data, 0, reset_size);
-
- scrollback->cnt = 0;
- scrollback->tail = 0;
- scrollback->cur = 0;
-}
-
-static void vgacon_scrollback_init(int vc_num)
-{
- int pitch = vga_video_num_columns * 2;
- size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;
- int rows = size / pitch;
- void *data;
-
- data = kmalloc_array(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024,
- GFP_NOWAIT);
-
- vgacon_scrollbacks[vc_num].data = data;
- vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num];
-
- vgacon_scrollback_cur->rows = rows - 1;
- vgacon_scrollback_cur->size = rows * pitch;
-
- vgacon_scrollback_reset(vc_num, size);
-}
-
-static void vgacon_scrollback_switch(int vc_num)
-{
- if (!scrollback_persistent)
- vc_num = 0;
-
- if (!vgacon_scrollbacks[vc_num].data) {
- vgacon_scrollback_init(vc_num);
- } else {
- if (scrollback_persistent) {
- vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num];
- } else {
- size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;
-
- vgacon_scrollback_reset(vc_num, size);
- }
- }
-}
-
-static void vgacon_scrollback_startup(void)
-{
- vgacon_scrollback_cur = &vgacon_scrollbacks[0];
- vgacon_scrollback_init(0);
-}
-
-static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
-{
- void *p;
-
- if (!vgacon_scrollback_cur->data || !vgacon_scrollback_cur->size ||
- c->vc_num != fg_console)
- return;
-
- p = (void *) (c->vc_origin + t * c->vc_size_row);
-
- while (count--) {
- scr_memcpyw(vgacon_scrollback_cur->data +
- vgacon_scrollback_cur->tail,
- p, c->vc_size_row);
-
- vgacon_scrollback_cur->cnt++;
- p += c->vc_size_row;
- vgacon_scrollback_cur->tail += c->vc_size_row;
-
- if (vgacon_scrollback_cur->tail >= vgacon_scrollback_cur->size)
- vgacon_scrollback_cur->tail = 0;
-
- if (vgacon_scrollback_cur->cnt > vgacon_scrollback_cur->rows)
- vgacon_scrollback_cur->cnt = vgacon_scrollback_cur->rows;
-
- vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt;
- }
-}
-
-static void vgacon_restore_screen(struct vc_data *c)
-{
- c->vc_origin = c->vc_visible_origin;
- vgacon_scrollback_cur->save = 0;
-
- if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
- scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf,
- c->vc_screenbuf_size > vga_vram_size ?
- vga_vram_size : c->vc_screenbuf_size);
- vgacon_scrollback_cur->restore = 1;
- vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt;
- }
-}
-
-static void vgacon_scrolldelta(struct vc_data *c, int lines)
-{
- int start, end, count, soff;
-
- if (!lines) {
- vgacon_restore_screen(c);
- return;
- }
-
- if (!vgacon_scrollback_cur->data)
- return;
-
- if (!vgacon_scrollback_cur->save) {
- vgacon_cursor(c, CM_ERASE);
- vgacon_save_screen(c);
- c->vc_origin = (unsigned long)c->vc_screenbuf;
- vgacon_scrollback_cur->save = 1;
- }
-
- vgacon_scrollback_cur->restore = 0;
- start = vgacon_scrollback_cur->cur + lines;
- end = start + abs(lines);
-
- if (start < 0)
- start = 0;
-
- if (start > vgacon_scrollback_cur->cnt)
- start = vgacon_scrollback_cur->cnt;
-
- if (end < 0)
- end = 0;
-
- if (end > vgacon_scrollback_cur->cnt)
- end = vgacon_scrollback_cur->cnt;
-
- vgacon_scrollback_cur->cur = start;
- count = end - start;
- soff = vgacon_scrollback_cur->tail -
- ((vgacon_scrollback_cur->cnt - end) * c->vc_size_row);
- soff -= count * c->vc_size_row;
-
- if (soff < 0)
- soff += vgacon_scrollback_cur->size;
-
- count = vgacon_scrollback_cur->cnt - start;
-
- if (count > c->vc_rows)
- count = c->vc_rows;
-
- if (count) {
- int copysize;
-
- int diff = c->vc_rows - count;
- void *d = (void *) c->vc_visible_origin;
- void *s = (void *) c->vc_screenbuf;
-
- count *= c->vc_size_row;
- /* how much memory to end of buffer left? */
- copysize = min(count, vgacon_scrollback_cur->size - soff);
- scr_memcpyw(d, vgacon_scrollback_cur->data + soff, copysize);
- d += copysize;
- count -= copysize;
-
- if (count) {
- scr_memcpyw(d, vgacon_scrollback_cur->data, count);
- d += count;
- }
-
- if (diff)
- scr_memcpyw(d, s, diff * c->vc_size_row);
- } else
- vgacon_cursor(c, CM_MOVE);
-}
-
-static void vgacon_flush_scrollback(struct vc_data *c)
-{
- size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;
-
- vgacon_scrollback_reset(c->vc_num, size);
-}
-#else
-#define vgacon_scrollback_startup(...) do { } while (0)
-#define vgacon_scrollback_init(...) do { } while (0)
-#define vgacon_scrollback_update(...) do { } while (0)
-#define vgacon_scrollback_switch(...) do { } while (0)
-
static void vgacon_restore_screen(struct vc_data *c)
{
if (c->vc_origin != c->vc_visible_origin)
@@ -382,11 +178,6 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
vga_set_mem_top(c);
}
-static void vgacon_flush_scrollback(struct vc_data *c)
-{
-}
-#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */
-
static const char *vgacon_startup(void)
{
const char *display_desc = NULL;
@@ -569,10 +360,7 @@ static const char *vgacon_startup(void)
vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH;
vgacon_yres = vga_scan_lines;
- if (!vga_init_done) {
- vgacon_scrollback_startup();
- vga_init_done = true;
- }
+ vga_init_done = true;
return display_desc;
}
@@ -596,7 +384,7 @@ static void vgacon_init(struct vc_data *c, int init)
vc_resize(c, vga_video_num_columns, vga_video_num_lines);
c->vc_scan_lines = vga_scan_lines;
- c->vc_font.height = vga_video_font_height;
+ c->vc_font.height = c->vc_cell_height = vga_video_font_height;
c->vc_complement_mask = 0x7700;
if (vga_512_chars)
c->vc_hi_font_mask = 0x0800;
@@ -729,32 +517,32 @@ static void vgacon_cursor(struct vc_data *c, int mode)
switch (c->vc_cursor_type & 0x0f) {
case CUR_UNDERLINE:
vgacon_set_cursor_size(c->vc_x,
- c->vc_font.height -
- (c->vc_font.height <
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 2 : 3),
- c->vc_font.height -
- (c->vc_font.height <
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 1 : 2));
break;
case CUR_TWO_THIRDS:
vgacon_set_cursor_size(c->vc_x,
- c->vc_font.height / 3,
- c->vc_font.height -
- (c->vc_font.height <
+ c->vc_cell_height / 3,
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 1 : 2));
break;
case CUR_LOWER_THIRD:
vgacon_set_cursor_size(c->vc_x,
- (c->vc_font.height * 2) / 3,
- c->vc_font.height -
- (c->vc_font.height <
+ (c->vc_cell_height * 2) / 3,
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 1 : 2));
break;
case CUR_LOWER_HALF:
vgacon_set_cursor_size(c->vc_x,
- c->vc_font.height / 2,
- c->vc_font.height -
- (c->vc_font.height <
+ c->vc_cell_height / 2,
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 1 : 2));
break;
case CUR_NONE:
@@ -765,7 +553,7 @@ static void vgacon_cursor(struct vc_data *c, int mode)
break;
default:
vgacon_set_cursor_size(c->vc_x, 1,
- c->vc_font.height);
+ c->vc_cell_height);
break;
}
break;
@@ -776,13 +564,13 @@ static int vgacon_doresize(struct vc_data *c,
unsigned int width, unsigned int height)
{
unsigned long flags;
- unsigned int scanlines = height * c->vc_font.height;
+ unsigned int scanlines = height * c->vc_cell_height;
u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
raw_spin_lock_irqsave(&vga_lock, flags);
vgacon_xres = width * VGA_FONTWIDTH;
- vgacon_yres = height * c->vc_font.height;
+ vgacon_yres = height * c->vc_cell_height;
if (vga_video_type >= VIDEO_TYPE_VGAC) {
outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg);
max_scan = inb_p(vga_video_port_val);
@@ -837,9 +625,9 @@ static int vgacon_doresize(struct vc_data *c,
static int vgacon_switch(struct vc_data *c)
{
int x = c->vc_cols * VGA_FONTWIDTH;
- int y = c->vc_rows * c->vc_font.height;
+ int y = c->vc_rows * c->vc_cell_height;
int rows = screen_info.orig_video_lines * vga_default_font_height/
- c->vc_font.height;
+ c->vc_cell_height;
/*
* We need to save screen size here as it's the only way
* we can spot the screen has been resized and we need to
@@ -863,7 +651,6 @@ static int vgacon_switch(struct vc_data *c)
vgacon_doresize(c, c->vc_cols, c->vc_rows);
}
- vgacon_scrollback_switch(c->vc_num);
return 0; /* Redrawing not needed */
}
@@ -1271,7 +1058,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
cursor_size_lastto = 0;
c->vc_sw->con_cursor(c, CM_DRAW);
}
- c->vc_font.height = fontheight;
+ c->vc_font.height = c->vc_cell_height = fontheight;
vc_resize(c, 0, rows); /* Adjust console size */
}
}
@@ -1319,12 +1106,20 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
if ((width << 1) * height > vga_vram_size)
return -EINVAL;
+ if (user) {
+ /*
+ * Ho ho! Someone (svgatextmode, eh?) may have reprogrammed
+ * the video mode! Set the new defaults then and go away.
+ */
+ screen_info.orig_video_cols = width;
+ screen_info.orig_video_lines = height;
+ vga_default_font_height = c->vc_cell_height;
+ return 0;
+ }
if (width % 2 || width > screen_info.orig_video_cols ||
height > (screen_info.orig_video_lines * vga_default_font_height)/
- c->vc_font.height)
- /* let svgatextmode tinker with video timings and
- return success */
- return (user) ? 0 : -EINVAL;
+ c->vc_cell_height)
+ return -EINVAL;
if (con_is_visible(c) && !vga_is_gfx) /* who knows */
vgacon_doresize(c, width, height);
@@ -1380,7 +1175,6 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
oldo = c->vc_origin;
delta = lines * c->vc_size_row;
if (dir == SM_UP) {
- vgacon_scrollback_update(c, t, lines);
if (c->vc_scr_end + delta >= vga_vram_end) {
scr_memcpyw((u16 *) vga_vram_base,
(u16 *) (oldo + delta),
@@ -1444,7 +1238,6 @@ const struct consw vga_con = {
.con_save_screen = vgacon_save_screen,
.con_build_attr = vgacon_build_attr,
.con_invert_region = vgacon_invert_region,
- .con_flush_scrollback = vgacon_flush_scrollback,
};
EXPORT_SYMBOL(vga_con);
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index f99558d006bf..97c4319797d5 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1303,6 +1303,7 @@ config FB_ATY
select FB_CFB_IMAGEBLIT
select FB_BACKLIGHT if FB_ATY_BACKLIGHT
select FB_MACMODES if PPC
+ select FB_ATY_CT if SPARC64 && PCI
help
This driver supports graphics boards with the ATI Mach64 chips.
Say Y if you have such a graphics board.
@@ -1313,7 +1314,6 @@ config FB_ATY
config FB_ATY_CT
bool "Mach64 CT/VT/GT/LT (incl. 3D RAGE) support"
depends on PCI && FB_ATY
- default y if SPARC64 && PCI
help
Say Y here to support use of ATI's 64-bit Rage boards (or other
boards based on the Mach64 CT, VT, GT, and LT chipsets) as a
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 4ed55e6bbb84..6d01ae3984c7 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -1071,8 +1071,8 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
}
INIT_LIST_HEAD(&pdata->pwr_gpios);
- ret = -ENOMEM;
for (i = 0; i < gpiod_count(dev, "atmel,power-control"); i++) {
+ ret = -ENOMEM;
gpiod = devm_gpiod_get_index(dev, "atmel,power-control",
i, GPIOD_ASIS);
if (IS_ERR(gpiod))
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index e8594bbaea60..c6109a385cac 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -2327,7 +2327,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
ret = radeon_kick_out_firmware_fb(pdev);
if (ret)
- return ret;
+ goto err_release_fb;
/* request the mem regions */
ret = pci_request_region(pdev, 0, "radeonfb framebuffer");
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index ca935c09a261..436365efae73 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -216,7 +216,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
region.color = color;
region.rop = ROP_COPY;
- if (rw && !bottom_only) {
+ if ((int) rw > 0 && !bottom_only) {
region.dx = info->var.xoffset + rs;
region.dy = 0;
region.width = rw;
@@ -224,7 +224,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
info->fbops->fb_fillrect(info, &region);
}
- if (bh) {
+ if ((int) bh > 0) {
region.dx = info->var.xoffset;
region.dy = info->var.yoffset + bs;
region.width = rs;
@@ -234,7 +234,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
}
static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
- int softback_lines, int fg, int bg)
+ int fg, int bg)
{
struct fb_cursor cursor;
struct fbcon_ops *ops = info->fbcon_par;
@@ -247,15 +247,6 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
cursor.set = 0;
- if (softback_lines) {
- if (y + softback_lines >= vc->vc_rows) {
- mode = CM_ERASE;
- ops->cursor_flash = 0;
- return;
- } else
- y += softback_lines;
- }
-
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
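
The margin-clearing helpers (here and in the rotated variants below) now test (int) rw > 0 and (int) bh > 0 rather than plain truthiness. The margin sizes are presumably computed as unsigned differences between the framebuffer resolution and the text area, so when the text area is larger they wrap to enormous values; treating them as signed makes the bogus clear a no-op. A small sketch of the underflow and the guard:

    #include <stdio.h>

    int main(void)
    {
        unsigned int screen = 600, console = 640;
        unsigned int rw = screen - console;   /* wraps to ~4 billion, not -40 */

        if (rw)
            printf("plain truth test would clear %u pixels\n", rw);

        if ((int) rw > 0)
            printf("never reached\n");
        else
            printf("signed check skips the bogus clear\n");
        return 0;
    }
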
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index 2811c4afde01..e8ea76848104 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -101,17 +101,17 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
if (!len)
return 0;
- cmap->red = kmalloc(size, flags);
+ cmap->red = kzalloc(size, flags);
if (!cmap->red)
goto fail;
- cmap->green = kmalloc(size, flags);
+ cmap->green = kzalloc(size, flags);
if (!cmap->green)
goto fail;
- cmap->blue = kmalloc(size, flags);
+ cmap->blue = kzalloc(size, flags);
if (!cmap->blue)
goto fail;
if (transp) {
- cmap->transp = kmalloc(size, flags);
+ cmap->transp = kzalloc(size, flags);
if (!cmap->transp)
goto fail;
} else {
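
fb_alloc_cmap_gfp() switches kmalloc() to kzalloc(), presumably so a colormap that is only partially filled before being copied back to userspace cannot expose stale heap contents. The userspace analogue is calloc() versus malloc():

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        size_t len = 16;

        /* malloc() contents are indeterminate; handing them out as-is would
         * expose whatever the allocator had there before. */
        unsigned char *dirty = malloc(len);

        /* calloc() (like kzalloc) returns zeroed memory, so even entries we
         * never set are well defined. */
        unsigned char *clean = calloc(len, 1);
        if (!dirty || !clean)
            return 1;

        clean[0] = 0xff;                      /* only the first entry is filled in */
        printf("clean[1..3] = %u %u %u\n", clean[1], clean[2], clean[3]);

        free(dirty);
        free(clean);
        return 0;
    }
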
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index cb93a6b38160..bf7959fdf9f4 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -102,12 +102,6 @@ static int logo_lines;
/* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO
enums. */
static int logo_shown = FBCON_LOGO_CANSHOW;
-/* Software scrollback */
-static int fbcon_softback_size = 32768;
-static unsigned long softback_buf, softback_curr;
-static unsigned long softback_in;
-static unsigned long softback_top, softback_end;
-static int softback_lines;
/* console mappings */
static int first_fb_vc;
static int last_fb_vc = MAX_NR_CONSOLES - 1;
@@ -148,8 +142,6 @@ static int margin_color;
static const struct consw fb_con;
-#define CM_SOFTBACK (8)
-
#define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
static int fbcon_set_origin(struct vc_data *);
@@ -355,18 +347,6 @@ static int get_color(struct vc_data *vc, struct fb_info *info,
return color;
}
-static void fbcon_update_softback(struct vc_data *vc)
-{
- int l = fbcon_softback_size / vc->vc_size_row;
-
- if (l > 5)
- softback_end = softback_buf + l * vc->vc_size_row;
- else
- /* Smaller scrollback makes no sense, and 0 would screw
- the operation totally */
- softback_top = 0;
-}
-
static void fb_flashcursor(struct work_struct *work)
{
struct fb_info *info = container_of(work, struct fb_info, queue);
@@ -396,7 +376,7 @@ static void fb_flashcursor(struct work_struct *work)
c = scr_readw((u16 *) vc->vc_pos);
mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
CM_ERASE : CM_DRAW;
- ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1),
+ ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
console_unlock();
}
@@ -453,13 +433,7 @@ static int __init fb_console_setup(char *this_opt)
}
if (!strncmp(options, "scrollback:", 11)) {
- options += 11;
- if (*options) {
- fbcon_softback_size = simple_strtoul(options, &options, 0);
- if (*options == 'k' || *options == 'K') {
- fbcon_softback_size *= 1024;
- }
- }
+ pr_warn("Ignoring scrollback size option\n");
continue;
}
@@ -988,31 +962,6 @@ static const char *fbcon_startup(void)
set_blitting_type(vc, info);
- if (info->fix.type != FB_TYPE_TEXT) {
- if (fbcon_softback_size) {
- if (!softback_buf) {
- softback_buf =
- (unsigned long)
- kmalloc(fbcon_softback_size,
- GFP_KERNEL);
- if (!softback_buf) {
- fbcon_softback_size = 0;
- softback_top = 0;
- }
- }
- } else {
- if (softback_buf) {
- kfree((void *) softback_buf);
- softback_buf = 0;
- softback_top = 0;
- }
- }
- if (softback_buf)
- softback_in = softback_top = softback_curr =
- softback_buf;
- softback_lines = 0;
- }
-
/* Setup default font */
if (!p->fontdata && !vc->vc_font.data) {
if (!fontname[0] || !(font = find_font(fontname)))
@@ -1181,9 +1130,6 @@ static void fbcon_init(struct vc_data *vc, int init)
if (logo)
fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows);
- if (vc == svc && softback_buf)
- fbcon_update_softback(vc);
-
if (ops->rotate_font && ops->rotate_font(info, vc)) {
ops->rotate = FB_ROTATE_UR;
set_blitting_type(vc, info);
@@ -1346,7 +1292,6 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_ops *ops = info->fbcon_par;
- int y;
int c = scr_readw((u16 *) vc->vc_pos);
ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
@@ -1360,16 +1305,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
fbcon_add_cursor_timer(info);
ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
- if (mode & CM_SOFTBACK) {
- mode &= ~CM_SOFTBACK;
- y = softback_lines;
- } else {
- if (softback_lines)
- fbcon_set_origin(vc);
- y = 0;
- }
- ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1),
+ if (!ops->cursor)
+ return;
+
+ ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
}
@@ -1440,8 +1380,6 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
if (con_is_visible(vc)) {
update_screen(vc);
- if (softback_buf)
- fbcon_update_softback(vc);
}
}
@@ -1579,99 +1517,6 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
scrollback_current = 0;
}
-static void fbcon_redraw_softback(struct vc_data *vc, struct display *p,
- long delta)
-{
- int count = vc->vc_rows;
- unsigned short *d, *s;
- unsigned long n;
- int line = 0;
-
- d = (u16 *) softback_curr;
- if (d == (u16 *) softback_in)
- d = (u16 *) vc->vc_origin;
- n = softback_curr + delta * vc->vc_size_row;
- softback_lines -= delta;
- if (delta < 0) {
- if (softback_curr < softback_top && n < softback_buf) {
- n += softback_end - softback_buf;
- if (n < softback_top) {
- softback_lines -=
- (softback_top - n) / vc->vc_size_row;
- n = softback_top;
- }
- } else if (softback_curr >= softback_top
- && n < softback_top) {
- softback_lines -=
- (softback_top - n) / vc->vc_size_row;
- n = softback_top;
- }
- } else {
- if (softback_curr > softback_in && n >= softback_end) {
- n += softback_buf - softback_end;
- if (n > softback_in) {
- n = softback_in;
- softback_lines = 0;
- }
- } else if (softback_curr <= softback_in && n > softback_in) {
- n = softback_in;
- softback_lines = 0;
- }
- }
- if (n == softback_curr)
- return;
- softback_curr = n;
- s = (u16 *) softback_curr;
- if (s == (u16 *) softback_in)
- s = (u16 *) vc->vc_origin;
- while (count--) {
- unsigned short *start;
- unsigned short *le;
- unsigned short c;
- int x = 0;
- unsigned short attr = 1;
-
- start = s;
- le = advance_row(s, 1);
- do {
- c = scr_readw(s);
- if (attr != (c & 0xff00)) {
- attr = c & 0xff00;
- if (s > start) {
- fbcon_putcs(vc, start, s - start,
- line, x);
- x += s - start;
- start = s;
- }
- }
- if (c == scr_readw(d)) {
- if (s > start) {
- fbcon_putcs(vc, start, s - start,
- line, x);
- x += s - start + 1;
- start = s + 1;
- } else {
- x++;
- start++;
- }
- }
- s++;
- d++;
- } while (s < le);
- if (s > start)
- fbcon_putcs(vc, start, s - start, line, x);
- line++;
- if (d == (u16 *) softback_end)
- d = (u16 *) softback_buf;
- if (d == (u16 *) softback_in)
- d = (u16 *) vc->vc_origin;
- if (s == (u16 *) softback_end)
- s = (u16 *) softback_buf;
- if (s == (u16 *) softback_in)
- s = (u16 *) vc->vc_origin;
- }
-}
-
static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
int line, int count, int dy)
{
@@ -1811,31 +1656,6 @@ static void fbcon_redraw(struct vc_data *vc, struct display *p,
}
}
-static inline void fbcon_softback_note(struct vc_data *vc, int t,
- int count)
-{
- unsigned short *p;
-
- if (vc->vc_num != fg_console)
- return;
- p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row);
-
- while (count) {
- scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row);
- count--;
- p = advance_row(p, 1);
- softback_in += vc->vc_size_row;
- if (softback_in == softback_end)
- softback_in = softback_buf;
- if (softback_in == softback_top) {
- softback_top += vc->vc_size_row;
- if (softback_top == softback_end)
- softback_top = softback_buf;
- }
- }
- softback_curr = softback_in;
-}
-
static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
enum con_scroll dir, unsigned int count)
{
@@ -1858,8 +1678,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
case SM_UP:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- if (softback_top)
- fbcon_softback_note(vc, t, count);
if (logo_shown >= 0)
goto redraw_up;
switch (p->scrollmode) {
@@ -2152,6 +1970,9 @@ static void updatescrollmode(struct display *p,
}
}
+#define PITCH(w) (((w) + 7) >> 3)
+#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */
+
static int fbcon_resize(struct vc_data *vc, unsigned int width,
unsigned int height, unsigned int user)
{
@@ -2161,6 +1982,24 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
struct fb_var_screeninfo var = info->var;
int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh;
+ if (p->userfont && FNTSIZE(vc->vc_font.data)) {
+ int size;
+ int pitch = PITCH(vc->vc_font.width);
+
+ /*
+ * If user font, ensure that a possible change to user font
+ * height or width will not allow a font data out-of-bounds access.
+ * NOTE: must use original charcount in calculation as font
+ * charcount can change and cannot be used to determine the
+ * font data allocated size.
+ */
+ if (pitch <= 0)
+ return -EINVAL;
+ size = CALC_FONTSZ(vc->vc_font.height, pitch, FNTCHARCNT(vc->vc_font.data));
+ if (size > FNTSIZE(vc->vc_font.data))
+ return -EINVAL;
+ }
+
virt_w = FBCON_SWAP(ops->rotate, width, height);
virt_h = FBCON_SWAP(ops->rotate, height, width);
virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width,
@@ -2186,7 +2025,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
return -EINVAL;
DPRINTK("resize now %ix%i\n", var.xres, var.yres);
- if (con_is_visible(vc)) {
+ if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
var.activate = FB_ACTIVATE_NOW |
FB_ACTIVATE_FORCE;
fb_set_var(info, &var);
@@ -2209,14 +2048,6 @@ static int fbcon_switch(struct vc_data *vc)
info = registered_fb[con2fb_map[vc->vc_num]];
ops = info->fbcon_par;
- if (softback_top) {
- if (softback_lines)
- fbcon_set_origin(vc);
- softback_top = softback_curr = softback_in = softback_buf;
- softback_lines = 0;
- fbcon_update_softback(vc);
- }
-
if (logo_shown >= 0) {
struct vc_data *conp2 = vc_cons[logo_shown].d;
@@ -2442,6 +2273,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
if (font->width <= 8) {
j = vc->vc_font.height;
+ if (font->charcount * j > FNTSIZE(fontdata))
+ return -EINVAL;
+
for (i = 0; i < font->charcount; i++) {
memcpy(data, fontdata, j);
memset(data + j, 0, 32 - j);
@@ -2450,6 +2284,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
}
} else if (font->width <= 16) {
j = vc->vc_font.height * 2;
+ if (font->charcount * j > FNTSIZE(fontdata))
+ return -EINVAL;
+
for (i = 0; i < font->charcount; i++) {
memcpy(data, fontdata, j);
memset(data + j, 0, 64 - j);
@@ -2457,6 +2294,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
fontdata += j;
}
} else if (font->width <= 24) {
+ if (font->charcount * (vc->vc_font.height * sizeof(u32)) > FNTSIZE(fontdata))
+ return -EINVAL;
+
for (i = 0; i < font->charcount; i++) {
for (j = 0; j < vc->vc_font.height; j++) {
*data++ = fontdata[0];
@@ -2469,6 +2309,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
}
} else {
j = vc->vc_font.height * 4;
+ if (font->charcount * j > FNTSIZE(fontdata))
+ return -EINVAL;
+
for (i = 0; i < font->charcount; i++) {
memcpy(data, fontdata, j);
memset(data + j, 0, 128 - j);
@@ -2550,9 +2393,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
int cnt;
char *old_data = NULL;
- if (con_is_visible(vc) && softback_lines)
- fbcon_set_origin(vc);
-
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
if (p->userfont)
old_data = vc->vc_font.data;
@@ -2578,8 +2418,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
cols /= w;
rows /= h;
vc_resize(vc, cols, rows);
- if (con_is_visible(vc) && softback_buf)
- fbcon_update_softback(vc);
} else if (con_is_visible(vc)
&& vc->vc_mode == KD_TEXT) {
fbcon_clear_margins(vc, 0);
@@ -2623,7 +2461,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
int size;
int i, csum;
u8 *new_data, *data = font->data;
- int pitch = (font->width+7) >> 3;
+ int pitch = PITCH(font->width);
/* Is there a reason why fbconsole couldn't handle any charcount >256?
* If not this check should be changed to charcount < 256 */
@@ -2639,7 +2477,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
if (fbcon_invalid_charcount(info, charcount))
return -EINVAL;
- size = h * pitch * charcount;
+ size = CALC_FONTSZ(h, pitch, charcount);
new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
@@ -2738,19 +2576,7 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
{
- unsigned long p;
- int line;
-
- if (vc->vc_num != fg_console || !softback_lines)
- return (u16 *) (vc->vc_origin + offset);
- line = offset / vc->vc_size_row;
- if (line >= softback_lines)
- return (u16 *) (vc->vc_origin + offset -
- softback_lines * vc->vc_size_row);
- p = softback_curr + offset;
- if (p >= softback_end)
- p += softback_buf - softback_end;
- return (u16 *) p;
+ return (u16 *) (vc->vc_origin + offset);
}
static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
@@ -2764,22 +2590,7 @@ static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
x = offset % vc->vc_cols;
y = offset / vc->vc_cols;
- if (vc->vc_num == fg_console)
- y += softback_lines;
- ret = pos + (vc->vc_cols - x) * 2;
- } else if (vc->vc_num == fg_console && softback_lines) {
- unsigned long offset = pos - softback_curr;
-
- if (pos < softback_curr)
- offset += softback_end - softback_buf;
- offset /= 2;
- x = offset % vc->vc_cols;
- y = offset / vc->vc_cols;
ret = pos + (vc->vc_cols - x) * 2;
- if (ret == softback_end)
- ret = softback_buf;
- if (ret == softback_in)
- ret = vc->vc_origin;
} else {
/* Should not happen */
x = y = 0;
@@ -2807,106 +2618,11 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) |
(((a) & 0x0700) << 4);
scr_writew(a, p++);
- if (p == (u16 *) softback_end)
- p = (u16 *) softback_buf;
- if (p == (u16 *) softback_in)
- p = (u16 *) vc->vc_origin;
}
}
-static void fbcon_scrolldelta(struct vc_data *vc, int lines)
-{
- struct fb_info *info = registered_fb[con2fb_map[fg_console]];
- struct fbcon_ops *ops = info->fbcon_par;
- struct display *disp = &fb_display[fg_console];
- int offset, limit, scrollback_old;
-
- if (softback_top) {
- if (vc->vc_num != fg_console)
- return;
- if (vc->vc_mode != KD_TEXT || !lines)
- return;
- if (logo_shown >= 0) {
- struct vc_data *conp2 = vc_cons[logo_shown].d;
-
- if (conp2->vc_top == logo_lines
- && conp2->vc_bottom == conp2->vc_rows)
- conp2->vc_top = 0;
- if (logo_shown == vc->vc_num) {
- unsigned long p, q;
- int i;
-
- p = softback_in;
- q = vc->vc_origin +
- logo_lines * vc->vc_size_row;
- for (i = 0; i < logo_lines; i++) {
- if (p == softback_top)
- break;
- if (p == softback_buf)
- p = softback_end;
- p -= vc->vc_size_row;
- q -= vc->vc_size_row;
- scr_memcpyw((u16 *) q, (u16 *) p,
- vc->vc_size_row);
- }
- softback_in = softback_curr = p;
- update_region(vc, vc->vc_origin,
- logo_lines * vc->vc_cols);
- }
- logo_shown = FBCON_LOGO_CANSHOW;
- }
- fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK);
- fbcon_redraw_softback(vc, disp, lines);
- fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK);
- return;
- }
-
- if (!scrollback_phys_max)
- return;
-
- scrollback_old = scrollback_current;
- scrollback_current -= lines;
- if (scrollback_current < 0)
- scrollback_current = 0;
- else if (scrollback_current > scrollback_max)
- scrollback_current = scrollback_max;
- if (scrollback_current == scrollback_old)
- return;
-
- if (fbcon_is_inactive(vc, info))
- return;
-
- fbcon_cursor(vc, CM_ERASE);
-
- offset = disp->yscroll - scrollback_current;
- limit = disp->vrows;
- switch (disp->scrollmode) {
- case SCROLL_WRAP_MOVE:
- info->var.vmode |= FB_VMODE_YWRAP;
- break;
- case SCROLL_PAN_MOVE:
- case SCROLL_PAN_REDRAW:
- limit -= vc->vc_rows;
- info->var.vmode &= ~FB_VMODE_YWRAP;
- break;
- }
- if (offset < 0)
- offset += limit;
- else if (offset >= limit)
- offset -= limit;
-
- ops->var.xoffset = 0;
- ops->var.yoffset = offset * vc->vc_font.height;
- ops->update_start(info);
-
- if (!scrollback_current)
- fbcon_cursor(vc, CM_DRAW);
-}
-
static int fbcon_set_origin(struct vc_data *vc)
{
- if (softback_lines)
- fbcon_scrolldelta(vc, softback_lines);
return 0;
}
@@ -2970,8 +2686,6 @@ static void fbcon_modechanged(struct fb_info *info)
fbcon_set_palette(vc, color_table);
update_screen(vc);
- if (softback_buf)
- fbcon_update_softback(vc);
}
}
@@ -3413,7 +3127,6 @@ static const struct consw fb_con = {
.con_font_default = fbcon_set_def_font,
.con_font_copy = fbcon_copy_font,
.con_set_palette = fbcon_set_palette,
- .con_scrolldelta = fbcon_scrolldelta,
.con_set_origin = fbcon_set_origin,
.con_invert_region = fbcon_invert_region,
.con_screen_pos = fbcon_screen_pos,
@@ -3670,9 +3383,6 @@ static void fbcon_exit(void)
}
#endif
- kfree((void *)softback_buf);
- softback_buf = 0UL;
-
for_each_registered_fb(i) {
int pending = 0;
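
Besides removing the software scrollback, the fbcon.c hunks validate user fonts against the size recorded at allocation time: fbcon_resize() recomputes height x pitch x charcount from FNTCHARCNT() and rejects anything larger than FNTSIZE(), and fbcon_get_font() applies the same bound before copying glyph data, blocking out-of-bounds reads when a later ioctl reshapes the font. A hedged sketch of that check, reusing the PITCH/CALC_FONTSZ helpers the patch introduces:

    #include <stdio.h>

    #define PITCH(w)             (((w) + 7) >> 3)      /* bytes per font row */
    #define CALC_FONTSZ(h, p, c) ((h) * (p) * (c))     /* height * pitch * charcount */

    /* Refuse any height/width/charcount combination that would read past the
     * buffer actually allocated for the glyph data. */
    static int font_fits(int height, int width, int charcount, int allocated)
    {
        int pitch = PITCH(width);

        if (pitch <= 0)
            return 0;
        return CALC_FONTSZ(height, pitch, charcount) <= allocated;
    }

    int main(void)
    {
        int allocated = CALC_FONTSZ(16, PITCH(8), 256);   /* 8x16, 256 glyphs */

        printf("8x16 ok: %d\n", font_fits(16, 8, 256, allocated));
        printf("8x32 ok: %d\n", font_fits(32, 8, 256, allocated)); /* would overrun */
        return 0;
    }
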
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index 21912a3ba32f..c023009f2978 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -62,7 +62,7 @@ struct fbcon_ops {
void (*clear_margins)(struct vc_data *vc, struct fb_info *info,
int color, int bottom_only);
void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode,
- int softback_lines, int fg, int bg);
+ int fg, int bg);
int (*update_start)(struct fb_info *info);
int (*rotate_font)(struct fb_info *info, struct vc_data *vc);
struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
@@ -152,13 +152,6 @@ static inline int attr_col_ec(int shift, struct vc_data *vc,
#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0)
#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1)
-/* Font */
-#define REFCOUNT(fd) (((int *)(fd))[-1])
-#define FNTSIZE(fd) (((int *)(fd))[-2])
-#define FNTCHARCNT(fd) (((int *)(fd))[-3])
-#define FNTSUM(fd) (((int *)(fd))[-4])
-#define FONT_EXTRA_WORDS 4
-
/*
* Scroll Method
*/
diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
index dfa9a8aa4509..71ad6967a70e 100644
--- a/drivers/video/fbdev/core/fbcon_ccw.c
+++ b/drivers/video/fbdev/core/fbcon_ccw.c
@@ -201,7 +201,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
region.color = color;
region.rop = ROP_COPY;
- if (rw && !bottom_only) {
+ if ((int) rw > 0 && !bottom_only) {
region.dx = 0;
region.dy = info->var.yoffset;
region.height = rw;
@@ -209,7 +209,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
info->fbops->fb_fillrect(info, &region);
}
- if (bh) {
+ if ((int) bh > 0) {
region.dx = info->var.xoffset + bs;
region.dy = 0;
region.height = info->var.yres_virtual;
@@ -219,7 +219,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
}
static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
- int softback_lines, int fg, int bg)
+ int fg, int bg)
{
struct fb_cursor cursor;
struct fbcon_ops *ops = info->fbcon_par;
@@ -236,15 +236,6 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
cursor.set = 0;
- if (softback_lines) {
- if (y + softback_lines >= vc->vc_rows) {
- mode = CM_ERASE;
- ops->cursor_flash = 0;
- return;
- } else
- y += softback_lines;
- }
-
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
index ce08251bfd38..31fe5dd651d4 100644
--- a/drivers/video/fbdev/core/fbcon_cw.c
+++ b/drivers/video/fbdev/core/fbcon_cw.c
@@ -184,7 +184,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
region.color = color;
region.rop = ROP_COPY;
- if (rw && !bottom_only) {
+ if ((int) rw > 0 && !bottom_only) {
region.dx = 0;
region.dy = info->var.yoffset + rs;
region.height = rw;
@@ -192,7 +192,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
info->fbops->fb_fillrect(info, &region);
}
- if (bh) {
+ if ((int) bh > 0) {
region.dx = info->var.xoffset;
region.dy = info->var.yoffset;
region.height = info->var.yres;
@@ -202,7 +202,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
}
static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
- int softback_lines, int fg, int bg)
+ int fg, int bg)
{
struct fb_cursor cursor;
struct fbcon_ops *ops = info->fbcon_par;
@@ -219,15 +219,6 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
cursor.set = 0;
- if (softback_lines) {
- if (y + softback_lines >= vc->vc_rows) {
- mode = CM_ERASE;
- ops->cursor_flash = 0;
- return;
- } else
- y += softback_lines;
- }
-
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
diff --git a/drivers/video/fbdev/core/fbcon_rotate.c b/drivers/video/fbdev/core/fbcon_rotate.c
index c0d445294aa7..ac72d4f85f7d 100644
--- a/drivers/video/fbdev/core/fbcon_rotate.c
+++ b/drivers/video/fbdev/core/fbcon_rotate.c
@@ -14,6 +14,7 @@
#include <linux/fb.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
+#include <linux/font.h>
#include <asm/types.h>
#include "fbcon.h"
#include "fbcon_rotate.h"
diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
index 1936afc78fec..b2dd1370e39b 100644
--- a/drivers/video/fbdev/core/fbcon_ud.c
+++ b/drivers/video/fbdev/core/fbcon_ud.c
@@ -231,7 +231,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
region.color = color;
region.rop = ROP_COPY;
- if (rw && !bottom_only) {
+ if ((int) rw > 0 && !bottom_only) {
region.dy = 0;
region.dx = info->var.xoffset;
region.width = rw;
@@ -239,7 +239,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
info->fbops->fb_fillrect(info, &region);
}
- if (bh) {
+ if ((int) bh > 0) {
region.dy = info->var.yoffset;
region.dx = info->var.xoffset;
region.height = bh;
@@ -249,7 +249,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
}
static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
- int softback_lines, int fg, int bg)
+ int fg, int bg)
{
struct fb_cursor cursor;
struct fbcon_ops *ops = info->fbcon_par;
@@ -267,15 +267,6 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
cursor.set = 0;
- if (softback_lines) {
- if (y + softback_lines >= vc->vc_rows) {
- mode = CM_ERASE;
- ops->cursor_flash = 0;
- return;
- } else
- y += softback_lines;
- }
-
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height));
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index c48f083d522a..84845275dbef 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1122,7 +1122,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
case FBIOGET_FSCREENINFO:
if (!lock_fb_info(info))
return -ENODEV;
- fix = info->fix;
+ memcpy(&fix, &info->fix, sizeof(fix));
unlock_fb_info(info);
ret = copy_to_user(argp, &fix, sizeof(fix)) ? -EFAULT : 0;
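
For context on the FBIOGET_FSCREENINFO hunk above: info->fix is copied into a local struct while lock_fb_info() is held, and copy_to_user() only runs after the lock has been dropped. A minimal sketch of that snapshot-then-copy shape (my_dev and my_stats are illustrative names, not fbdev types):

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct my_stats { u64 reads, writes; };
struct my_dev  { struct mutex lock; struct my_stats stats; };

static long my_get_stats(struct my_dev *d, void __user *argp)
{
	struct my_stats snap;

	mutex_lock(&d->lock);
	memcpy(&snap, &d->stats, sizeof(snap));	/* snapshot under the lock */
	mutex_unlock(&d->lock);

	/* The user copy can fault and sleep, so it runs without the lock held. */
	return copy_to_user(argp, &snap, sizeof(snap)) ? -EFAULT : 0;
}
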
diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
index 93390312957f..adff8d6ffe6f 100644
--- a/drivers/video/fbdev/core/tileblit.c
+++ b/drivers/video/fbdev/core/tileblit.c
@@ -13,6 +13,7 @@
#include <linux/fb.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
+#include <linux/font.h>
#include <asm/types.h>
#include "fbcon.h"
@@ -80,7 +81,7 @@ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info,
}
static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
- int softback_lines, int fg, int bg)
+ int fg, int bg)
{
struct fb_tilecursor cursor;
int use_sw = (vc->vc_cursor_type & 0x10);
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index cc1006375cac..f50cc1a7c31a 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -449,7 +449,7 @@ static int efifb_probe(struct platform_device *dev)
info->apertures->ranges[0].base = efifb_fix.smem_start;
info->apertures->ranges[0].size = size_remap;
- if (efi_enabled(EFI_BOOT) &&
+ if (efi_enabled(EFI_MEMMAP) &&
!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
if ((efifb_fix.smem_start + efifb_fix.smem_len) >
(md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 59e1cae57948..03c0b1b8747b 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -286,7 +286,7 @@ static int hga_card_detect(void)
hga_vram = ioremap(0xb0000, hga_vram_len);
if (!hga_vram)
- goto error;
+ return -ENOMEM;
if (request_region(0x3b0, 12, "hgafb"))
release_io_ports = 1;
@@ -346,13 +346,18 @@ static int hga_card_detect(void)
hga_type_name = "Hercules";
break;
}
- return 1;
+ return 0;
error:
if (release_io_ports)
release_region(0x3b0, 12);
if (release_io_port)
release_region(0x3bf, 1);
- return 0;
+
+ iounmap(hga_vram);
+
+ pr_err("hgafb: HGA card not detected.\n");
+
+ return -EINVAL;
}
/**
@@ -550,13 +555,11 @@ static struct fb_ops hgafb_ops = {
static int hgafb_probe(struct platform_device *pdev)
{
struct fb_info *info;
+ int ret;
- if (! hga_card_detect()) {
- printk(KERN_INFO "hgafb: HGA card not detected.\n");
- if (hga_vram)
- iounmap(hga_vram);
- return -EINVAL;
- }
+ ret = hga_card_detect();
+ if (ret)
+ return ret;
printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
hga_type_name, hga_vram_len/1024);
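
The hgafb hunks above convert hga_card_detect() from a 1/0 boolean into a 0/-errno return and move the iounmap() and error message into its own error path, so hgafb_probe() can simply propagate the code. A minimal sketch of that detect-and-propagate shape, not the hgafb code itself (my_card_detect, my_probe and my_signature_found are illustrative names):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *my_vram;

/* Illustrative only: the detect helper owns its own cleanup and reports
 * 0 or a negative errno, so probe() can just pass the code through. */
static int my_card_detect(void)
{
	my_vram = ioremap(0xb0000, 0x08000);
	if (!my_vram)
		return -ENOMEM;

	if (!my_signature_found()) {		/* hypothetical probe check */
		iounmap(my_vram);
		return -ENODEV;
	}
	return 0;
}

static int my_probe(struct platform_device *pdev)
{
	int ret = my_card_detect();

	if (ret)
		return ret;			/* -ENOMEM, -ENODEV, ... */

	/* ... register the framebuffer ... */
	return 0;
}
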
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 403d8cd3e582..c907f96d6890 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -712,7 +712,10 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto err1;
}
- fb_virt = ioremap(par->mem->start, screen_fb_size);
+ /*
+ * Map the VRAM cacheable for performance.
+ */
+ fb_virt = ioremap_wc(par->mem->start, screen_fb_size);
if (!fb_virt)
goto err2;
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index ffcf553719a3..ecdcf358ad5e 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1516,11 +1516,6 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
info->fix.smem_start = addr;
info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
0x400000 : 0x800000);
- if (!info->screen_base) {
- release_mem_region(addr, size);
- framebuffer_release(info);
- return -ENOMEM;
- }
info->fix.mmio_start = addr + 0x800000;
par->dc_regs = ioremap(addr + 0x800000, 0x1000);
par->cmap_regs_phys = addr + 0x840000;
diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
index 5d3a444083f7..2018e1ca33eb 100644
--- a/drivers/video/fbdev/neofb.c
+++ b/drivers/video/fbdev/neofb.c
@@ -1820,6 +1820,7 @@ static int neo_scan_monitor(struct fb_info *info)
#else
printk(KERN_ERR
"neofb: Only 640x480, 800x600/480 and 1024x768 panels are currently supported\n");
+ kfree(info->monspecs.modedb);
return -1;
#endif
default:
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index a06d9c25765c..0bd582e845f3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -531,8 +531,11 @@ int dispc_runtime_get(void)
DSSDBG("dispc_runtime_get\n");
r = pm_runtime_get_sync(&dispc.pdev->dev);
- WARN_ON(r < 0);
- return r < 0 ? r : 0;
+ if (WARN_ON(r < 0)) {
+ pm_runtime_put_sync(&dispc.pdev->dev);
+ return r;
+ }
+ return 0;
}
EXPORT_SYMBOL(dispc_runtime_get);
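
This dispc hunk, and the matching dsi/dss/hdmi4/hdmi5/venc hunks below, all apply the same fix: pm_runtime_get_sync() increments the usage counter even when the resume fails, so the error path has to drop that reference before returning. A minimal sketch of the pattern, assuming only a struct device pointer:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Illustrative only: balance the usage count when runtime resume fails. */
static int my_runtime_get(struct device *dev)
{
	int r = pm_runtime_get_sync(dev);

	if (r < 0) {
		/* get_sync bumped the count even though resume failed */
		pm_runtime_put_sync(dev);
		return r;
	}
	return 0;
}

Later kernels added pm_runtime_resume_and_get() to encapsulate exactly this get-or-put dance.
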
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index 8e1d60d48dbb..50792d31533b 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -1148,8 +1148,11 @@ static int dsi_runtime_get(struct platform_device *dsidev)
DSSDBG("dsi_runtime_get\n");
r = pm_runtime_get_sync(&dsi->pdev->dev);
- WARN_ON(r < 0);
- return r < 0 ? r : 0;
+ if (WARN_ON(r < 0)) {
+ pm_runtime_put_sync(&dsi->pdev->dev);
+ return r;
+ }
+ return 0;
}
static void dsi_runtime_put(struct platform_device *dsidev)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
index f0cac9e0eb94..faebf9a773ba 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
@@ -779,8 +779,11 @@ int dss_runtime_get(void)
DSSDBG("dss_runtime_get\n");
r = pm_runtime_get_sync(&dss.pdev->dev);
- WARN_ON(r < 0);
- return r < 0 ? r : 0;
+ if (WARN_ON(r < 0)) {
+ pm_runtime_put_sync(&dss.pdev->dev);
+ return r;
+ }
+ return 0;
}
void dss_runtime_put(void)
@@ -844,7 +847,7 @@ static const struct dss_features omap34xx_dss_feats = {
};
static const struct dss_features omap3630_dss_feats = {
- .fck_div_max = 32,
+ .fck_div_max = 31,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll4_ck",
.dpi_select_source = &dss_dpi_select_source_omap2_omap3,
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 28de56e21c74..9fd9a02bb871 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -50,9 +50,10 @@ static int hdmi_runtime_get(void)
DSSDBG("hdmi_runtime_get\n");
r = pm_runtime_get_sync(&hdmi.pdev->dev);
- WARN_ON(r < 0);
- if (r < 0)
+ if (WARN_ON(r < 0)) {
+ pm_runtime_put_sync(&hdmi.pdev->dev);
return r;
+ }
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index 2e2fcc3d6d4f..13f3a5ce5529 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -54,9 +54,10 @@ static int hdmi_runtime_get(void)
DSSDBG("hdmi_runtime_get\n");
r = pm_runtime_get_sync(&hdmi.pdev->dev);
- WARN_ON(r < 0);
- if (r < 0)
+ if (WARN_ON(r < 0)) {
+ pm_runtime_put_sync(&hdmi.pdev->dev);
return r;
+ }
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
index 392464da12e4..96714b4596d2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
@@ -402,8 +402,11 @@ static int venc_runtime_get(void)
DSSDBG("venc_runtime_get\n");
r = pm_runtime_get_sync(&venc.pdev->dev);
- WARN_ON(r < 0);
- return r < 0 ? r : 0;
+ if (WARN_ON(r < 0)) {
+ pm_runtime_put_sync(&venc.pdev->dev);
+ return r;
+ }
+ return 0;
}
static void venc_runtime_put(void)
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 8a53d1de611d..3fd2cb4cdfa9 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -1027,6 +1027,8 @@ static int __init pvr2fb_setup(char *options)
if (!options || !*options)
return 0;
+ cable_arg[0] = output_arg[0] = 0;
+
while ((this_opt = strsep(&options, ","))) {
if (!*this_opt)
continue;
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index d59c8a59f582..90dee3e6f8bc 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2446,8 +2446,8 @@ static int pxafb_remove(struct platform_device *dev)
free_pages_exact(fbi->video_mem, fbi->video_mem_size);
- dma_free_wc(&dev->dev, fbi->dma_buff_size, fbi->dma_buff,
- fbi->dma_buff_phys);
+ dma_free_coherent(&dev->dev, fbi->dma_buff_size, fbi->dma_buff,
+ fbi->dma_buff_phys);
return 0;
}
diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
index dfe3eb769638..fde27feae5d0 100644
--- a/drivers/video/fbdev/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
@@ -2428,6 +2428,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
i = 0;
+ if (SiS_Pr->ChipType == SIS_730)
+ queuedata = &FQBQData730[0];
+ else
+ queuedata = &FQBQData[0];
+
if(ModeNo > 0x13) {
/* Get VCLK */
@@ -2445,12 +2450,6 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
/* Get half colordepth */
colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)];
- if(SiS_Pr->ChipType == SIS_730) {
- queuedata = &FQBQData730[0];
- } else {
- queuedata = &FQBQData[0];
- }
-
do {
templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth;
diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
index 27a2b72e50e8..a8fb41f1a258 100644
--- a/drivers/video/fbdev/sis/init301.c
+++ b/drivers/video/fbdev/sis/init301.c
@@ -848,9 +848,7 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime)
SiS_DDC2Delay(SiS_Pr, 0x4000);
}
- } else if((SiS_Pr->SiS_IF_DEF_LVDS == 1) /* ||
- (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) ||
- (SiS_Pr->SiS_CustomT == CUT_CLEVO1400) */ ) { /* 315 series, LVDS; Special */
+ } else if (SiS_Pr->SiS_IF_DEF_LVDS == 1) { /* 315 series, LVDS; Special */
if(SiS_Pr->SiS_IF_DEF_CH70xx == 0) {
PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36);
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index f1dcc6766d1e..1781ca697f66 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -1429,6 +1429,8 @@ static int smtc_map_smem(struct smtcfb_info *sfb,
static void smtc_unmap_smem(struct smtcfb_info *sfb)
{
if (sfb && sfb->fb->screen_base) {
+ if (sfb->chip_id == 0x720)
+ sfb->fb->screen_base -= 0x00200000;
iounmap(sfb->fb->screen_base);
sfb->fb->screen_base = NULL;
}
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 5a0d6fb02bbc..f7823aa99340 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1020,6 +1020,7 @@ static void dlfb_ops_destroy(struct fb_info *info)
}
vfree(dlfb->backing_buffer);
kfree(dlfb->edid);
+ dlfb_free_urb_list(dlfb);
usb_put_dev(dlfb->udev);
kfree(dlfb);
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index 2c6a576ed84c..3c4d20618de4 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -243,7 +243,7 @@ static void vga16fb_update_fix(struct fb_info *info)
}
static void vga16fb_clock_chip(struct vga16fb_par *par,
- unsigned int pixclock,
+ unsigned int *pixclock,
const struct fb_info *info,
int mul, int div)
{
@@ -259,14 +259,14 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
{ 0 /* bad */, 0x00, 0x00}};
int err;
- pixclock = (pixclock * mul) / div;
+ *pixclock = (*pixclock * mul) / div;
best = vgaclocks;
- err = pixclock - best->pixclock;
+ err = *pixclock - best->pixclock;
if (err < 0) err = -err;
for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) {
int tmp;
- tmp = pixclock - ptr->pixclock;
+ tmp = *pixclock - ptr->pixclock;
if (tmp < 0) tmp = -tmp;
if (tmp < err) {
err = tmp;
@@ -275,7 +275,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
}
par->misc |= best->misc;
par->clkdiv = best->seq_clock_mode;
- pixclock = (best->pixclock * div) / mul;
+ *pixclock = (best->pixclock * div) / mul;
}
#define FAIL(X) return -EINVAL
@@ -497,10 +497,10 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var,
if (mode & MODE_8BPP)
/* pixel clock == vga clock / 2 */
- vga16fb_clock_chip(par, var->pixclock, info, 1, 2);
+ vga16fb_clock_chip(par, &var->pixclock, info, 1, 2);
else
/* pixel clock == vga clock */
- vga16fb_clock_chip(par, var->pixclock, info, 1, 1);
+ vga16fb_clock_chip(par, &var->pixclock, info, 1, 1);
var->red.offset = var->green.offset = var->blue.offset =
var->transp.offset = 0;
@@ -1121,7 +1121,7 @@ static void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *i
char oldop = setop(0);
char oldsr = setsr(0);
char oldmask = selectmask();
- const char *cdat = image->data;
+ const unsigned char *cdat = image->data;
u32 dx = image->dx;
char __iomem *where;
int y;
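
The vga16fb_clock_chip() change above turns pixclock into a pointer parameter so the rounded clock the function computes is written back into var->pixclock instead of being lost in a by-value copy. A small standalone C program showing why the output parameter matters (illustrative values only):

#include <stdio.h>

/* By value: the caller never sees the adjusted clock. */
static void adjust_by_value(unsigned int pixclock)
{
	pixclock = (pixclock / 1000) * 1000;	/* change is lost on return */
}

/* By pointer: the adjusted clock is written back to the caller. */
static void adjust_by_pointer(unsigned int *pixclock)
{
	*pixclock = (*pixclock / 1000) * 1000;
}

int main(void)
{
	unsigned int clk = 39721;

	adjust_by_value(clk);
	printf("by value:   %u\n", clk);	/* still 39721 */
	adjust_by_pointer(&clk);
	printf("by pointer: %u\n", clk);	/* now 39000 */
	return 0;
}
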
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index 696106ecdff0..967030176d87 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -583,6 +583,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par)
memsize=par->mach->mem->size;
memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize);
vfree(par->saved_extmem);
+ par->saved_extmem = NULL;
}
if (par->saved_intmem) {
memsize=MEM_INT_SIZE;
@@ -591,6 +592,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par)
else
memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize);
vfree(par->saved_intmem);
+ par->saved_intmem = NULL;
}
}
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 1bbd910d4ddb..2a7f7f47fe89 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
unsigned int i;
long ret = 0;
- int num_pinned; /* return value from get_user_pages() */
+ int num_pinned = 0; /* return value from get_user_pages_fast() */
phys_addr_t remote_paddr; /* The next address in the remote buffer */
uint32_t count; /* The number of bytes left to copy */
@@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
return -EINVAL;
/*
- * The array of pages returned by get_user_pages() covers only
+ * The array of pages returned by get_user_pages_fast() covers only
* page-aligned memory. Since the user buffer is probably not
* page-aligned, we need to handle the discrepancy.
*
@@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
/*
* 'pages' is an array of struct page pointers that's initialized by
- * get_user_pages().
+ * get_user_pages_fast().
*/
pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
@@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
if (!sg_list_unaligned) {
pr_debug("fsl-hv: could not allocate S/G list\n");
ret = -ENOMEM;
- goto exit;
+ goto free_pages;
}
sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
@@ -250,7 +250,6 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
num_pages, param.source != -1, pages);
if (num_pinned != num_pages) {
- /* get_user_pages() failed */
pr_debug("fsl-hv: could not lock source buffer\n");
ret = (num_pinned < 0) ? num_pinned : -EFAULT;
goto exit;
@@ -292,13 +291,13 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
virt_to_phys(sg_list), num_pages);
exit:
- if (pages) {
- for (i = 0; i < num_pages; i++)
- if (pages[i])
- put_page(pages[i]);
+ if (pages && (num_pinned > 0)) {
+ for (i = 0; i < num_pinned; i++)
+ put_page(pages[i]);
}
kfree(sg_list_unaligned);
+free_pages:
kfree(pages);
if (!ret)
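
The ioctl_memcpy() hunks above make the cleanup track how many pages were actually pinned: num_pinned is initialised, only the first num_pinned entries are released with put_page(), and an allocation failure jumps to a label that skips the unpin loop entirely. A minimal sketch of unwinding a partial pin, using the (start, nr_pages, write, pages) form of get_user_pages_fast() that this tree uses (newer kernels take gup_flags instead of write):

#include <linux/mm.h>
#include <linux/slab.h>

static long my_pin_and_use(unsigned long start, int num_pages, int write)
{
	struct page **pages;
	int num_pinned, i;
	long ret = 0;

	pages = kcalloc(num_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	num_pinned = get_user_pages_fast(start, num_pages, write, pages);
	if (num_pinned != num_pages) {
		ret = (num_pinned < 0) ? num_pinned : -EFAULT;
		goto unpin;
	}

	/* ... hand the pinned pages to the hypervisor, DMA engine, etc. ... */

unpin:
	for (i = 0; i < num_pinned; i++)	/* only what was actually pinned */
		put_page(pages[i]);
	kfree(pages);
	return ret;
}
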
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 0afef60d0638..b3bc70a505fa 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1408,7 +1408,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
or_mask = caps->u.in.or_mask;
not_mask = caps->u.in.not_mask;
- if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
+ if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
return -EINVAL;
ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
@@ -1482,7 +1482,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
- req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
+ req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
+ req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT)
return vbg_ioctl_vmmrequest(gdev, session, data);
if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
@@ -1520,6 +1521,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
case VBG_IOCTL_HGCM_CALL(0):
return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
case VBG_IOCTL_LOG(0):
+ case VBG_IOCTL_LOG_ALT(0):
return vbg_ioctl_log(data);
}
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 7ad9ec45bfa9..61059e4d142b 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -15,6 +15,21 @@
#include <linux/vboxguest.h>
#include "vmmdev.h"
+/*
+ * The mainline kernel version (this version) of the vboxguest module
+ * contained a bug where it defined VBGL_IOCTL_VMMDEV_REQUEST_BIG and
+ * VBGL_IOCTL_LOG using _IOC(_IOC_READ | _IOC_WRITE, 'V', ...) instead
+ * of _IO(V, ...) as the out of tree VirtualBox upstream version does.
+ *
+ * These _ALT definitions keep compatibility with the wrong defines the
+ * mainline kernel version used for a while.
+ * Note the VirtualBox userspace bits have always been built against
+ * VirtualBox upstream's headers, so this is likely not necessary. But
+ * we must never break our ABI so we keep these around to be 100% sure.
+ */
+#define VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)
+#define VBG_IOCTL_LOG_ALT(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s)
+
struct vbg_session;
/** VBox guest memory balloon. */
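
The _ALT defines above exist because an ioctl number also encodes direction and size bits, so _IO('V', 3) and _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) are different 32-bit values even though the type and number match; accepting both keeps binaries built against either header working. A small userspace program makes the difference visible:

#include <stdio.h>
#include <linux/ioctl.h>	/* _IO, _IOC, _IOC_READ, _IOC_WRITE */

int main(void)
{
	unsigned int as_io  = _IO('V', 3);
	unsigned int as_ioc = _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0);

	/* Same type ('V') and number (3), different direction bits. */
	printf("_IO('V', 3)                 = 0x%08x\n", as_io);
	printf("_IOC(READ|WRITE, 'V', 3, 0) = 0x%08x\n", as_ioc);
	printf("equal: %s\n", as_io == as_ioc ? "yes" : "no");
	return 0;
}
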
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 6e2a9619192d..94e055ee7ad6 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -112,7 +112,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
* the need for a bounce-buffer and another copy later on.
*/
is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
- req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
+ req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
+ req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT;
if (is_vmmdev_req)
buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
index 5e2ae978935d..63733ab2ab38 100644
--- a/drivers/virt/vboxguest/vmmdev.h
+++ b/drivers/virt/vboxguest/vmmdev.h
@@ -206,6 +206,8 @@ VMMDEV_ASSERT_SIZE(vmmdev_mask, 24 + 8);
* not.
*/
#define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2)
+/* The mask of valid capabilities, for sanity checking. */
+#define VMMDEV_GUEST_CAPABILITIES_MASK 0x00000007U
/** struct vmmdev_hypervisorinfo - Hypervisor info structure. */
struct vmmdev_hypervisorinfo {
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 6228b48d1e12..df7980aef927 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -828,6 +828,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ if (unlikely(vq->broken))
+ return false;
+
virtio_mb(vq->weak_barriers);
return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 50b46c4399ea..075454053e5e 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -15,7 +15,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/jiffies.h>
+#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
@@ -48,12 +48,12 @@ struct mxc_w1_device {
static u8 mxc_w1_ds2_reset_bus(void *data)
{
struct mxc_w1_device *dev = data;
- unsigned long timeout;
+ ktime_t timeout;
writeb(MXC_W1_CONTROL_RPP, dev->regs + MXC_W1_CONTROL);
/* Wait for reset sequence 511+512us, use 1500us for sure */
- timeout = jiffies + usecs_to_jiffies(1500);
+ timeout = ktime_add_us(ktime_get(), 1500);
udelay(511 + 512);
@@ -63,7 +63,7 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
/* PST bit is valid after the RPP bit is self-cleared */
if (!(ctrl & MXC_W1_CONTROL_RPP))
return !(ctrl & MXC_W1_CONTROL_PST);
- } while (time_is_after_jiffies(timeout));
+ } while (ktime_before(ktime_get(), timeout));
return 1;
}
@@ -76,12 +76,12 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
{
struct mxc_w1_device *dev = data;
- unsigned long timeout;
+ ktime_t timeout;
writeb(MXC_W1_CONTROL_WR(bit), dev->regs + MXC_W1_CONTROL);
/* Wait for read/write bit (60us, Max 120us), use 200us for sure */
- timeout = jiffies + usecs_to_jiffies(200);
+ timeout = ktime_add_us(ktime_get(), 200);
udelay(60);
@@ -91,7 +91,7 @@ static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
/* RDST bit is valid after the WR1/RD bit is self-cleared */
if (!(ctrl & MXC_W1_CONTROL_WR(bit)))
return !!(ctrl & MXC_W1_CONTROL_RDST);
- } while (time_is_after_jiffies(timeout));
+ } while (ktime_before(ktime_get(), timeout));
return 0;
}
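
The mxc_w1 hunks above switch the sub-millisecond busy-wait deadlines from jiffies to ktime_t, which has far finer resolution than the jiffy tick, so waits of a few hundred microseconds are no longer at the mercy of tick rounding. A minimal sketch of the ktime-based poll loop, assuming a hypothetical my_done() register check:

#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/delay.h>

/* Illustrative only: poll a hardware flag against a microsecond deadline. */
static bool my_poll_done(unsigned int timeout_us)
{
	ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);

	do {
		if (my_done())			/* hypothetical status read */
			return true;
		udelay(10);
	} while (ktime_before(ktime_get(), deadline));

	return false;				/* timed out */
}
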
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3099052e1243..0667bc6e7d23 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -176,7 +176,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
/* check irqstatus */
if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
dev_dbg(hdq_data->dev, "timeout waiting for"
- " TXCOMPLETE/RXCOMPLETE, %x", *status);
+ " TXCOMPLETE/RXCOMPLETE, %x\n", *status);
ret = -ETIMEDOUT;
goto out;
}
@@ -187,7 +187,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
OMAP_HDQ_FLAG_CLEAR, &tmp_status);
if (ret) {
dev_dbg(hdq_data->dev, "timeout waiting GO bit"
- " return to zero, %x", tmp_status);
+ " return to zero, %x\n", tmp_status);
}
out:
@@ -203,7 +203,7 @@ static irqreturn_t hdq_isr(int irq, void *_hdq)
spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
- dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
+ dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus);
if (hdq_data->hdq_irqstatus &
(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
@@ -311,7 +311,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
tmp_status = hdq_data->hdq_irqstatus;
/* check irqstatus */
if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
- dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
+ dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n",
tmp_status);
ret = -ETIMEDOUT;
goto out;
@@ -338,7 +338,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
&tmp_status);
if (ret)
dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
- " return to zero, %x", tmp_status);
+ " return to zero, %x\n", tmp_status);
out:
mutex_unlock(&hdq_data->hdq_mutex);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 709d4de11f40..fa7f4c61524d 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -515,7 +515,7 @@ config SUNXI_WATCHDOG
config COH901327_WATCHDOG
bool "ST-Ericsson COH 901 327 watchdog"
- depends on ARCH_U300 || (ARM && COMPILE_TEST)
+ depends on ARCH_U300 || (ARM && COMMON_CLK && COMPILE_TEST)
default y if MACH_U300
select WATCHDOG_CORE
help
@@ -651,6 +651,7 @@ config MOXART_WDT
config SIRFSOC_WATCHDOG
tristate "SiRFSOC watchdog"
+ depends on HAS_IOMEM
depends on ARCH_SIRF || COMPILE_TEST
select WATCHDOG_CORE
default y
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index 7f0a8e635286..132d45d003ce 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -60,11 +60,6 @@ static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt,
unsigned int regval)
{
struct da9062 *chip = wdt->hw;
- int ret;
-
- ret = da9062_reset_watchdog_timer(wdt);
- if (ret)
- return ret;
regmap_update_bits(chip->regmap,
DA9062AA_CONTROL_D,
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 9a1c761258ce..5d0ea419070d 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -688,9 +688,9 @@ static int __init watchdog_init(int sioaddr)
* into the module have been registered yet.
*/
watchdog.sioaddr = sioaddr;
- watchdog.ident.options = WDIOC_SETTIMEOUT
- | WDIOF_MAGICCLOSE
- | WDIOF_KEEPALIVEPING;
+ watchdog.ident.options = WDIOF_MAGICCLOSE
+ | WDIOF_KEEPALIVEPING
+ | WDIOF_CARDRESET;
snprintf(watchdog.ident.identity,
sizeof(watchdog.ident.identity), "%s watchdog",
@@ -704,6 +704,13 @@ static int __init watchdog_init(int sioaddr)
wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF);
watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS);
+ /*
+ * We don't want WDTMOUT_STS to stick around till regular reboot.
+ * Write 1 to the bit to clear it to zero.
+ */
+ superio_outb(sioaddr, F71808FG_REG_WDT_CONF,
+ wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS));
+
superio_exit(sioaddr);
err = watchdog_set_timeout(timeout);
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
index 8023cf28657a..c862dd38b6f5 100644
--- a/drivers/watchdog/mei_wdt.c
+++ b/drivers/watchdog/mei_wdt.c
@@ -382,6 +382,7 @@ static int mei_wdt_register(struct mei_wdt *wdt)
watchdog_set_drvdata(&wdt->wdd, wdt);
watchdog_stop_on_reboot(&wdt->wdd);
+ watchdog_stop_on_unregister(&wdt->wdd);
ret = watchdog_register_device(&wdt->wdd);
if (ret) {
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 780971318810..1a0005a8fadb 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -121,7 +121,7 @@ static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action,
*/
wmb();
- msleep(150);
+ mdelay(150);
return 0;
}
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index a281aa84bfb1..4c3b4ea4e17f 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -244,6 +244,8 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
rdc321x_wdt_device.sb_pdev = pdata->sb_pdev;
rdc321x_wdt_device.base_reg = r->start;
+ rdc321x_wdt_device.queue = 0;
+ rdc321x_wdt_device.default_ticks = ticks;
err = misc_register(&rdc321x_wdt_misc);
if (err < 0) {
@@ -258,14 +260,11 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
rdc321x_wdt_device.base_reg, RDC_WDT_RST);
init_completion(&rdc321x_wdt_device.stop);
- rdc321x_wdt_device.queue = 0;
clear_bit(0, &rdc321x_wdt_device.inuse);
timer_setup(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
- rdc321x_wdt_device.default_ticks = ticks;
-
dev_info(&pdev->dev, "watchdog init success\n");
return 0;
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
index 87eaf357ae01..adf015aa4126 100644
--- a/drivers/watchdog/sp5100_tco.h
+++ b/drivers/watchdog/sp5100_tco.h
@@ -70,7 +70,7 @@
#define EFCH_PM_DECODEEN_WDT_TMREN BIT(7)
-#define EFCH_PM_DECODEEN3 0x00
+#define EFCH_PM_DECODEEN3 0x03
#define EFCH_PM_DECODEEN_SECOND_RES GENMASK(1, 0)
#define EFCH_PM_WATCHDOG_DISABLE ((u8)GENMASK(3, 2))
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 072986d461b7..d8876fba686d 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -137,10 +137,14 @@ wdt_restart(struct watchdog_device *wdd, unsigned long mode, void *cmd)
{
struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
+ writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
writel_relaxed(0, wdt->base + WDTCONTROL);
writel_relaxed(0, wdt->base + WDTLOAD);
writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
+ /* Flush posted writes. */
+ readl_relaxed(wdt->base + WDTLOCK);
+
return 0;
}
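
The sp805 restart hunk above adds two things: an unlock write to WDTLOCK, without which the following control and load writes are ignored, and a dummy readl_relaxed() so the posted writes are actually pushed out to the device before the function returns. A minimal sketch of the write-then-read-back flush, with placeholder offsets and key values rather than the SP805's real ones:

#include <linux/io.h>

#define MY_LOCK_REG	0x00	/* placeholder offsets/values, not SP805's */
#define MY_CTRL_REG	0x04
#define MY_UNLOCK_KEY	0x12345678

/* Illustrative only: relaxed MMIO writes may sit in a write buffer; a read
 * from the same device forces them out before we stop executing. */
static void my_kick_reset(void __iomem *base)
{
	writel_relaxed(MY_UNLOCK_KEY, base + MY_LOCK_REG);	/* unlock regs */
	writel_relaxed(0, base + MY_CTRL_REG);			/* program reset */
	readl_relaxed(base + MY_LOCK_REG);			/* flush posted writes */
}
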
diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
index b6c65afd3677..86cf93af951b 100644
--- a/drivers/watchdog/sprd_wdt.c
+++ b/drivers/watchdog/sprd_wdt.c
@@ -116,18 +116,6 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout,
u32 tmr_step = timeout * SPRD_WDT_CNT_STEP;
u32 prtmr_step = pretimeout * SPRD_WDT_CNT_STEP;
- sprd_wdt_unlock(wdt->base);
- writel_relaxed((tmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
- SPRD_WDT_LOW_VALUE_MASK, wdt->base + SPRD_WDT_LOAD_HIGH);
- writel_relaxed((tmr_step & SPRD_WDT_LOW_VALUE_MASK),
- wdt->base + SPRD_WDT_LOAD_LOW);
- writel_relaxed((prtmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
- SPRD_WDT_LOW_VALUE_MASK,
- wdt->base + SPRD_WDT_IRQ_LOAD_HIGH);
- writel_relaxed(prtmr_step & SPRD_WDT_LOW_VALUE_MASK,
- wdt->base + SPRD_WDT_IRQ_LOAD_LOW);
- sprd_wdt_lock(wdt->base);
-
/*
* Waiting the load value operation done,
* it needs two or three RTC clock cycles.
@@ -142,6 +130,19 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout,
if (delay_cnt >= SPRD_WDT_LOAD_TIMEOUT)
return -EBUSY;
+
+ sprd_wdt_unlock(wdt->base);
+ writel_relaxed((tmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
+ SPRD_WDT_LOW_VALUE_MASK, wdt->base + SPRD_WDT_LOAD_HIGH);
+ writel_relaxed((tmr_step & SPRD_WDT_LOW_VALUE_MASK),
+ wdt->base + SPRD_WDT_LOAD_LOW);
+ writel_relaxed((prtmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
+ SPRD_WDT_LOW_VALUE_MASK,
+ wdt->base + SPRD_WDT_IRQ_LOAD_HIGH);
+ writel_relaxed(prtmr_step & SPRD_WDT_LOW_VALUE_MASK,
+ wdt->base + SPRD_WDT_IRQ_LOAD_LOW);
+ sprd_wdt_lock(wdt->base);
+
return 0;
}
@@ -360,15 +361,10 @@ static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
if (ret)
return ret;
- if (watchdog_active(&wdt->wdd)) {
+ if (watchdog_active(&wdt->wdd))
ret = sprd_wdt_start(&wdt->wdd);
- if (ret) {
- sprd_wdt_disable(wdt);
- return ret;
- }
- }
- return 0;
+ return ret;
}
static const struct dev_pm_ops sprd_wdt_pm_ops = {
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 8b1f37ffb65a..5c600a370650 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -246,15 +246,19 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
}
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
- wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
-
- ret = register_reboot_notifier(&wdd->reboot_nb);
- if (ret) {
- pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
- wdd->id, ret);
- watchdog_dev_unregister(wdd);
- ida_simple_remove(&watchdog_ida, id);
- return ret;
+ if (!wdd->ops->stop)
+ pr_warn("watchdog%d: stop_on_reboot not supported\n", wdd->id);
+ else {
+ wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
+
+ ret = register_reboot_notifier(&wdd->reboot_nb);
+ if (ret) {
+ pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
+ wdd->id, ret);
+ watchdog_dev_unregister(wdd);
+ ida_simple_remove(&watchdog_ida, id);
+ return ret;
+ }
}
}
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index e64aa88e99da..8fe59b7d8eec 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -264,6 +264,7 @@ static int watchdog_start(struct watchdog_device *wdd)
if (err == 0) {
set_bit(WDOG_ACTIVE, &wdd->status);
wd_data->last_keepalive = started_at;
+ wd_data->last_hw_keepalive = started_at;
watchdog_update_worker(wdd);
}
@@ -943,8 +944,19 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
wd_data->wdd = wdd;
wdd->wd_data = wd_data;
- if (IS_ERR_OR_NULL(watchdog_kworker))
+ if (IS_ERR_OR_NULL(watchdog_kworker)) {
+ kfree(wd_data);
return -ENODEV;
+ }
+
+ device_initialize(&wd_data->dev);
+ wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
+ wd_data->dev.class = &watchdog_class;
+ wd_data->dev.parent = wdd->parent;
+ wd_data->dev.groups = wdd->groups;
+ wd_data->dev.release = watchdog_core_data_release;
+ dev_set_drvdata(&wd_data->dev, wdd);
+ dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
kthread_init_work(&wd_data->work, watchdog_ping_work);
hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -961,20 +973,11 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
pr_err("%s: a legacy watchdog module is probably present.\n",
wdd->info->identity);
old_wd_data = NULL;
- kfree(wd_data);
+ put_device(&wd_data->dev);
return err;
}
}
- device_initialize(&wd_data->dev);
- wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
- wd_data->dev.class = &watchdog_class;
- wd_data->dev.parent = wdd->parent;
- wd_data->dev.groups = wdd->groups;
- wd_data->dev.release = watchdog_core_data_release;
- dev_set_drvdata(&wd_data->dev, wdd);
- dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
-
/* Fill in the data structures */
cdev_init(&wd_data->cdev, &watchdog_fops);
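
The watchdog_cdev_register() hunks above move device_initialize() and the dev_set_* calls ahead of the legacy /dev/watchdog registration, so every failure after that point can use put_device() and let the release callback free wd_data instead of mixing kfree() with refcounted cleanup; only the failure before the device is initialised still kfree()s directly. A minimal sketch of that refcounted-teardown rule (my_data, my_release and some_setup_fails are illustrative):

#include <linux/device.h>
#include <linux/slab.h>

struct my_data {
	struct device dev;
	/* ... driver state ... */
};

static void my_release(struct device *dev)
{
	kfree(container_of(dev, struct my_data, dev));
}

static int my_register(struct device *parent)
{
	struct my_data *d = kzalloc(sizeof(*d), GFP_KERNEL);
	int ret;

	if (!d)
		return -ENOMEM;

	device_initialize(&d->dev);		/* refcount is live from here */
	d->dev.parent = parent;
	d->dev.release = my_release;
	dev_set_name(&d->dev, "my-dev");

	if (some_setup_fails()) {		/* hypothetical failure */
		put_device(&d->dev);		/* not kfree(): release() frees */
		return -EIO;
	}

	ret = device_add(&d->dev);
	if (ret)
		put_device(&d->dev);
	return ret;
}
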
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 6fa7209f24f4..b23edf64c2b2 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -570,11 +570,13 @@ static int add_ballooned_pages(int nr_pages)
if (xen_hotplug_unpopulated) {
st = reserve_additional_memory();
if (st != BP_ECANCELED) {
+ int rc;
+
mutex_unlock(&balloon_mutex);
- wait_event(balloon_wq,
+ rc = wait_event_interruptible(balloon_wq,
!list_empty(&ballooned_pages));
mutex_lock(&balloon_mutex);
- return 0;
+ return rc ? -ENOMEM : 0;
}
}
@@ -632,6 +634,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
out_undo:
mutex_unlock(&balloon_mutex);
free_xenballooned_pages(pgno, pages);
+ /*
+ * NB: free_xenballooned_pages will only subtract pgno pages, but since
+ * target_unpopulated is incremented with nr_pages at the start we need
+ * to remove the remaining ones also, or accounting will be screwed.
+ */
+ balloon_stats.target_unpopulated -= nr_pages - pgno;
return ret;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);
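
The add_ballooned_pages() hunk above switches the sleep to wait_event_interruptible() and propagates a failure instead of unconditionally returning success, and alloc_xenballooned_pages() now rolls target_unpopulated back by the pages it never allocated so the balloon accounting stays consistent on the error path. A minimal sketch of checking an interruptible wait (my_wq and my_ready are illustrative):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool my_ready;

/* Illustrative only: an interruptible wait must have its return value
 * checked, otherwise a pending signal turns into a bogus "success". */
static int my_wait_for_pages(void)
{
	int rc = wait_event_interruptible(my_wq, my_ready);

	return rc ? -ENOMEM : 0;	/* interrupted: report the failure */
}
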
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index 8edef51c92e5..77cc80bcb479 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -47,6 +47,11 @@ static unsigned evtchn_2l_max_channels(void)
return EVTCHN_2L_NR_CHANNELS;
}
+static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
+{
+ clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
+}
+
static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
{
clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
@@ -71,12 +76,6 @@ static bool evtchn_2l_is_pending(unsigned port)
return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}
-static bool evtchn_2l_test_and_set_mask(unsigned port)
-{
- struct shared_info *s = HYPERVISOR_shared_info;
- return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
-}
-
static void evtchn_2l_mask(unsigned port)
{
struct shared_info *s = HYPERVISOR_shared_info;
@@ -91,6 +90,8 @@ static void evtchn_2l_unmask(unsigned port)
BUG_ON(!irqs_disabled());
+ smp_wmb(); /* All writes before unmask must be visible. */
+
if (unlikely((cpu != cpu_from_evtchn(port))))
do_hypercall = 1;
else {
@@ -159,7 +160,7 @@ static inline xen_ulong_t active_evtchns(unsigned int cpu,
* a bitset of words which contain pending event bits. The second
* level is a bitset of pending events themselves.
*/
-static void evtchn_2l_handle_events(unsigned cpu)
+static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
{
int irq;
xen_ulong_t pending_words;
@@ -240,10 +241,7 @@ static void evtchn_2l_handle_events(unsigned cpu)
/* Process port. */
port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
- irq = get_evtchn_to_irq(port);
-
- if (irq != -1)
- generic_handle_irq(irq);
+ handle_irq_for_port(port, ctrl);
bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
@@ -355,18 +353,27 @@ static void evtchn_2l_resume(void)
EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
}
+static int evtchn_2l_percpu_deinit(unsigned int cpu)
+{
+ memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
+ EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
+
+ return 0;
+}
+
static const struct evtchn_ops evtchn_ops_2l = {
.max_channels = evtchn_2l_max_channels,
.nr_channels = evtchn_2l_max_channels,
+ .remove = evtchn_2l_remove,
.bind_to_cpu = evtchn_2l_bind_to_cpu,
.clear_pending = evtchn_2l_clear_pending,
.set_pending = evtchn_2l_set_pending,
.is_pending = evtchn_2l_is_pending,
- .test_and_set_mask = evtchn_2l_test_and_set_mask,
.mask = evtchn_2l_mask,
.unmask = evtchn_2l_unmask,
.handle_events = evtchn_2l_handle_events,
.resume = evtchn_2l_resume,
+ .percpu_deinit = evtchn_2l_percpu_deinit,
};
void __init xen_evtchn_2l_init(void)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 8d49b91d92cd..b370144682ed 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -32,6 +32,10 @@
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/cpuhotplug.h>
+#include <linux/atomic.h>
+#include <linux/ktime.h>
#ifdef CONFIG_X86
#include <asm/desc.h>
@@ -61,6 +65,15 @@
#include "events_internal.h"
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "xen."
+
+static uint __read_mostly event_loop_timeout = 2;
+module_param(event_loop_timeout, uint, 0644);
+
+static uint __read_mostly event_eoi_delay = 10;
+module_param(event_eoi_delay, uint, 0644);
+
const struct evtchn_ops *evtchn_ops;
/*
@@ -69,6 +82,25 @@ const struct evtchn_ops *evtchn_ops;
*/
static DEFINE_MUTEX(irq_mapping_update_lock);
+/*
+ * Lock protecting event handling loop against removing event channels.
+ * Adding of event channels is no issue as the associated IRQ becomes active
+ * only after everything is setup (before request_[threaded_]irq() the handler
+ * can't be entered for an event, as the event channel will be unmasked only
+ * then).
+ */
+static DEFINE_RWLOCK(evtchn_rwlock);
+
+/*
+ * Lock hierarchy:
+ *
+ * irq_mapping_update_lock
+ * evtchn_rwlock
+ * IRQ-desc lock
+ * percpu eoi_list_lock
+ * irq_info->lock
+ */
+
static LIST_HEAD(xen_irq_list_head);
/* IRQ <-> VIRQ mapping. */
@@ -90,18 +122,23 @@ static bool (*pirq_needs_eoi)(unsigned irq);
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)
+static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];
+
static struct irq_chip xen_dynamic_chip;
+static struct irq_chip xen_lateeoi_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);
+static DEFINE_PER_CPU(unsigned int, irq_epoch);
+
static void clear_evtchn_to_irq_row(unsigned row)
{
unsigned col;
for (col = 0; col < EVTCHN_PER_ROW; col++)
- evtchn_to_irq[row][col] = -1;
+ WRITE_ONCE(evtchn_to_irq[row][col], -1);
}
static void clear_evtchn_to_irq_all(void)
@@ -138,7 +175,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
clear_evtchn_to_irq_row(row);
}
- evtchn_to_irq[row][col] = irq;
+ WRITE_ONCE(evtchn_to_irq[row][col], irq);
return 0;
}
@@ -148,13 +185,24 @@ int get_evtchn_to_irq(unsigned evtchn)
return -1;
if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
return -1;
- return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
+ return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
}
/* Get info for IRQ */
struct irq_info *info_for_irq(unsigned irq)
{
- return irq_get_handler_data(irq);
+ if (irq < nr_legacy_irqs())
+ return legacy_info_ptrs[irq];
+ else
+ return irq_get_chip_data(irq);
+}
+
+static void set_info_for_irq(unsigned int irq, struct irq_info *info)
+{
+ if (irq < nr_legacy_irqs())
+ legacy_info_ptrs[irq] = info;
+ else
+ irq_set_chip_data(irq, info);
}
/* Constructors for packed IRQ information. */
@@ -172,6 +220,8 @@ static int xen_irq_info_common_setup(struct irq_info *info,
info->irq = irq;
info->evtchn = evtchn;
info->cpu = cpu;
+ info->mask_reason = EVT_MASK_REASON_EXPLICIT;
+ raw_spin_lock_init(&info->lock);
ret = set_evtchn_to_irq(evtchn, irq);
if (ret < 0)
@@ -238,6 +288,7 @@ static int xen_irq_info_pirq_setup(unsigned irq,
static void xen_irq_info_cleanup(struct irq_info *info)
{
set_evtchn_to_irq(info->evtchn, -1);
+ xen_evtchn_port_remove(info->evtchn, info->cpu);
info->evtchn = 0;
}
@@ -246,10 +297,14 @@ static void xen_irq_info_cleanup(struct irq_info *info)
*/
unsigned int evtchn_from_irq(unsigned irq)
{
- if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
+ const struct irq_info *info = NULL;
+
+ if (likely(irq < nr_irqs))
+ info = info_for_irq(irq);
+ if (!info)
return 0;
- return info_for_irq(irq)->evtchn;
+ return info->evtchn;
}
unsigned irq_from_evtchn(unsigned int evtchn)
@@ -314,6 +369,34 @@ unsigned int cpu_from_evtchn(unsigned int evtchn)
return ret;
}
+static void do_mask(struct irq_info *info, u8 reason)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&info->lock, flags);
+
+ if (!info->mask_reason)
+ mask_evtchn(info->evtchn);
+
+ info->mask_reason |= reason;
+
+ raw_spin_unlock_irqrestore(&info->lock, flags);
+}
+
+static void do_unmask(struct irq_info *info, u8 reason)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&info->lock, flags);
+
+ info->mask_reason &= ~reason;
+
+ if (!info->mask_reason)
+ unmask_evtchn(info->evtchn);
+
+ raw_spin_unlock_irqrestore(&info->lock, flags);
+}
+
#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
@@ -360,9 +443,157 @@ void notify_remote_via_irq(int irq)
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
+struct lateeoi_work {
+ struct delayed_work delayed;
+ spinlock_t eoi_list_lock;
+ struct list_head eoi_list;
+};
+
+static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);
+
+static void lateeoi_list_del(struct irq_info *info)
+{
+ struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
+ unsigned long flags;
+
+ spin_lock_irqsave(&eoi->eoi_list_lock, flags);
+ list_del_init(&info->eoi_list);
+ spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
+}
+
+static void lateeoi_list_add(struct irq_info *info)
+{
+ struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
+ struct irq_info *elem;
+ u64 now = get_jiffies_64();
+ unsigned long delay;
+ unsigned long flags;
+
+ if (now < info->eoi_time)
+ delay = info->eoi_time - now;
+ else
+ delay = 1;
+
+ spin_lock_irqsave(&eoi->eoi_list_lock, flags);
+
+ if (list_empty(&eoi->eoi_list)) {
+ list_add(&info->eoi_list, &eoi->eoi_list);
+ mod_delayed_work_on(info->eoi_cpu, system_wq,
+ &eoi->delayed, delay);
+ } else {
+ list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
+ if (elem->eoi_time <= info->eoi_time)
+ break;
+ }
+ list_add(&info->eoi_list, &elem->eoi_list);
+ }
+
+ spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
+}
+
+static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
+{
+ evtchn_port_t evtchn;
+ unsigned int cpu;
+ unsigned int delay = 0;
+
+ evtchn = info->evtchn;
+ if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
+ return;
+
+ if (spurious) {
+ if ((1 << info->spurious_cnt) < (HZ << 2))
+ info->spurious_cnt++;
+ if (info->spurious_cnt > 1) {
+ delay = 1 << (info->spurious_cnt - 2);
+ if (delay > HZ)
+ delay = HZ;
+ if (!info->eoi_time)
+ info->eoi_cpu = smp_processor_id();
+ info->eoi_time = get_jiffies_64() + delay;
+ }
+ } else {
+ info->spurious_cnt = 0;
+ }
+
+ cpu = info->eoi_cpu;
+ if (info->eoi_time &&
+ (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
+ lateeoi_list_add(info);
+ return;
+ }
+
+ info->eoi_time = 0;
+ do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
+}
+
+static void xen_irq_lateeoi_worker(struct work_struct *work)
+{
+ struct lateeoi_work *eoi;
+ struct irq_info *info;
+ u64 now = get_jiffies_64();
+ unsigned long flags;
+
+ eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
+
+ read_lock_irqsave(&evtchn_rwlock, flags);
+
+ while (true) {
+ spin_lock(&eoi->eoi_list_lock);
+
+ info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
+ eoi_list);
+
+ if (info == NULL || now < info->eoi_time) {
+ spin_unlock(&eoi->eoi_list_lock);
+ break;
+ }
+
+ list_del_init(&info->eoi_list);
+
+ spin_unlock(&eoi->eoi_list_lock);
+
+ info->eoi_time = 0;
+
+ xen_irq_lateeoi_locked(info, false);
+ }
+
+ if (info)
+ mod_delayed_work_on(info->eoi_cpu, system_wq,
+ &eoi->delayed, info->eoi_time - now);
+
+ read_unlock_irqrestore(&evtchn_rwlock, flags);
+}
+
+static void xen_cpu_init_eoi(unsigned int cpu)
+{
+ struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);
+
+ INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
+ spin_lock_init(&eoi->eoi_list_lock);
+ INIT_LIST_HEAD(&eoi->eoi_list);
+}
+
+void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
+{
+ struct irq_info *info;
+ unsigned long flags;
+
+ read_lock_irqsave(&evtchn_rwlock, flags);
+
+ info = info_for_irq(irq);
+
+ if (info)
+ xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
+
+ read_unlock_irqrestore(&evtchn_rwlock, flags);
+}
+EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
+
static void xen_irq_init(unsigned irq)
{
struct irq_info *info;
+
#ifdef CONFIG_SMP
/* By default all event channels notify CPU#0. */
cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
@@ -375,8 +606,9 @@ static void xen_irq_init(unsigned irq)
info->type = IRQT_UNBOUND;
info->refcnt = -1;
- irq_set_handler_data(irq, info);
+ set_info_for_irq(irq, info);
+ INIT_LIST_HEAD(&info->eoi_list);
list_add_tail(&info->list, &xen_irq_list_head);
}
@@ -424,17 +656,25 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
static void xen_free_irq(unsigned irq)
{
- struct irq_info *info = irq_get_handler_data(irq);
+ struct irq_info *info = info_for_irq(irq);
+ unsigned long flags;
if (WARN_ON(!info))
return;
+ write_lock_irqsave(&evtchn_rwlock, flags);
+
+ if (!list_empty(&info->eoi_list))
+ lateeoi_list_del(info);
+
list_del(&info->list);
- irq_set_handler_data(irq, NULL);
+ set_info_for_irq(irq, NULL);
WARN_ON(info->refcnt > 0);
+ write_unlock_irqrestore(&evtchn_rwlock, flags);
+
kfree(info);
/* Legacy IRQ descriptors are managed by the arch. */
@@ -453,6 +693,12 @@ static void xen_evtchn_close(unsigned int port)
BUG();
}
+static void event_handler_exit(struct irq_info *info)
+{
+ smp_store_release(&info->is_active, 0);
+ clear_evtchn(info->evtchn);
+}
+
static void pirq_query_unmask(int irq)
{
struct physdev_irq_status_query irq_status;
@@ -471,7 +717,8 @@ static void pirq_query_unmask(int irq)
static void eoi_pirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
+ struct irq_info *info = info_for_irq(data->irq);
+ int evtchn = info ? info->evtchn : 0;
struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
int rc = 0;
@@ -480,16 +727,15 @@ static void eoi_pirq(struct irq_data *data)
if (unlikely(irqd_is_setaffinity_pending(data)) &&
likely(!irqd_irq_disabled(data))) {
- int masked = test_and_set_mask(evtchn);
+ do_mask(info, EVT_MASK_REASON_TEMPORARY);
- clear_evtchn(evtchn);
+ event_handler_exit(info);
irq_move_masked_irq(data);
- if (!masked)
- unmask_evtchn(evtchn);
+ do_unmask(info, EVT_MASK_REASON_TEMPORARY);
} else
- clear_evtchn(evtchn);
+ event_handler_exit(info);
if (pirq_needs_eoi(data->irq)) {
rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
@@ -540,7 +786,8 @@ static unsigned int __startup_pirq(unsigned int irq)
goto err;
out:
- unmask_evtchn(evtchn);
+ do_unmask(info, EVT_MASK_REASON_EXPLICIT);
+
eoi_pirq(irq_get_irq_data(irq));
return 0;
@@ -567,7 +814,7 @@ static void shutdown_pirq(struct irq_data *data)
if (!VALID_EVTCHN(evtchn))
return;
- mask_evtchn(evtchn);
+ do_mask(info, EVT_MASK_REASON_EXPLICIT);
xen_evtchn_close(evtchn);
xen_irq_info_cleanup(info);
}
@@ -601,7 +848,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
static void __unbind_from_irq(unsigned int irq)
{
int evtchn = evtchn_from_irq(irq);
- struct irq_info *info = irq_get_handler_data(irq);
+ struct irq_info *info = info_for_irq(irq);
if (info->refcnt > 0) {
info->refcnt--;
@@ -826,7 +1073,7 @@ int xen_pirq_from_irq(unsigned irq)
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
-int bind_evtchn_to_irq(unsigned int evtchn)
+static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
{
int irq;
int ret;
@@ -843,7 +1090,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
if (irq < 0)
goto out;
- irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
+ irq_set_chip_and_handler_name(irq, chip,
handle_edge_irq, "event");
ret = xen_irq_info_evtchn_setup(irq, evtchn);
@@ -864,8 +1111,19 @@ out:
return irq;
}
+
+int bind_evtchn_to_irq(evtchn_port_t evtchn)
+{
+ return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip);
+}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
+{
+ return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip);
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
+
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
@@ -907,8 +1165,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
return irq;
}
-int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
- unsigned int remote_port)
+static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain,
+ evtchn_port_t remote_port,
+ struct irq_chip *chip)
{
struct evtchn_bind_interdomain bind_interdomain;
int err;
@@ -919,10 +1178,26 @@ int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
&bind_interdomain);
- return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
+ return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
+ chip);
+}
+
+int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
+ evtchn_port_t remote_port)
+{
+ return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
+ &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);
+int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
+ evtchn_port_t remote_port)
+{
+ return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
+ &xen_lateeoi_chip);
+}
+EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
+
static int find_virq(unsigned int virq, unsigned int cpu)
{
struct evtchn_status status;
@@ -1018,14 +1293,15 @@ static void unbind_from_irq(unsigned int irq)
mutex_unlock(&irq_mapping_update_lock);
}
-int bind_evtchn_to_irqhandler(unsigned int evtchn,
- irq_handler_t handler,
- unsigned long irqflags,
- const char *devname, void *dev_id)
+static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname, void *dev_id,
+ struct irq_chip *chip)
{
int irq, retval;
- irq = bind_evtchn_to_irq(evtchn);
+ irq = bind_evtchn_to_irq_chip(evtchn, chip);
if (irq < 0)
return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
@@ -1036,18 +1312,38 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
return irq;
}
+
+int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname, void *dev_id)
+{
+ return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
+ devname, dev_id,
+ &xen_dynamic_chip);
+}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
-int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
- unsigned int remote_port,
- irq_handler_t handler,
- unsigned long irqflags,
- const char *devname,
- void *dev_id)
+int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname, void *dev_id)
+{
+ return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
+ devname, dev_id,
+ &xen_lateeoi_chip);
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);
+
+static int bind_interdomain_evtchn_to_irqhandler_chip(
+ unsigned int remote_domain, evtchn_port_t remote_port,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id, struct irq_chip *chip)
{
int irq, retval;
- irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
+ irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
+ chip);
if (irq < 0)
return irq;
@@ -1059,8 +1355,33 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
return irq;
}
+
+int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
+ evtchn_port_t remote_port,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id)
+{
+ return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
+ remote_port, handler, irqflags, devname,
+ dev_id, &xen_dynamic_chip);
+}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
+int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
+ evtchn_port_t remote_port,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id)
+{
+ return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
+ remote_port, handler, irqflags, devname,
+ dev_id, &xen_lateeoi_chip);
+}
+EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);
+
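
The new *_lateeoi bind variants above pair with xen_irq_lateeoi(): an event bound this way stays masked after delivery until the handler explicitly signals end-of-interrupt, which is what lets the core throttle event-storming backends. A minimal sketch of how a backend driver would consume this API, assuming a hypothetical my_work_pending() check and an already known event channel port:

#include <linux/interrupt.h>
#include <xen/events.h>

/* Illustrative only: a handler bound with the lateeoi chip must ack every
 * event via xen_irq_lateeoi(), otherwise the channel stays masked. */
static irqreturn_t my_handler(int irq, void *dev_id)
{
	bool spurious = !my_work_pending(dev_id);	/* hypothetical check */

	/* ... process whatever the frontend queued ... */

	xen_irq_lateeoi(irq, spurious ? XEN_EOI_FLAG_SPURIOUS : 0);
	return IRQ_HANDLED;
}

static int my_connect(evtchn_port_t evtchn, void *dev_id)
{
	/* Returns the Linux irq number, or a negative errno. */
	return bind_evtchn_to_irqhandler_lateeoi(evtchn, my_handler, 0,
						 "my-backend", dev_id);
}
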
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
irq_handler_t handler,
unsigned long irqflags, const char *devname, void *dev_id)
@@ -1105,7 +1426,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
- struct irq_info *info = irq_get_handler_data(irq);
+ struct irq_info *info = info_for_irq(irq);
if (WARN_ON(!info))
return;
@@ -1139,7 +1460,7 @@ int evtchn_make_refcounted(unsigned int evtchn)
if (irq == -1)
return -ENOENT;
- info = irq_get_handler_data(irq);
+ info = info_for_irq(irq);
if (!info)
return -ENOENT;
@@ -1167,13 +1488,13 @@ int evtchn_get(unsigned int evtchn)
if (irq == -1)
goto done;
- info = irq_get_handler_data(irq);
+ info = info_for_irq(irq);
if (!info)
goto done;
err = -EINVAL;
- if (info->refcnt <= 0)
+ if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
goto done;
info->refcnt++;
@@ -1212,6 +1533,56 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
notify_remote_via_irq(irq);
}
+struct evtchn_loop_ctrl {
+ ktime_t timeout;
+ unsigned count;
+ bool defer_eoi;
+};
+
+void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
+{
+ int irq;
+ struct irq_info *info;
+
+ irq = get_evtchn_to_irq(port);
+ if (irq == -1)
+ return;
+
+ /*
+ * Check for timeout every 256 events.
+ * We are setting the timeout value only after the first 256
+ * events in order to not hurt the common case of few loop
+ * iterations. The 256 is basically an arbitrary value.
+ *
+ * In case we are hitting the timeout we need to defer all further
+ * EOIs in order to ensure to leave the event handling loop rather
+ * sooner than later.
+ */
+ if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
+ ktime_t kt = ktime_get();
+
+ if (!ctrl->timeout) {
+ kt = ktime_add_ms(kt,
+ jiffies_to_msecs(event_loop_timeout));
+ ctrl->timeout = kt;
+ } else if (kt > ctrl->timeout) {
+ ctrl->defer_eoi = true;
+ }
+ }
+
+ info = info_for_irq(irq);
+ if (xchg_acquire(&info->is_active, 1))
+ return;
+
+ if (ctrl->defer_eoi) {
+ info->eoi_cpu = smp_processor_id();
+ info->irq_epoch = __this_cpu_read(irq_epoch);
+ info->eoi_time = get_jiffies_64() + event_eoi_delay;
+ }
+
+ generic_handle_irq(irq);
+}
+
static DEFINE_PER_CPU(unsigned, xed_nesting_count);
static void __xen_evtchn_do_upcall(void)
@@ -1219,6 +1590,9 @@ static void __xen_evtchn_do_upcall(void)
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
int cpu = get_cpu();
unsigned count;
+ struct evtchn_loop_ctrl ctrl = { 0 };
+
+ read_lock(&evtchn_rwlock);
do {
vcpu_info->evtchn_upcall_pending = 0;
@@ -1226,7 +1600,7 @@ static void __xen_evtchn_do_upcall(void)
if (__this_cpu_inc_return(xed_nesting_count) - 1)
goto out;
- xen_evtchn_handle_events(cpu);
+ xen_evtchn_handle_events(cpu, &ctrl);
BUG_ON(!irqs_disabled());
@@ -1235,6 +1609,14 @@ static void __xen_evtchn_do_upcall(void)
} while (count != 1 || vcpu_info->evtchn_upcall_pending);
out:
+ read_unlock(&evtchn_rwlock);
+
+ /*
+ * Increment irq_epoch only now to defer EOIs only for
+ * xen_irq_lateeoi() invocations occurring from inside the loop
+ * above.
+ */
+ __this_cpu_inc(irq_epoch);
put_cpu();
}
@@ -1293,10 +1675,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
-static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
+static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
{
struct evtchn_bind_vcpu bind_vcpu;
- int masked;
+ evtchn_port_t evtchn = info ? info->evtchn : 0;
if (!VALID_EVTCHN(evtchn))
return -1;
@@ -1312,7 +1694,7 @@ static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
* Mask the event while changing the VCPU binding to prevent
* it being delivered on an unexpected VCPU.
*/
- masked = test_and_set_mask(evtchn);
+ do_mask(info, EVT_MASK_REASON_TEMPORARY);
/*
* If this fails, it usually just indicates that we're dealing with a
@@ -1322,8 +1704,7 @@ static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
bind_evtchn_to_cpu(evtchn, tcpu);
- if (!masked)
- unmask_evtchn(evtchn);
+ do_unmask(info, EVT_MASK_REASON_TEMPORARY);
return 0;
}
@@ -1332,7 +1713,7 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
bool force)
{
unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
- int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
+ int ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
if (!ret)
irq_data_update_effective_affinity(data, cpumask_of(tcpu));
@@ -1351,39 +1732,41 @@ EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
static void enable_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
+ struct irq_info *info = info_for_irq(data->irq);
+ evtchn_port_t evtchn = info ? info->evtchn : 0;
if (VALID_EVTCHN(evtchn))
- unmask_evtchn(evtchn);
+ do_unmask(info, EVT_MASK_REASON_EXPLICIT);
}
static void disable_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
+ struct irq_info *info = info_for_irq(data->irq);
+ evtchn_port_t evtchn = info ? info->evtchn : 0;
if (VALID_EVTCHN(evtchn))
- mask_evtchn(evtchn);
+ do_mask(info, EVT_MASK_REASON_EXPLICIT);
}
static void ack_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
+ struct irq_info *info = info_for_irq(data->irq);
+ evtchn_port_t evtchn = info ? info->evtchn : 0;
if (!VALID_EVTCHN(evtchn))
return;
if (unlikely(irqd_is_setaffinity_pending(data)) &&
likely(!irqd_irq_disabled(data))) {
- int masked = test_and_set_mask(evtchn);
+ do_mask(info, EVT_MASK_REASON_TEMPORARY);
- clear_evtchn(evtchn);
+ event_handler_exit(info);
irq_move_masked_irq(data);
- if (!masked)
- unmask_evtchn(evtchn);
+ do_unmask(info, EVT_MASK_REASON_TEMPORARY);
} else
- clear_evtchn(evtchn);
+ event_handler_exit(info);
}
static void mask_ack_dynirq(struct irq_data *data)
@@ -1392,18 +1775,39 @@ static void mask_ack_dynirq(struct irq_data *data)
ack_dynirq(data);
}
+static void lateeoi_ack_dynirq(struct irq_data *data)
+{
+ struct irq_info *info = info_for_irq(data->irq);
+ evtchn_port_t evtchn = info ? info->evtchn : 0;
+
+ if (VALID_EVTCHN(evtchn)) {
+ do_mask(info, EVT_MASK_REASON_EOI_PENDING);
+ ack_dynirq(data);
+ }
+}
+
+static void lateeoi_mask_ack_dynirq(struct irq_data *data)
+{
+ struct irq_info *info = info_for_irq(data->irq);
+ evtchn_port_t evtchn = info ? info->evtchn : 0;
+
+ if (VALID_EVTCHN(evtchn)) {
+ do_mask(info, EVT_MASK_REASON_EXPLICIT);
+ ack_dynirq(data);
+ }
+}
+
static int retrigger_dynirq(struct irq_data *data)
{
- unsigned int evtchn = evtchn_from_irq(data->irq);
- int masked;
+ struct irq_info *info = info_for_irq(data->irq);
+ evtchn_port_t evtchn = info ? info->evtchn : 0;
if (!VALID_EVTCHN(evtchn))
return 0;
- masked = test_and_set_mask(evtchn);
+ do_mask(info, EVT_MASK_REASON_TEMPORARY);
set_evtchn(evtchn);
- if (!masked)
- unmask_evtchn(evtchn);
+ do_unmask(info, EVT_MASK_REASON_TEMPORARY);
return 1;
}
@@ -1498,10 +1902,11 @@ static void restore_cpu_ipis(unsigned int cpu)
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
- int evtchn = evtchn_from_irq(irq);
+ struct irq_info *info = info_for_irq(irq);
+ evtchn_port_t evtchn = info ? info->evtchn : 0;
if (VALID_EVTCHN(evtchn))
- clear_evtchn(evtchn);
+ event_handler_exit(info);
}
EXPORT_SYMBOL(xen_clear_irq_pending);
void xen_set_irq_pending(int irq)
@@ -1601,6 +2006,21 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
.irq_retrigger = retrigger_dynirq,
};
+static struct irq_chip xen_lateeoi_chip __read_mostly = {
+ /* The chip name needs to contain "xen-dyn" for irqbalance to work. */
+ .name = "xen-dyn-lateeoi",
+
+ .irq_disable = disable_dynirq,
+ .irq_mask = disable_dynirq,
+ .irq_unmask = enable_dynirq,
+
+ .irq_ack = lateeoi_ack_dynirq,
+ .irq_mask_ack = lateeoi_mask_ack_dynirq,
+
+ .irq_set_affinity = set_affinity_irq,
+ .irq_retrigger = retrigger_dynirq,
+};
+
static struct irq_chip xen_pirq_chip __read_mostly = {
.name = "xen-pirq",
@@ -1631,16 +2051,6 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
.irq_ack = ack_dynirq,
};
-int xen_set_callback_via(uint64_t via)
-{
- struct xen_hvm_param a;
- a.domid = DOMID_SELF;
- a.index = HVM_PARAM_CALLBACK_IRQ;
- a.value = via;
- return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
-}
-EXPORT_SYMBOL_GPL(xen_set_callback_via);
-
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
* channel notifications because we can receive vector callbacks on any
@@ -1667,12 +2077,31 @@ void xen_callback_vector(void)
void xen_callback_vector(void) {}
#endif
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "xen."
-
static bool fifo_events = true;
module_param(fifo_events, bool, 0);
+static int xen_evtchn_cpu_prepare(unsigned int cpu)
+{
+ int ret = 0;
+
+ xen_cpu_init_eoi(cpu);
+
+ if (evtchn_ops->percpu_init)
+ ret = evtchn_ops->percpu_init(cpu);
+
+ return ret;
+}
+
+static int xen_evtchn_cpu_dead(unsigned int cpu)
+{
+ int ret = 0;
+
+ if (evtchn_ops->percpu_deinit)
+ ret = evtchn_ops->percpu_deinit(cpu);
+
+ return ret;
+}
+
void __init xen_init_IRQ(void)
{
int ret = -EINVAL;
@@ -1683,6 +2112,12 @@ void __init xen_init_IRQ(void)
if (ret < 0)
xen_evtchn_2l_init();
+ xen_cpu_init_eoi(smp_processor_id());
+
+ cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
+ "xen/evtchn:prepare",
+ xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
+
evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
sizeof(*evtchn_to_irq), GFP_KERNEL);
BUG_ON(!evtchn_to_irq);
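
The lateeoi variants added above decouple event delivery from the EOI: a backend binds with bind_interdomain_evtchn_to_irqhandler_lateeoi() and only calls xen_irq_lateeoi() once it has fully processed the event, passing XEN_EOI_FLAG_SPURIOUS when no work was found. A minimal sketch of that pattern, assuming a hypothetical backend (my_backend, process_requests() and "my-backend" are illustrative names, not part of this patch):

    /* Sketch only: mirrors how the backends converted later in this patch
     * use the new lateeoi binding. */
    #include <linux/interrupt.h>
    #include <xen/events.h>

    struct my_backend {
            int irq;
            /* ... shared ring state ... */
    };

    /* Placeholder: drain the ring, return true if any request was handled. */
    static bool process_requests(struct my_backend *be)
    {
            return false;
    }

    static irqreturn_t my_backend_event(int irq, void *dev_id)
    {
            struct my_backend *be = dev_id;
            unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

            if (process_requests(be))
                    eoi_flags = 0;

            /* No further events are delivered until this EOI, so a misbehaving
             * frontend cannot storm us while we are still processing. */
            xen_irq_lateeoi(irq, eoi_flags);
            return IRQ_HANDLED;
    }

    static int my_backend_connect(struct my_backend *be,
                                  unsigned int otherend_id, evtchn_port_t evtchn)
    {
            int err = bind_interdomain_evtchn_to_irqhandler_lateeoi(otherend_id,
                            evtchn, my_backend_event, 0, "my-backend", be);

            if (err < 0)
                    return err;
            be->irq = err;
            return 0;
    }
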
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 76b318e88382..360a7f8cdf75 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -209,12 +209,6 @@ static bool evtchn_fifo_is_pending(unsigned port)
return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}
-static bool evtchn_fifo_test_and_set_mask(unsigned port)
-{
- event_word_t *word = event_word_from_port(port);
- return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
-}
-
static void evtchn_fifo_mask(unsigned port)
{
event_word_t *word = event_word_from_port(port);
@@ -227,19 +221,25 @@ static bool evtchn_fifo_is_masked(unsigned port)
return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}
/*
- * Clear MASKED, spinning if BUSY is set.
+ * Clear MASKED if not PENDING, spinning if BUSY is set.
+ * Return true if mask was cleared.
*/
-static void clear_masked(volatile event_word_t *word)
+static bool clear_masked_cond(volatile event_word_t *word)
{
event_word_t new, old, w;
w = *word;
do {
+ if (w & (1 << EVTCHN_FIFO_PENDING))
+ return false;
+
old = w & ~(1 << EVTCHN_FIFO_BUSY);
new = old & ~(1 << EVTCHN_FIFO_MASKED);
w = sync_cmpxchg(word, old, new);
} while (w != old);
+
+ return true;
}
static void evtchn_fifo_unmask(unsigned port)
@@ -248,8 +248,7 @@ static void evtchn_fifo_unmask(unsigned port)
BUG_ON(!irqs_disabled());
- clear_masked(word);
- if (evtchn_fifo_is_pending(port)) {
+ if (!clear_masked_cond(word)) {
struct evtchn_unmask unmask = { .port = port };
(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
}
@@ -270,19 +269,9 @@ static uint32_t clear_linked(volatile event_word_t *word)
return w & EVTCHN_FIFO_LINK_MASK;
}
-static void handle_irq_for_port(unsigned port)
-{
- int irq;
-
- irq = get_evtchn_to_irq(port);
- if (irq != -1)
- generic_handle_irq(irq);
-}
-
-static void consume_one_event(unsigned cpu,
+static void consume_one_event(unsigned cpu, struct evtchn_loop_ctrl *ctrl,
struct evtchn_fifo_control_block *control_block,
- unsigned priority, unsigned long *ready,
- bool drop)
+ unsigned priority, unsigned long *ready)
{
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
uint32_t head;
@@ -315,16 +304,17 @@ static void consume_one_event(unsigned cpu,
clear_bit(priority, ready);
if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
- if (unlikely(drop))
+ if (unlikely(!ctrl))
pr_warn("Dropping pending event for port %u\n", port);
else
- handle_irq_for_port(port);
+ handle_irq_for_port(port, ctrl);
}
q->head[priority] = head;
}
-static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
+static void __evtchn_fifo_handle_events(unsigned cpu,
+ struct evtchn_loop_ctrl *ctrl)
{
struct evtchn_fifo_control_block *control_block;
unsigned long ready;
@@ -336,14 +326,15 @@ static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
while (ready) {
q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
- consume_one_event(cpu, control_block, q, &ready, drop);
+ consume_one_event(cpu, ctrl, control_block, q, &ready);
ready |= xchg(&control_block->ready, 0);
}
}
-static void evtchn_fifo_handle_events(unsigned cpu)
+static void evtchn_fifo_handle_events(unsigned cpu,
+ struct evtchn_loop_ctrl *ctrl)
{
- __evtchn_fifo_handle_events(cpu, false);
+ __evtchn_fifo_handle_events(cpu, ctrl);
}
static void evtchn_fifo_resume(void)
@@ -380,21 +371,6 @@ static void evtchn_fifo_resume(void)
event_array_pages = 0;
}
-static const struct evtchn_ops evtchn_ops_fifo = {
- .max_channels = evtchn_fifo_max_channels,
- .nr_channels = evtchn_fifo_nr_channels,
- .setup = evtchn_fifo_setup,
- .bind_to_cpu = evtchn_fifo_bind_to_cpu,
- .clear_pending = evtchn_fifo_clear_pending,
- .set_pending = evtchn_fifo_set_pending,
- .is_pending = evtchn_fifo_is_pending,
- .test_and_set_mask = evtchn_fifo_test_and_set_mask,
- .mask = evtchn_fifo_mask,
- .unmask = evtchn_fifo_unmask,
- .handle_events = evtchn_fifo_handle_events,
- .resume = evtchn_fifo_resume,
-};
-
static int evtchn_fifo_alloc_control_block(unsigned cpu)
{
void *control_block = NULL;
@@ -417,19 +393,35 @@ static int evtchn_fifo_alloc_control_block(unsigned cpu)
return ret;
}
-static int xen_evtchn_cpu_prepare(unsigned int cpu)
+static int evtchn_fifo_percpu_init(unsigned int cpu)
{
if (!per_cpu(cpu_control_block, cpu))
return evtchn_fifo_alloc_control_block(cpu);
return 0;
}
-static int xen_evtchn_cpu_dead(unsigned int cpu)
+static int evtchn_fifo_percpu_deinit(unsigned int cpu)
{
- __evtchn_fifo_handle_events(cpu, true);
+ __evtchn_fifo_handle_events(cpu, NULL);
return 0;
}
+static const struct evtchn_ops evtchn_ops_fifo = {
+ .max_channels = evtchn_fifo_max_channels,
+ .nr_channels = evtchn_fifo_nr_channels,
+ .setup = evtchn_fifo_setup,
+ .bind_to_cpu = evtchn_fifo_bind_to_cpu,
+ .clear_pending = evtchn_fifo_clear_pending,
+ .set_pending = evtchn_fifo_set_pending,
+ .is_pending = evtchn_fifo_is_pending,
+ .mask = evtchn_fifo_mask,
+ .unmask = evtchn_fifo_unmask,
+ .handle_events = evtchn_fifo_handle_events,
+ .resume = evtchn_fifo_resume,
+ .percpu_init = evtchn_fifo_percpu_init,
+ .percpu_deinit = evtchn_fifo_percpu_deinit,
+};
+
int __init xen_evtchn_fifo_init(void)
{
int cpu = smp_processor_id();
@@ -443,9 +435,5 @@ int __init xen_evtchn_fifo_init(void)
evtchn_ops = &evtchn_ops_fifo;
- cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
- "xen/evtchn:prepare",
- xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
-
return ret;
}
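
clear_masked_cond() above only drops the MASKED bit when PENDING is clear, retrying while another CPU toggles BUSY underneath it. The same conditional compare-and-swap loop can be exercised in isolation; this standalone C11 sketch uses <stdatomic.h> instead of the kernel's sync_cmpxchg(), and the bit positions are made up purely for illustration:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define F_MASKED  (1u << 0)
    #define F_BUSY    (1u << 1)
    #define F_PENDING (1u << 2)

    /* Clear MASKED unless PENDING is set; retry while BUSY keeps the CAS failing. */
    static bool demo_clear_masked_cond(_Atomic uint32_t *word)
    {
            uint32_t cur = atomic_load(word);

            for (;;) {
                    if (cur & F_PENDING)
                            return false;                  /* leave it masked */

                    uint32_t expected = cur & ~F_BUSY;     /* only succeed when BUSY is clear */
                    uint32_t desired  = expected & ~F_MASKED;

                    if (atomic_compare_exchange_weak(word, &expected, desired))
                            return true;
                    cur = expected;                        /* word changed underneath us; retry */
            }
    }

    int main(void)
    {
            _Atomic uint32_t w = F_MASKED;

            printf("cleared: %d\n", demo_clear_masked_cond(&w));   /* prints 1 */
            w = F_MASKED | F_PENDING;
            printf("cleared: %d\n", demo_clear_masked_cond(&w));   /* prints 0 */
            return 0;
    }
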
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index 50c2050a1e32..cc37b711491c 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -32,11 +32,22 @@ enum xen_irq_type {
*/
struct irq_info {
struct list_head list;
- int refcnt;
- enum xen_irq_type type; /* type */
+ struct list_head eoi_list;
+ short refcnt;
+ short spurious_cnt;
+ short type; /* type */
+ u8 mask_reason; /* Why is event channel masked */
+#define EVT_MASK_REASON_EXPLICIT 0x01
+#define EVT_MASK_REASON_TEMPORARY 0x02
+#define EVT_MASK_REASON_EOI_PENDING 0x04
+ u8 is_active; /* Is event just being handled? */
unsigned irq;
unsigned int evtchn; /* event channel */
unsigned short cpu; /* cpu bound */
+ unsigned short eoi_cpu; /* EOI must happen on this cpu */
+ unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
+ u64 eoi_time; /* Time in jiffies when to EOI. */
+ raw_spinlock_t lock;
union {
unsigned short virq;
@@ -55,28 +66,34 @@ struct irq_info {
#define PIRQ_SHAREABLE (1 << 1)
#define PIRQ_MSI_GROUP (1 << 2)
+struct evtchn_loop_ctrl;
+
struct evtchn_ops {
unsigned (*max_channels)(void);
unsigned (*nr_channels)(void);
int (*setup)(struct irq_info *info);
+ void (*remove)(evtchn_port_t port, unsigned int cpu);
void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
void (*clear_pending)(unsigned port);
void (*set_pending)(unsigned port);
bool (*is_pending)(unsigned port);
- bool (*test_and_set_mask)(unsigned port);
void (*mask)(unsigned port);
void (*unmask)(unsigned port);
- void (*handle_events)(unsigned cpu);
+ void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl);
void (*resume)(void);
+
+ int (*percpu_init)(unsigned int cpu);
+ int (*percpu_deinit)(unsigned int cpu);
};
extern const struct evtchn_ops *evtchn_ops;
extern int **evtchn_to_irq;
int get_evtchn_to_irq(unsigned int evtchn);
+void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);
struct irq_info *info_for_irq(unsigned irq);
unsigned cpu_from_irq(unsigned irq);
@@ -98,6 +115,13 @@ static inline int xen_evtchn_port_setup(struct irq_info *info)
return 0;
}
+static inline void xen_evtchn_port_remove(evtchn_port_t evtchn,
+ unsigned int cpu)
+{
+ if (evtchn_ops->remove)
+ evtchn_ops->remove(evtchn, cpu);
+}
+
static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
unsigned cpu)
{
@@ -119,11 +143,6 @@ static inline bool test_evtchn(unsigned port)
return evtchn_ops->is_pending(port);
}
-static inline bool test_and_set_mask(unsigned port)
-{
- return evtchn_ops->test_and_set_mask(port);
-}
-
static inline void mask_evtchn(unsigned port)
{
return evtchn_ops->mask(port);
@@ -134,9 +153,10 @@ static inline void unmask_evtchn(unsigned port)
return evtchn_ops->unmask(port);
}
-static inline void xen_evtchn_handle_events(unsigned cpu)
+static inline void xen_evtchn_handle_events(unsigned cpu,
+ struct evtchn_loop_ctrl *ctrl)
{
- return evtchn_ops->handle_events(cpu);
+ return evtchn_ops->handle_events(cpu, ctrl);
}
static inline void xen_evtchn_resume(void)
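
The new mask_reason byte in struct irq_info turns masking into reason-based bookkeeping: every caller records why it masked the channel, and the channel is only really unmasked once no reason is left. A minimal sketch of that bookkeeping (the real do_mask()/do_unmask() additionally take info->lock and perform the actual mask/unmask; everything here is illustrative):

    #include <stdbool.h>

    /* Same reason values as the header above. */
    #define SK_MASK_REASON_EXPLICIT    0x01
    #define SK_MASK_REASON_TEMPORARY   0x02
    #define SK_MASK_REASON_EOI_PENDING 0x04

    struct sketch_info {
            unsigned char mask_reason;
            bool masked;            /* stands in for the real event channel mask */
    };

    static void sketch_do_mask(struct sketch_info *info, unsigned char reason)
    {
            if (!info->mask_reason)
                    info->masked = true;    /* first reason actually masks */
            info->mask_reason |= reason;
    }

    static void sketch_do_unmask(struct sketch_info *info, unsigned char reason)
    {
            info->mask_reason &= ~reason;
            if (!info->mask_reason)
                    info->masked = false;   /* last reason gone: really unmask */
    }
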
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 47c70b826a6a..4b11e60e37a3 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -166,7 +166,6 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
"Interrupt for port %d, but apparently not enabled; per-user %p\n",
evtchn->port, u);
- disable_irq_nosync(irq);
evtchn->enabled = false;
spin_lock(&u->ring_prod_lock);
@@ -292,7 +291,7 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
evtchn = find_evtchn(u, port);
if (evtchn && !evtchn->enabled) {
evtchn->enabled = true;
- enable_irq(irq_from_evtchn(port));
+ xen_irq_lateeoi(irq_from_evtchn(port), 0);
}
}
@@ -392,8 +391,8 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
if (rc < 0)
goto err;
- rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
- u->name, evtchn);
+ rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0,
+ u->name, evtchn);
if (rc < 0)
goto err;
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index d97fcfc5e558..f6589563ff71 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -641,6 +641,14 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
goto fail_detach;
}
+ /* Check that we have zero offset. */
+ if (sgt->sgl->offset) {
+ ret = ERR_PTR(-EINVAL);
+ pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
+ sgt->sgl->offset);
+ goto fail_unmap;
+ }
+
/* Check number of pages that imported buffer has. */
if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
ret = ERR_PTR(-EINVAL);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 9d8e02cfd480..e519063e421e 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -323,44 +323,47 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
* to the kernel linear addresses of the struct pages.
* These ptes are completely different from the user ptes dealt
* with find_grant_ptes.
+ * Note that GNTMAP_device_map isn't needed here: The
+ * dev_bus_addr output field gets consumed only from ->map_ops,
+ * and by not requesting it when mapping we also avoid needing
+ * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
+ * reference to the page in the hypervisor).
*/
+ unsigned int flags = (map->flags & ~GNTMAP_device_map) |
+ GNTMAP_host_map;
+
for (i = 0; i < map->count; i++) {
unsigned long address = (unsigned long)
pfn_to_kaddr(page_to_pfn(map->pages[i]));
BUG_ON(PageHighMem(map->pages[i]));
- gnttab_set_map_op(&map->kmap_ops[i], address,
- map->flags | GNTMAP_host_map,
+ gnttab_set_map_op(&map->kmap_ops[i], address, flags,
map->grants[i].ref,
map->grants[i].domid);
gnttab_set_unmap_op(&map->kunmap_ops[i], address,
- map->flags | GNTMAP_host_map, -1);
+ flags, -1);
}
}
pr_debug("map %d+%d\n", map->index, map->count);
err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
map->pages, map->count);
- if (err)
- return err;
for (i = 0; i < map->count; i++) {
- if (map->map_ops[i].status) {
+ if (map->map_ops[i].status == GNTST_okay)
+ map->unmap_ops[i].handle = map->map_ops[i].handle;
+ else if (!err)
err = -EINVAL;
- continue;
- }
- map->unmap_ops[i].handle = map->map_ops[i].handle;
- if (use_ptemod)
- map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
-#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
- else if (map->dma_vaddr) {
- unsigned long bfn;
+ if (map->flags & GNTMAP_device_map)
+ map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
- bfn = pfn_to_bfn(page_to_pfn(map->pages[i]));
- map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn);
+ if (use_ptemod) {
+ if (map->kmap_ops[i].status == GNTST_okay)
+ map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
+ else if (!err)
+ err = -EINVAL;
}
-#endif
}
return err;
}
@@ -842,17 +845,18 @@ struct gntdev_copy_batch {
s16 __user *status[GNTDEV_COPY_BATCH];
unsigned int nr_ops;
unsigned int nr_pages;
+ bool writeable;
};
static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
- bool writeable, unsigned long *gfn)
+ unsigned long *gfn)
{
unsigned long addr = (unsigned long)virt;
struct page *page;
unsigned long xen_pfn;
int ret;
- ret = get_user_pages_fast(addr, 1, writeable, &page);
+ ret = get_user_pages_fast(addr, 1, batch->writeable, &page);
if (ret < 0)
return ret;
@@ -868,9 +872,13 @@ static void gntdev_put_pages(struct gntdev_copy_batch *batch)
{
unsigned int i;
- for (i = 0; i < batch->nr_pages; i++)
+ for (i = 0; i < batch->nr_pages; i++) {
+ if (batch->writeable && !PageDirty(batch->pages[i]))
+ set_page_dirty_lock(batch->pages[i]);
put_page(batch->pages[i]);
+ }
batch->nr_pages = 0;
+ batch->writeable = false;
}
static int gntdev_copy(struct gntdev_copy_batch *batch)
@@ -959,8 +967,9 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
virt = seg->source.virt + copied;
off = (unsigned long)virt & ~XEN_PAGE_MASK;
len = min(len, (size_t)XEN_PAGE_SIZE - off);
+ batch->writeable = false;
- ret = gntdev_get_page(batch, virt, false, &gfn);
+ ret = gntdev_get_page(batch, virt, &gfn);
if (ret < 0)
return ret;
@@ -978,8 +987,9 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
virt = seg->dest.virt + copied;
off = (unsigned long)virt & ~XEN_PAGE_MASK;
len = min(len, (size_t)XEN_PAGE_SIZE - off);
+ batch->writeable = true;
- ret = gntdev_get_page(batch, virt, true, &gfn);
+ ret = gntdev_get_page(batch, virt, &gfn);
if (ret < 0)
return ret;
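
The gntdev grant-copy changes above follow the standard rule for pages pinned with get_user_pages_fast(): pages that may have been written must be marked dirty before they are released, otherwise the writes can be lost when the page is reclaimed. A condensed sketch of that release path, mirroring gntdev_put_pages() (the function name is illustrative):

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Release pages previously pinned with get_user_pages_fast(). */
    static void release_pinned_pages(struct page **pages, unsigned int nr,
                                     bool writeable)
    {
            unsigned int i;

            for (i = 0; i < nr; i++) {
                    if (writeable && !PageDirty(pages[i]))
                            set_page_dirty_lock(pages[i]);  /* data went into the page */
                    put_page(pages[i]);                     /* drop the pin */
            }
    }
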
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 5d7dcad0b0a0..4cec8146609a 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -162,7 +162,6 @@ static int platform_pci_probe(struct pci_dev *pdev,
ret = gnttab_init();
if (ret)
goto grant_out;
- xenbus_probe(NULL);
return 0;
grant_out:
gnttab_free_auto_xlat_frames();
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
index 5f6b77ea34fb..128375ff80b8 100644
--- a/drivers/xen/preempt.c
+++ b/drivers/xen/preempt.c
@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
asmlinkage __visible void xen_maybe_preempt_hcall(void)
{
if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
- && need_resched())) {
+ && need_resched() && !preempt_count())) {
/*
* Clear flag as we may be rescheduled on a different
* cpu.
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 7e6e682104dc..a8486432be05 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -743,14 +743,15 @@ static int remap_pfn_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
return 0;
}
-static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
+static long privcmd_ioctl_mmap_resource(struct file *file,
+ struct privcmd_mmap_resource __user *udata)
{
struct privcmd_data *data = file->private_data;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct privcmd_mmap_resource kdata;
xen_pfn_t *pfns = NULL;
- struct xen_mem_acquire_resource xdata;
+ struct xen_mem_acquire_resource xdata = { };
int rc;
if (copy_from_user(&kdata, udata, sizeof(kdata)))
@@ -760,6 +761,22 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
return -EPERM;
+ /* Both fields must be set or unset */
+ if (!!kdata.addr != !!kdata.num)
+ return -EINVAL;
+
+ xdata.domid = kdata.dom;
+ xdata.type = kdata.type;
+ xdata.id = kdata.id;
+
+ if (!kdata.addr && !kdata.num) {
+ /* Query the size of the resource. */
+ rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
+ if (rc)
+ return rc;
+ return __put_user(xdata.nr_frames, &udata->num);
+ }
+
down_write(&mm->mmap_sem);
vma = find_vma(mm, kdata.addr);
@@ -793,10 +810,6 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
} else
vma->vm_private_data = PRIV_VMA_LOCKED;
- memset(&xdata, 0, sizeof(xdata));
- xdata.domid = kdata.dom;
- xdata.type = kdata.type;
- xdata.id = kdata.id;
xdata.frame = kdata.idx;
xdata.nr_frames = kdata.num;
set_xen_guest_handle(xdata.frame_list, pfns);
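
With the query path added above, user space can size a resource before mapping it by issuing IOCTL_PRIVCMD_MMAP_RESOURCE with addr and num both zero; the frame count comes back in num. A hedged user-space sketch (it assumes the kernel's uapi header is installed as <xen/privcmd.h>, and the dom/type/id values are placeholders):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <xen/privcmd.h>    /* struct privcmd_mmap_resource, IOCTL_PRIVCMD_MMAP_RESOURCE */

    int main(void)
    {
            struct privcmd_mmap_resource res;
            int fd = open("/dev/xen/privcmd", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            memset(&res, 0, sizeof(res));
            res.dom  = 0;   /* placeholder domain */
            res.type = 1;   /* placeholder resource type */
            res.id   = 0;

            /* addr == 0 and num == 0 selects the new "query size" path. */
            if (ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &res) == 0)
                    printf("resource spans %llu frames\n", (unsigned long long)res.num);
            else
                    perror("ioctl");

            close(fd);
            return 0;
    }
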
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index b3fbfed28682..f94bb6034a5a 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -75,6 +75,7 @@ struct sock_mapping {
atomic_t write;
atomic_t io;
atomic_t release;
+ atomic_t eoi;
void (*saved_data_ready)(struct sock *sk);
struct pvcalls_ioworker ioworker;
};
@@ -96,7 +97,7 @@ static int pvcalls_back_release_active(struct xenbus_device *dev,
struct pvcalls_fedata *fedata,
struct sock_mapping *map);
-static void pvcalls_conn_back_read(void *opaque)
+static bool pvcalls_conn_back_read(void *opaque)
{
struct sock_mapping *map = (struct sock_mapping *)opaque;
struct msghdr msg;
@@ -116,17 +117,17 @@ static void pvcalls_conn_back_read(void *opaque)
virt_mb();
if (error)
- return;
+ return false;
size = pvcalls_queued(prod, cons, array_size);
if (size >= array_size)
- return;
+ return false;
spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
atomic_set(&map->read, 0);
spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
flags);
- return;
+ return true;
}
spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
wanted = array_size - size;
@@ -150,7 +151,7 @@ static void pvcalls_conn_back_read(void *opaque)
ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
WARN_ON(ret > wanted);
if (ret == -EAGAIN) /* shouldn't happen */
- return;
+ return true;
if (!ret)
ret = -ENOTCONN;
spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
@@ -169,10 +170,10 @@ static void pvcalls_conn_back_read(void *opaque)
virt_wmb();
notify_remote_via_irq(map->irq);
- return;
+ return true;
}
-static void pvcalls_conn_back_write(struct sock_mapping *map)
+static bool pvcalls_conn_back_write(struct sock_mapping *map)
{
struct pvcalls_data_intf *intf = map->ring;
struct pvcalls_data *data = &map->data;
@@ -189,7 +190,7 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
array_size = XEN_FLEX_RING_SIZE(map->ring_order);
size = pvcalls_queued(prod, cons, array_size);
if (size == 0)
- return;
+ return false;
memset(&msg, 0, sizeof(msg));
msg.msg_flags |= MSG_DONTWAIT;
@@ -207,12 +208,11 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
atomic_set(&map->write, 0);
ret = inet_sendmsg(map->sock, &msg, size);
- if (ret == -EAGAIN || (ret >= 0 && ret < size)) {
+ if (ret == -EAGAIN) {
atomic_inc(&map->write);
atomic_inc(&map->io);
+ return true;
}
- if (ret == -EAGAIN)
- return;
/* write the data, then update the indexes */
virt_wmb();
@@ -225,9 +225,13 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
}
/* update the indexes, then notify the other end */
virt_wmb();
- if (prod != cons + ret)
+ if (prod != cons + ret) {
atomic_inc(&map->write);
+ atomic_inc(&map->io);
+ }
notify_remote_via_irq(map->irq);
+
+ return true;
}
static void pvcalls_back_ioworker(struct work_struct *work)
@@ -236,6 +240,7 @@ static void pvcalls_back_ioworker(struct work_struct *work)
struct pvcalls_ioworker, register_work);
struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
ioworker);
+ unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
while (atomic_read(&map->io) > 0) {
if (atomic_read(&map->release) > 0) {
@@ -243,10 +248,18 @@ static void pvcalls_back_ioworker(struct work_struct *work)
return;
}
- if (atomic_read(&map->read) > 0)
- pvcalls_conn_back_read(map);
- if (atomic_read(&map->write) > 0)
- pvcalls_conn_back_write(map);
+ if (atomic_read(&map->read) > 0 &&
+ pvcalls_conn_back_read(map))
+ eoi_flags = 0;
+ if (atomic_read(&map->write) > 0 &&
+ pvcalls_conn_back_write(map))
+ eoi_flags = 0;
+
+ if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) {
+ atomic_set(&map->eoi, 0);
+ xen_irq_lateeoi(map->irq, eoi_flags);
+ eoi_flags = XEN_EOI_FLAG_SPURIOUS;
+ }
atomic_dec(&map->io);
}
@@ -343,12 +356,9 @@ static struct sock_mapping *pvcalls_new_active_socket(
goto out;
map->bytes = page;
- ret = bind_interdomain_evtchn_to_irqhandler(fedata->dev->otherend_id,
- evtchn,
- pvcalls_back_conn_event,
- 0,
- "pvcalls-backend",
- map);
+ ret = bind_interdomain_evtchn_to_irqhandler_lateeoi(
+ fedata->dev->otherend_id, evtchn,
+ pvcalls_back_conn_event, 0, "pvcalls-backend", map);
if (ret < 0)
goto out;
map->irq = ret;
@@ -882,15 +892,18 @@ static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
{
struct xenbus_device *dev = dev_id;
struct pvcalls_fedata *fedata = NULL;
+ unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
- if (dev == NULL)
- return IRQ_HANDLED;
+ if (dev) {
+ fedata = dev_get_drvdata(&dev->dev);
+ if (fedata) {
+ pvcalls_back_work(fedata);
+ eoi_flags = 0;
+ }
+ }
- fedata = dev_get_drvdata(&dev->dev);
- if (fedata == NULL)
- return IRQ_HANDLED;
+ xen_irq_lateeoi(irq, eoi_flags);
- pvcalls_back_work(fedata);
return IRQ_HANDLED;
}
@@ -900,12 +913,15 @@ static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
struct pvcalls_ioworker *iow;
if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
- map->sock->sk->sk_user_data != map)
+ map->sock->sk->sk_user_data != map) {
+ xen_irq_lateeoi(irq, 0);
return IRQ_HANDLED;
+ }
iow = &map->ioworker;
atomic_inc(&map->write);
+ atomic_inc(&map->eoi);
atomic_inc(&map->io);
queue_work(iow->wq, &iow->register_work);
@@ -940,7 +956,7 @@ static int backend_connect(struct xenbus_device *dev)
goto error;
}
- err = bind_interdomain_evtchn_to_irq(dev->otherend_id, evtchn);
+ err = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
if (err < 0)
goto error;
fedata->irq = err;
@@ -1096,7 +1112,8 @@ static void set_backend_state(struct xenbus_device *dev,
case XenbusStateInitialised:
switch (state) {
case XenbusStateConnected:
- backend_connect(dev);
+ if (backend_connect(dev))
+ return;
xenbus_switch_state(dev, XenbusStateConnected);
break;
case XenbusStateClosing:
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 097410a7cdb7..adf3aae2939f 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -733,10 +733,17 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
wmb();
notify_remote_via_irq(pdev->evtchn_irq);
+ /* Enable IRQ to signal "request done". */
+ xen_pcibk_lateeoi(pdev, 0);
+
ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
!(test_bit(_XEN_PCIB_active, (unsigned long *)
&sh_info->flags)), 300*HZ);
+ /* Enable IRQ for pcifront request if not already active. */
+ if (!test_bit(_PDEVF_op_active, &pdev->flags))
+ xen_pcibk_lateeoi(pdev, 0);
+
if (!ret) {
if (test_bit(_XEN_PCIB_active,
(unsigned long *)&sh_info->flags)) {
@@ -750,13 +757,6 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
}
clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
- if (test_bit(_XEN_PCIF_active,
- (unsigned long *)&sh_info->flags)) {
- dev_dbg(&psdev->dev->dev,
- "schedule pci_conf service in " DRV_NAME "\n");
- xen_pcibk_test_and_schedule_op(psdev->pdev);
- }
-
res = (pci_ers_result_t)aer_op->err;
return res;
}
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 263c059bff90..235cdfe13494 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
+#include <xen/events.h>
#include <xen/interface/io/pciif.h>
#define DRV_NAME "xen-pciback"
@@ -27,6 +28,8 @@ struct pci_dev_entry {
#define PDEVF_op_active (1<<(_PDEVF_op_active))
#define _PCIB_op_pending (1)
#define PCIB_op_pending (1<<(_PCIB_op_pending))
+#define _EOI_pending (2)
+#define EOI_pending (1<<(_EOI_pending))
struct xen_pcibk_device {
void *pci_dev_data;
@@ -182,12 +185,17 @@ static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
void xen_pcibk_do_op(struct work_struct *data);
+static inline void xen_pcibk_lateeoi(struct xen_pcibk_device *pdev,
+ unsigned int eoi_flag)
+{
+ if (test_and_clear_bit(_EOI_pending, &pdev->flags))
+ xen_irq_lateeoi(pdev->evtchn_irq, eoi_flag);
+}
+
int xen_pcibk_xenbus_register(void);
void xen_pcibk_xenbus_unregister(void);
extern int verbose_request;
-
-void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev);
#endif
/* Handles shared IRQs that can go to the device domain and the control domain. */
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 787966f44589..c4ed2c634ca7 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -297,26 +297,41 @@ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
return 0;
}
#endif
+
+static inline bool xen_pcibk_test_op_pending(struct xen_pcibk_device *pdev)
+{
+ return test_bit(_XEN_PCIF_active,
+ (unsigned long *)&pdev->sh_info->flags) &&
+ !test_and_set_bit(_PDEVF_op_active, &pdev->flags);
+}
+
/*
* Now the same evtchn is used for both pcifront conf_read_write requests
* and pcie aer front end acks. We use a new work_queue to schedule the
* xen_pcibk conf_read_write service in order to avoid conflicts with the
* aer_core do_recovery job, which also uses the system default work_queue.
*/
-void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
+static void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
{
+ bool eoi = true;
+
/* Check that frontend is requesting an operation and that we are not
* already processing a request */
- if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
- && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
+ if (xen_pcibk_test_op_pending(pdev)) {
schedule_work(&pdev->op_work);
+ eoi = false;
}
/* _XEN_PCIB_active should have been cleared by pcifront. Also make sure
   xen_pcibk is waiting for the ack by checking _PCIB_op_pending. */
if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
&& test_bit(_PCIB_op_pending, &pdev->flags)) {
wake_up(&xen_pcibk_aer_wait_queue);
+ eoi = false;
}
+
+ /* EOI if there was nothing to do. */
+ if (eoi)
+ xen_pcibk_lateeoi(pdev, XEN_EOI_FLAG_SPURIOUS);
}
/* Performing the configuration space reads/writes must not be done in atomic
@@ -324,10 +339,8 @@ void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
* use of semaphores). This function is intended to be called from a work
* queue in process context taking a struct xen_pcibk_device as a parameter */
-void xen_pcibk_do_op(struct work_struct *data)
+static void xen_pcibk_do_one_op(struct xen_pcibk_device *pdev)
{
- struct xen_pcibk_device *pdev =
- container_of(data, struct xen_pcibk_device, op_work);
struct pci_dev *dev;
struct xen_pcibk_dev_data *dev_data = NULL;
struct xen_pci_op *op = &pdev->op;
@@ -400,16 +413,31 @@ void xen_pcibk_do_op(struct work_struct *data)
smp_mb__before_atomic(); /* /after/ clearing PCIF_active */
clear_bit(_PDEVF_op_active, &pdev->flags);
smp_mb__after_atomic(); /* /before/ final check for work */
+}
- /* Check to see if the driver domain tried to start another request in
- * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
- */
- xen_pcibk_test_and_schedule_op(pdev);
+void xen_pcibk_do_op(struct work_struct *data)
+{
+ struct xen_pcibk_device *pdev =
+ container_of(data, struct xen_pcibk_device, op_work);
+
+ do {
+ xen_pcibk_do_one_op(pdev);
+ } while (xen_pcibk_test_op_pending(pdev));
+
+ xen_pcibk_lateeoi(pdev, 0);
}
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id)
{
struct xen_pcibk_device *pdev = dev_id;
+ bool eoi;
+
+ /* IRQs might come in before pdev->evtchn_irq is written. */
+ if (unlikely(pdev->evtchn_irq != irq))
+ pdev->evtchn_irq = irq;
+
+ eoi = test_and_set_bit(_EOI_pending, &pdev->flags);
+ WARN(eoi, "IRQ while EOI pending\n");
xen_pcibk_test_and_schedule_op(pdev);
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 581c4e1a8b82..4fb6aacf9614 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -123,7 +123,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
pdev->sh_info = vaddr;
- err = bind_interdomain_evtchn_to_irqhandler(
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event,
0, DRV_NAME, pdev);
if (err < 0) {
@@ -358,7 +358,8 @@ out:
return err;
}
-static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
+static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev,
+ enum xenbus_state state)
{
int err = 0;
int num_devs;
@@ -372,9 +373,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
mutex_lock(&pdev->dev_lock);
- /* Make sure we only reconfigure once */
- if (xenbus_read_driver_state(pdev->xdev->nodename) !=
- XenbusStateReconfiguring)
+ if (xenbus_read_driver_state(pdev->xdev->nodename) != state)
goto out;
err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
@@ -499,6 +498,10 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
}
}
+ if (state != XenbusStateReconfiguring)
+ /* Make sure we only reconfigure once. */
+ goto out;
+
err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
if (err) {
xenbus_dev_fatal(pdev->xdev, err,
@@ -524,7 +527,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
break;
case XenbusStateReconfiguring:
- xen_pcibk_reconfigure(pdev);
+ xen_pcibk_reconfigure(pdev, XenbusStateReconfiguring);
break;
case XenbusStateConnected:
@@ -663,6 +666,15 @@ static void xen_pcibk_be_watch(struct xenbus_watch *watch,
xen_pcibk_setup_backend(pdev);
break;
+ case XenbusStateInitialised:
+ /*
+ * We typically move to Initialised when the first device is
+ * added, so devices added later may need reconfiguring.
+ */
+ xen_pcibk_reconfigure(pdev, XenbusStateInitialised);
+ break;
+
default:
break;
}
@@ -688,7 +700,7 @@ static int xen_pcibk_xenbus_probe(struct xenbus_device *dev,
/* watch the backend node for backend configuration information */
err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
- xen_pcibk_be_watch);
+ NULL, xen_pcibk_be_watch);
if (err)
goto out;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 14a3d4cbc2a7..614d067ffe12 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -91,7 +91,6 @@ struct vscsibk_info {
unsigned int irq;
struct vscsiif_back_ring ring;
- int ring_error;
spinlock_t ring_lock;
atomic_t nr_unreplied_reqs;
@@ -423,12 +422,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
return 0;
err = gnttab_map_refs(map, NULL, pg, cnt);
- BUG_ON(err);
for (i = 0; i < cnt; i++) {
if (unlikely(map[i].status != GNTST_okay)) {
pr_err("invalid buffer -- could not remap it\n");
map[i].handle = SCSIBACK_INVALID_HANDLE;
- err = -ENOMEM;
+ if (!err)
+ err = -ENOMEM;
} else {
get_page(pg[i]);
}
@@ -722,7 +721,8 @@ static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
return pending_req;
}
-static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+static int scsiback_do_cmd_fn(struct vscsibk_info *info,
+ unsigned int *eoi_flags)
{
struct vscsiif_back_ring *ring = &info->ring;
struct vscsiif_request ring_req;
@@ -739,11 +739,12 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
rc = ring->rsp_prod_pvt;
pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
info->domid, rp, rc, rp - rc);
- info->ring_error = 1;
- return 0;
+ return -EINVAL;
}
while ((rc != rp)) {
+ *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
+
if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
break;
@@ -802,13 +803,16 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
static irqreturn_t scsiback_irq_fn(int irq, void *dev_id)
{
struct vscsibk_info *info = dev_id;
+ int rc;
+ unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
- if (info->ring_error)
- return IRQ_HANDLED;
-
- while (scsiback_do_cmd_fn(info))
+ while ((rc = scsiback_do_cmd_fn(info, &eoi_flags)) > 0)
cond_resched();
+ /* In case of a ring error we keep the event channel masked. */
+ if (!rc)
+ xen_irq_lateeoi(irq, eoi_flags);
+
return IRQ_HANDLED;
}
@@ -829,7 +833,7 @@ static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
sring = (struct vscsiif_sring *)area;
BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
- err = bind_interdomain_evtchn_to_irq(info->domid, evtchn);
+ err = bind_interdomain_evtchn_to_irq_lateeoi(info->domid, evtchn);
if (err < 0)
goto unmap_page;
@@ -1252,7 +1256,6 @@ static int scsiback_probe(struct xenbus_device *dev,
info->domid = dev->otherend_id;
spin_lock_init(&info->ring_lock);
- info->ring_error = 0;
atomic_set(&info->nr_unreplied_reqs, 0);
init_waitqueue_head(&info->waiting_to_free);
info->dev = dev;
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index d75a2385b37c..88516a8a9f93 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -44,6 +44,8 @@ struct xen_bus_type {
int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
int (*probe)(struct xen_bus_type *bus, const char *type,
const char *dir);
+ bool (*otherend_will_handle)(struct xenbus_watch *watch,
+ const char *path, const char *token);
void (*otherend_changed)(struct xenbus_watch *watch, const char *path,
const char *token);
struct bus_type bus;
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index a1c17000129b..e35bb6b87449 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -114,18 +114,22 @@ EXPORT_SYMBOL_GPL(xenbus_strstate);
*/
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
struct xenbus_watch *watch,
+ bool (*will_handle)(struct xenbus_watch *,
+ const char *, const char *),
void (*callback)(struct xenbus_watch *,
const char *, const char *))
{
int err;
watch->node = path;
+ watch->will_handle = will_handle;
watch->callback = callback;
err = register_xenbus_watch(watch);
if (err) {
watch->node = NULL;
+ watch->will_handle = NULL;
watch->callback = NULL;
xenbus_dev_fatal(dev, err, "adding watch on %s", path);
}
@@ -152,6 +156,8 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path);
*/
int xenbus_watch_pathfmt(struct xenbus_device *dev,
struct xenbus_watch *watch,
+ bool (*will_handle)(struct xenbus_watch *,
+ const char *, const char *),
void (*callback)(struct xenbus_watch *,
const char *, const char *),
const char *pathfmt, ...)
@@ -168,7 +174,7 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev,
xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
return -ENOMEM;
}
- err = xenbus_watch_path(dev, path, watch, callback);
+ err = xenbus_watch_path(dev, path, watch, will_handle, callback);
if (err)
kfree(path);
@@ -365,8 +371,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
int i, j;
for (i = 0; i < nr_pages; i++) {
- err = gnttab_grant_foreign_access(dev->otherend_id,
- virt_to_gfn(vaddr), 0);
+ unsigned long gfn;
+
+ if (is_vmalloc_addr(vaddr))
+ gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
+ else
+ gfn = virt_to_gfn(vaddr);
+
+ err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
if (err < 0) {
xenbus_dev_fatal(dev, err,
"granting access to ring page");
@@ -450,7 +462,14 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
unsigned int nr_grefs, void **vaddr)
{
- return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
+ int err;
+
+ err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
+ /* Some hypervisors are buggy and can return 1. */
+ if (err > 0)
+ err = GNTST_general_error;
+
+ return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index eb5151fc8efa..e5fda0256feb 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -57,16 +57,8 @@ DEFINE_MUTEX(xs_response_mutex);
static int xenbus_irq;
static struct task_struct *xenbus_task;
-static DECLARE_WORK(probe_work, xenbus_probe);
-
-
static irqreturn_t wake_waiting(int irq, void *unused)
{
- if (unlikely(xenstored_ready == 0)) {
- xenstored_ready = 1;
- schedule_work(&probe_work);
- }
-
wake_up(&xb_waitq);
return IRQ_HANDLED;
}
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 5b471889d723..652894d61967 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -136,6 +136,7 @@ static int watch_otherend(struct xenbus_device *dev)
container_of(dev->dev.bus, struct xen_bus_type, bus);
return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
+ bus->otherend_will_handle,
bus->otherend_changed,
"%s/%s", dev->otherend, "state");
}
@@ -682,29 +683,107 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
-void xenbus_probe(struct work_struct *unused)
+static void xenbus_probe(void)
{
xenstored_ready = 1;
+ /*
+ * In the HVM case, xenbus_init() deferred its call to
+ * xs_init() in case callbacks were not operational yet.
+ * So do it now.
+ */
+ if (xen_store_domain_type == XS_HVM)
+ xs_init();
+
/* Notify others that xenstore is up */
blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}
-EXPORT_SYMBOL_GPL(xenbus_probe);
-static int __init xenbus_probe_initcall(void)
+/*
+ * Returns true when XenStore init must be deferred in order to
+ * allow the PCI platform device to be initialised, before we
+ * can actually have event channel interrupts working.
+ */
+static bool xs_hvm_defer_init_for_callback(void)
{
- if (!xen_domain())
- return -ENODEV;
+#ifdef CONFIG_XEN_PVHVM
+ return xen_store_domain_type == XS_HVM &&
+ !xen_have_vector_callback;
+#else
+ return false;
+#endif
+}
- if (xen_initial_domain() || xen_hvm_domain())
- return 0;
+static int xenbus_probe_thread(void *unused)
+{
+ DEFINE_WAIT(w);
- xenbus_probe(NULL);
+ /*
+ * We actually just want to wait for *any* trigger of xb_waitq,
+ * and run xenbus_probe() the moment it occurs.
+ */
+ prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&xb_waitq, &w);
+
+ DPRINTK("probing");
+ xenbus_probe();
return 0;
}
+static int __init xenbus_probe_initcall(void)
+{
+ /*
+ * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
+ * need to wait for the platform PCI device to come up.
+ */
+ if (xen_store_domain_type == XS_PV ||
+ (xen_store_domain_type == XS_HVM &&
+ !xs_hvm_defer_init_for_callback()))
+ xenbus_probe();
+
+ /*
+ * For XS_LOCAL, spawn a thread which will wait for xenstored
+ * or a xenstore-stubdom to be started, then probe. It will be
+ * triggered when communication starts happening, by waiting
+ * on xb_waitq.
+ */
+ if (xen_store_domain_type == XS_LOCAL) {
+ struct task_struct *probe_task;
+
+ probe_task = kthread_run(xenbus_probe_thread, NULL,
+ "xenbus_probe");
+ if (IS_ERR(probe_task))
+ return PTR_ERR(probe_task);
+ }
+ return 0;
+}
device_initcall(xenbus_probe_initcall);
+int xen_set_callback_via(uint64_t via)
+{
+ struct xen_hvm_param a;
+ int ret;
+
+ a.domid = DOMID_SELF;
+ a.index = HVM_PARAM_CALLBACK_IRQ;
+ a.value = via;
+
+ ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
+ if (ret)
+ return ret;
+
+ /*
+ * If xenbus_probe_initcall() deferred the xenbus_probe()
+ * due to the callback not functioning yet, we can do it now.
+ */
+ if (!xenstored_ready && xs_hvm_defer_init_for_callback())
+ xenbus_probe();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xen_set_callback_via);
+
/* Set up event channel for xenstored which is run as a local process
* (this is normally used only in dom0)
*/
@@ -817,11 +896,17 @@ static int __init xenbus_init(void)
break;
}
- /* Initialize the interface to xenstore. */
- err = xs_init();
- if (err) {
- pr_warn("Error initializing xenstore comms: %i\n", err);
- goto out_error;
+ /*
+ * HVM domains may not have a functional callback yet. In that
+ * case let xs_init() be called from xenbus_probe(), which will
+ * get invoked at an appropriate time.
+ */
+ if (xen_store_domain_type != XS_HVM) {
+ err = xs_init();
+ if (err) {
+ pr_warn("Error initializing xenstore comms: %i\n", err);
+ goto out_error;
+ }
}
if ((xen_store_domain_type != XS_LOCAL) &&
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index b0bed4faf44c..4bb603051d5b 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -180,6 +180,12 @@ static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type,
return err;
}
+static bool frontend_will_handle(struct xenbus_watch *watch,
+ const char *path, const char *token)
+{
+ return watch->nr_pending == 0;
+}
+
static void frontend_changed(struct xenbus_watch *watch,
const char *path, const char *token)
{
@@ -191,6 +197,7 @@ static struct xen_bus_type xenbus_backend = {
.levels = 3, /* backend/type/<frontend>/<id> */
.get_bus_id = backend_bus_id,
.probe = xenbus_probe_backend,
+ .otherend_will_handle = frontend_will_handle,
.otherend_changed = frontend_changed,
.bus = {
.name = "xen-backend",
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 3a06eb699f33..12e02eb01f59 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -705,9 +705,13 @@ int xs_watch_msg(struct xs_watch_event *event)
spin_lock(&watches_lock);
event->handle = find_watch(event->token);
- if (event->handle != NULL) {
+ if (event->handle != NULL &&
+ (!event->handle->will_handle ||
+ event->handle->will_handle(event->handle,
+ event->path, event->token))) {
spin_lock(&watch_events_lock);
list_add_tail(&event->list, &watch_events);
+ event->handle->nr_pending++;
wake_up(&watch_events_waitq);
spin_unlock(&watch_events_lock);
} else
@@ -765,6 +769,8 @@ int register_xenbus_watch(struct xenbus_watch *watch)
sprintf(token, "%lX", (long)watch);
+ watch->nr_pending = 0;
+
down_read(&xs_watch_rwsem);
spin_lock(&watches_lock);
@@ -814,11 +820,14 @@ void unregister_xenbus_watch(struct xenbus_watch *watch)
/* Cancel pending watch events. */
spin_lock(&watch_events_lock);
- list_for_each_entry_safe(event, tmp, &watch_events, list) {
- if (event->handle != watch)
- continue;
- list_del(&event->list);
- kfree(event);
+ if (watch->nr_pending) {
+ list_for_each_entry_safe(event, tmp, &watch_events, list) {
+ if (event->handle != watch)
+ continue;
+ list_del(&event->list);
+ kfree(event);
+ }
+ watch->nr_pending = 0;
}
spin_unlock(&watch_events_lock);
@@ -865,7 +874,6 @@ void xs_suspend_cancel(void)
static int xenwatch_thread(void *unused)
{
- struct list_head *ent;
struct xs_watch_event *event;
xenwatch_pid = current->pid;
@@ -880,13 +888,15 @@ static int xenwatch_thread(void *unused)
mutex_lock(&xenwatch_mutex);
spin_lock(&watch_events_lock);
- ent = watch_events.next;
- if (ent != &watch_events)
- list_del(ent);
+ event = list_first_entry_or_null(&watch_events,
+ struct xs_watch_event, list);
+ if (event) {
+ list_del(&event->list);
+ event->handle->nr_pending--;
+ }
spin_unlock(&watch_events_lock);
- if (ent != &watch_events) {
- event = list_entry(ent, struct xs_watch_event, list);
+ if (event) {
event->handle->callback(event->handle, event->path,
event->token);
kfree(event);
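
The will_handle hook added to struct xenbus_watch lets a watch veto queuing before an event lands on watch_events; the backend bus pairs it with the new nr_pending counter so that at most one event per watch is queued and later duplicates are simply dropped. A sketch of the same idea for a hypothetical driver-private watch (only the two callbacks are shown; everything else is assumed):

    #include <xen/xenbus.h>

    /* Only queue an event if none is pending: the single queued callback will
     * re-read xenstore and observe the latest state anyway. */
    static bool my_watch_will_handle(struct xenbus_watch *watch,
                                     const char *path, const char *token)
    {
            return watch->nr_pending == 0;
    }

    static void my_watch_changed(struct xenbus_watch *watch,
                                 const char *path, const char *token)
    {
            /* Re-read the nodes of interest and act on their current values. */
    }

    /* Registration passes both callbacks, matching the new prototype:
     *
     *      err = xenbus_watch_path(dev, path, &my_watch,
     *                              my_watch_will_handle, my_watch_changed);
     */
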
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 619128b55837..c579966a0e5c 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -515,10 +515,9 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
}
#ifdef CONFIG_9P_FSCACHE
- if (v9ses->fscache) {
+ if (v9ses->fscache)
v9fs_cache_session_put_cookie(v9ses);
- kfree(v9ses->cachetag);
- }
+ kfree(v9ses->cachetag);
#endif
kfree(v9ses->uname);
kfree(v9ses->aname);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 550d0b169d7c..61e0c552083f 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -624,9 +624,9 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
struct writeback_control wbc = {
.nr_to_write = LONG_MAX,
.sync_mode = WB_SYNC_ALL,
- .range_start = vma->vm_pgoff * PAGE_SIZE,
+ .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
/* absolute end, byte at end included */
- .range_end = vma->vm_pgoff * PAGE_SIZE +
+ .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
(vma->vm_end - vma->vm_start - 1),
};
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 14a6c1b90c9f..9a1e761b64a2 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -420,24 +420,51 @@ affs_mode_to_prot(struct inode *inode)
u32 prot = AFFS_I(inode)->i_protect;
umode_t mode = inode->i_mode;
+ /*
+ * First, clear all RWED bits for owner, group, other.
+ * Then, recalculate them afresh.
+ *
+ * We'll always clear the delete-inhibit bit for the owner, as that is
+ * the classic single-user mode AmigaOS protection bit and we need to
+ * stay compatible with all scenarios.
+ *
+ * Since multi-user AmigaOS is an extension, we'll only set the
+ * delete-allow bit if any of the other bits in the same user class
+ * (group/other) are used.
+ */
+ prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD
+ | FIBF_NOWRITE | FIBF_NODELETE
+ | FIBF_GRP_EXECUTE | FIBF_GRP_READ
+ | FIBF_GRP_WRITE | FIBF_GRP_DELETE
+ | FIBF_OTR_EXECUTE | FIBF_OTR_READ
+ | FIBF_OTR_WRITE | FIBF_OTR_DELETE);
+
+ /* Classic single-user AmigaOS flags. These are inverted. */
if (!(mode & 0100))
prot |= FIBF_NOEXECUTE;
if (!(mode & 0400))
prot |= FIBF_NOREAD;
if (!(mode & 0200))
prot |= FIBF_NOWRITE;
+
+ /* Multi-user extended flags. Not inverted. */
if (mode & 0010)
prot |= FIBF_GRP_EXECUTE;
if (mode & 0040)
prot |= FIBF_GRP_READ;
if (mode & 0020)
prot |= FIBF_GRP_WRITE;
+ if (mode & 0070)
+ prot |= FIBF_GRP_DELETE;
+
if (mode & 0001)
prot |= FIBF_OTR_EXECUTE;
if (mode & 0004)
prot |= FIBF_OTR_READ;
if (mode & 0002)
prot |= FIBF_OTR_WRITE;
+ if (mode & 0007)
+ prot |= FIBF_OTR_DELETE;
AFFS_I(inode)->i_protect = prot;
}
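
As a worked example of the mapping above: with mode 0644 the owner may read and write but not execute, so only FIBF_NOEXECUTE is set among the (inverted) owner bits, while the group and other classes each have a permission bit set (0040 and 0004) and therefore also get their delete-allow bits. A small sketch using the same FIBF_* names (kernel context assumed, purely illustrative):

    /* What affs_mode_to_prot() ends up computing for mode 0644. */
    static u32 affs_prot_for_0644(void)
    {
            u32 prot = 0;

            prot |= FIBF_NOEXECUTE;                 /* owner: rw-, execute denied */
            /* FIBF_NOREAD / FIBF_NOWRITE stay clear: owner read/write allowed */

            prot |= FIBF_GRP_READ | FIBF_GRP_DELETE;        /* group: r-- (0040 set) */
            prot |= FIBF_OTR_READ | FIBF_OTR_DELETE;        /* other: r-- (0004 set) */

            return prot;
    }
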
diff --git a/fs/affs/file.c b/fs/affs/file.c
index a85817f54483..ba084b0b214b 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -428,6 +428,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping,
return ret;
}
+static int affs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int copied,
+ struct page *page, void *fsdata)
+{
+ struct inode *inode = mapping->host;
+ int ret;
+
+ ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+
+ /* Clear Archived bit on file writes, as AmigaOS would do */
+ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
+ AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
+ mark_inode_dirty(inode);
+ }
+
+ return ret;
+}
+
static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,affs_get_block);
@@ -437,7 +455,7 @@ const struct address_space_operations affs_aops = {
.readpage = affs_readpage,
.writepage = affs_writepage,
.write_begin = affs_write_begin,
- .write_end = generic_write_end,
+ .write_end = affs_write_end,
.direct_IO = affs_direct_IO,
.bmap = _affs_bmap
};
@@ -794,6 +812,12 @@ done:
if (tmp > inode->i_size)
inode->i_size = AFFS_I(inode)->mmu_private = tmp;
+ /* Clear Archived bit on file writes, as AmigaOS would do */
+ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
+ AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
+ mark_inode_dirty(inode);
+ }
+
err_first_bh:
unlock_page(page);
put_page(page);
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 41c5749f4db7..5400a876d73f 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -460,8 +460,10 @@ affs_xrename(struct inode *old_dir, struct dentry *old_dentry,
return -EIO;
bh_new = affs_bread(sb, d_inode(new_dentry)->i_ino);
- if (!bh_new)
+ if (!bh_new) {
+ affs_brelse(bh_old);
return -EIO;
+ }
/* Remove old header from its parent directory. */
affs_lock_dir(old_dir);
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 069273a2483f..fc6c42eeb659 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -299,15 +299,17 @@ void afs_dynroot_depopulate(struct super_block *sb)
net->dynroot_sb = NULL;
mutex_unlock(&net->proc_cells_lock);
- inode_lock(root->d_inode);
-
- /* Remove all the pins for dirs created for manually added cells */
- list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
- if (subdir->d_fsdata) {
- subdir->d_fsdata = NULL;
- dput(subdir);
+ if (root) {
+ inode_lock(root->d_inode);
+
+ /* Remove all the pins for dirs created for manually added cells */
+ list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
+ if (subdir->d_fsdata) {
+ subdir->d_fsdata = NULL;
+ dput(subdir);
+ }
}
- }
- inode_unlock(root->d_inode);
+ inode_unlock(root->d_inode);
+ }
}
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 107427688edd..8ecb127be63f 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -190,7 +190,7 @@ static int __init afs_init(void)
goto error_cache;
#endif
- ret = register_pernet_subsys(&afs_net_ops);
+ ret = register_pernet_device(&afs_net_ops);
if (ret < 0)
goto error_net;
@@ -210,7 +210,7 @@ static int __init afs_init(void)
error_proc:
afs_fs_exit();
error_fs:
- unregister_pernet_subsys(&afs_net_ops);
+ unregister_pernet_device(&afs_net_ops);
error_net:
#ifdef CONFIG_AFS_FSCACHE
fscache_unregister_netfs(&afs_cache_netfs);
@@ -241,7 +241,7 @@ static void __exit afs_exit(void)
proc_remove(afs_proc_symlink);
afs_fs_exit();
- unregister_pernet_subsys(&afs_net_ops);
+ unregister_pernet_device(&afs_net_ops);
#ifdef CONFIG_AFS_FSCACHE
fscache_unregister_netfs(&afs_cache_netfs);
#endif
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 9101f62707af..e445c02dea3d 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -512,6 +512,7 @@ void afs_put_sysnames(struct afs_sysnames *sysnames)
if (sysnames->subs[i] != afs_init_sysname &&
sysnames->subs[i] != sysnames->blank)
kfree(sysnames->subs[i]);
+ kfree(sysnames);
}
}
diff --git a/fs/afs/write.c b/fs/afs/write.c
index e00461a6de9a..ec8d27cf92a5 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -188,11 +188,11 @@ int afs_write_end(struct file *file, struct address_space *mapping,
i_size = i_size_read(&vnode->vfs_inode);
if (maybe_i_size > i_size) {
- spin_lock(&vnode->wb_lock);
+ write_seqlock(&vnode->cb_lock);
i_size = i_size_read(&vnode->vfs_inode);
if (maybe_i_size > i_size)
i_size_write(&vnode->vfs_inode, maybe_i_size);
- spin_unlock(&vnode->wb_lock);
+ write_sequnlock(&vnode->cb_lock);
}
if (!PageUptodate(page)) {
@@ -792,6 +792,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
vmf->page->index, priv);
SetPagePrivate(vmf->page);
set_page_private(vmf->page, priv);
+ file_update_time(file);
sb_end_pagefault(inode->i_sb);
return VM_FAULT_LOCKED;
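Note on the afs_write_end hunk above: the i_size update now takes the vnode's cb_lock seqlock and re-checks the size under the lock before writing it. A minimal userspace sketch of that check-then-recheck-under-lock pattern (plain mutex and a hypothetical shared_size variable, not AFS code):

#include <pthread.h>
#include <stdio.h>

static long long shared_size;
static pthread_mutex_t size_lock = PTHREAD_MUTEX_INITIALIZER;

/* Grow shared_size to at least new_size: cheap unlocked check first,
 * then re-check under the lock because another writer may have grown
 * it in the meantime (mirrors the i_size update in afs_write_end). */
static void maybe_grow_size(long long new_size)
{
	if (new_size > shared_size) {
		pthread_mutex_lock(&size_lock);
		if (new_size > shared_size)
			shared_size = new_size;
		pthread_mutex_unlock(&size_lock);
	}
}

int main(void)
{
	maybe_grow_size(4096);
	maybe_grow_size(1024);	/* no-op: smaller than the current size */
	printf("shared_size = %lld\n", shared_size);	/* prints 4096 */
	return 0;
}

The unlocked first check only avoids taking the lock in the common case; correctness comes from the second check performed while the lock is held.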
diff --git a/fs/aio.c b/fs/aio.c
index b5fbf2061868..413ec289bfa1 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -169,6 +169,7 @@ struct fsync_iocb {
struct file *file;
struct work_struct work;
bool datasync;
+ struct cred *creds;
};
struct poll_iocb {
@@ -1579,8 +1580,11 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
static void aio_fsync_work(struct work_struct *work)
{
struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
+ const struct cred *old_cred = override_creds(iocb->fsync.creds);
iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+ revert_creds(old_cred);
+ put_cred(iocb->fsync.creds);
iocb_put(iocb);
}
@@ -1594,6 +1598,10 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
if (unlikely(!req->file->f_op->fsync))
return -EINVAL;
+ req->creds = prepare_creds();
+ if (!req->creds)
+ return -ENOMEM;
+
req->datasync = datasync;
INIT_WORK(&req->work, aio_fsync_work);
schedule_work(&req->work);
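Note on the aio_fsync hunks above: the submitter's credentials are captured with prepare_creds() at submission time and assumed in the deferred work via override_creds()/revert_creds(). A kernel-style sketch of that pattern with a placeholder work item (illustrative only, not the aio code):

#include <linux/kernel.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_op {
	struct work_struct work;
	struct cred *creds;	/* reference taken by prepare_creds() */
};

static void deferred_op_fn(struct work_struct *work)
{
	struct deferred_op *op = container_of(work, struct deferred_op, work);
	const struct cred *old = override_creds(op->creds);

	/* ... do the privileged work as the original submitter ... */

	revert_creds(old);
	put_cred(op->creds);
	kfree(op);
}

static int submit_deferred_op(void)
{
	struct deferred_op *op = kzalloc(sizeof(*op), GFP_KERNEL);

	if (!op)
		return -ENOMEM;
	op->creds = prepare_creds();	/* snapshot the caller's creds */
	if (!op->creds) {
		kfree(op);
		return -ENOMEM;
	}
	INIT_WORK(&op->work, deferred_op_fn);
	schedule_work(&op->work);
	return 0;
}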
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index e7fd0b5b9234..975dd0dbc252 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1766,7 +1766,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
(!regset->active || regset->active(t->task, regset) > 0)) {
int ret;
size_t size = regset_size(t->task, regset);
- void *data = kmalloc(size, GFP_KERNEL);
+ void *data = kzalloc(size, GFP_KERNEL);
if (unlikely(!data))
return 0;
ret = regset->get(t->task, regset,
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index aa4a7a23ff99..27a04f492541 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -694,12 +694,24 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
struct super_block *sb = file_inode(file)->i_sb;
struct dentry *root = sb->s_root, *dentry;
int err = 0;
+ struct file *f = NULL;
e = create_entry(buffer, count);
if (IS_ERR(e))
return PTR_ERR(e);
+ if (e->flags & MISC_FMT_OPEN_FILE) {
+ f = open_exec(e->interpreter);
+ if (IS_ERR(f)) {
+ pr_notice("register: failed to install interpreter file %s\n",
+ e->interpreter);
+ kfree(e);
+ return PTR_ERR(f);
+ }
+ e->interp_file = f;
+ }
+
inode_lock(d_inode(root));
dentry = lookup_one_len(e->name, root, strlen(e->name));
err = PTR_ERR(dentry);
@@ -723,21 +735,6 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
goto out2;
}
- if (e->flags & MISC_FMT_OPEN_FILE) {
- struct file *f;
-
- f = open_exec(e->interpreter);
- if (IS_ERR(f)) {
- err = PTR_ERR(f);
- pr_notice("register: failed to install interpreter file %s\n", e->interpreter);
- simple_release_fs(&bm_mnt, &entry_count);
- iput(inode);
- inode = NULL;
- goto out2;
- }
- e->interp_file = f;
- }
-
e->dentry = dget(dentry);
inode->i_private = e;
inode->i_fop = &bm_entry_operations;
@@ -754,6 +751,8 @@ out:
inode_unlock(d_inode(root));
if (err) {
+ if (f)
+ filp_close(f, NULL);
kfree(e);
return err;
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c158bad9a075..b34f76af59c4 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1463,10 +1463,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
*/
if (!for_part) {
ret = devcgroup_inode_permission(bdev->bd_inode, perm);
- if (ret != 0) {
- bdput(bdev);
+ if (ret != 0)
return ret;
- }
}
restart:
@@ -1535,8 +1533,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
goto out_clear;
BUG_ON(for_part);
ret = __blkdev_get(whole, mode, 1);
- if (ret)
+ if (ret) {
+ bdput(whole);
goto out_clear;
+ }
bdev->bd_contains = whole;
bdev->bd_part = disk_get_part(disk, partno);
if (!(disk->flags & GENHD_FL_UP) ||
@@ -1586,7 +1586,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
disk_unblock_events(disk);
put_disk_and_module(disk);
out:
- bdput(bdev);
return ret;
}
@@ -1672,6 +1671,9 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
bdput(whole);
}
+ if (res)
+ bdput(bdev);
+
return res;
}
EXPORT_SYMBOL(blkdev_get);
@@ -1791,6 +1793,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
struct gendisk *disk = bdev->bd_disk;
struct block_device *victim = NULL;
+ /*
+ * Sync early if it looks like we're the last one. If someone else
+ * opens the block device between now and the decrement of bd_openers
+ * then we did a sync that we didn't need to, but that's not the end
+ * of the world and we want to avoid long (potentially several-minute)
+ * syncs while holding the mutex.
+ */
+ if (bdev->bd_openers == 1)
+ sync_blockdev(bdev);
+
mutex_lock_nested(&bdev->bd_mutex, for_part);
if (for_part)
bdev->bd_part_count--;
@@ -1907,6 +1919,7 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct inode *bd_inode = bdev_file_inode(file);
loff_t size = i_size_read(bd_inode);
struct blk_plug plug;
+ size_t shorted = 0;
ssize_t ret;
if (bdev_read_only(I_BDEV(bd_inode)))
@@ -1921,12 +1934,17 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
return -EOPNOTSUPP;
- iov_iter_truncate(from, size - iocb->ki_pos);
+ size -= iocb->ki_pos;
+ if (iov_iter_count(from) > size) {
+ shorted = iov_iter_count(from) - size;
+ iov_iter_truncate(from, size);
+ }
blk_start_plug(&plug);
ret = __generic_file_write_iter(iocb, from);
if (ret > 0)
ret = generic_write_sync(iocb, ret);
+ iov_iter_reexpand(from, iov_iter_count(from) + shorted);
blk_finish_plug(&plug);
return ret;
}
@@ -1938,13 +1956,21 @@ ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct inode *bd_inode = bdev_file_inode(file);
loff_t size = i_size_read(bd_inode);
loff_t pos = iocb->ki_pos;
+ size_t shorted = 0;
+ ssize_t ret;
if (pos >= size)
return 0;
size -= pos;
- iov_iter_truncate(to, size);
- return generic_file_read_iter(iocb, to);
+ if (iov_iter_count(to) > size) {
+ shorted = iov_iter_count(to) - size;
+ iov_iter_truncate(to, size);
+ }
+
+ ret = generic_file_read_iter(iocb, to);
+ iov_iter_reexpand(to, iov_iter_count(to) + shorted);
+ return ret;
}
EXPORT_SYMBOL_GPL(blkdev_read_iter);
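Note on the blkdev_{read,write}_iter hunks above: the iov_iter is clamped to the bytes remaining on the device, the trimmed amount is remembered, and the iterator is re-expanded afterwards so the caller still sees the untouched residual count. A minimal standalone sketch of that clamp-and-restore arithmetic on a toy iterator (not the kernel iov_iter API):

#include <stdio.h>

/* Toy stand-in for an iov_iter: just tracks how many bytes remain. */
struct toy_iter {
	size_t count;
};

/* Perform an I/O of at most dev_size - pos bytes, then restore the
 * iterator so the caller sees the bytes we refused to touch. */
static size_t bounded_io(struct toy_iter *it, size_t pos, size_t dev_size)
{
	size_t avail = dev_size > pos ? dev_size - pos : 0;
	size_t shorted = 0;
	size_t done;

	if (it->count > avail) {
		shorted = it->count - avail;	/* bytes beyond the device */
		it->count = avail;		/* like iov_iter_truncate() */
	}

	done = it->count;	/* pretend the whole clamped range completed */
	it->count = 0;

	it->count += shorted;	/* like iov_iter_reexpand() */
	return done;
}

int main(void)
{
	struct toy_iter it = { .count = 4096 };
	size_t n = bounded_io(&it, 1000, 2048);

	printf("completed %zu bytes, %zu left untouched\n", n, it.count);
	return 0;
}

Running it prints "completed 1048 bytes, 3048 left untouched", mirroring how iov_iter_reexpand() hands the unwritten tail back to the caller instead of silently dropping it.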
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 02e4e903dfe9..f79c0cb7697a 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -434,3 +434,11 @@ void btrfs_set_work_high_priority(struct btrfs_work *work)
{
set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
+
+void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
+{
+ if (wq->high)
+ flush_workqueue(wq->high->normal_wq);
+
+ flush_workqueue(wq->normal->normal_wq);
+}
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 7861c9feba5f..e87be7c8be58 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -73,5 +73,6 @@ void btrfs_set_work_high_priority(struct btrfs_work *work);
struct btrfs_fs_info *btrfs_work_owner(const struct btrfs_work *work);
struct btrfs_fs_info *btrfs_workqueue_owner(const struct __btrfs_workqueue *wq);
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq);
+void btrfs_flush_workqueue(struct btrfs_workqueue *wq);
#endif
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 19855659f650..6b8824de2abb 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1419,6 +1419,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
if (ret < 0 && ret != -ENOENT) {
ulist_free(tmp);
ulist_free(*roots);
+ *roots = NULL;
return ret;
}
node = ulist_next(tmp, &uiter);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index c9943d70e2cb..00dc1b5c8737 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -267,9 +267,12 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
ret = btrfs_inc_ref(trans, root, cow, 1);
else
ret = btrfs_inc_ref(trans, root, cow, 0);
-
- if (ret)
+ if (ret) {
+ btrfs_tree_unlock(cow);
+ free_extent_buffer(cow);
+ btrfs_abort_transaction(trans, ret);
return ret;
+ }
btrfs_mark_buffer_dirty(cow);
*cow_ret = cow;
@@ -1110,6 +1113,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
if (ret) {
+ btrfs_tree_unlock(cow);
+ free_extent_buffer(cow);
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -1117,6 +1122,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
ret = btrfs_reloc_cow_block(trans, root, buf, cow);
if (ret) {
+ btrfs_tree_unlock(cow);
+ free_extent_buffer(cow);
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -1149,6 +1156,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if (last_ref) {
ret = tree_mod_log_free_eb(buf);
if (ret) {
+ btrfs_tree_unlock(cow);
+ free_extent_buffer(cow);
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -1347,7 +1356,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
- extent_buffer_get(eb_rewin);
+ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
+ eb_rewin, btrfs_header_level(eb_rewin));
btrfs_tree_read_lock(eb_rewin);
__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
WARN_ON(btrfs_header_nritems(eb_rewin) >
@@ -1404,8 +1414,30 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
"failed to read tree block %llu from get_old_root",
logical);
} else {
+ struct tree_mod_elem *tm2;
+
+ btrfs_tree_read_lock(old);
eb = btrfs_clone_extent_buffer(old);
+ /*
+ * After the lookup for the most recent tree mod operation
+ * above and before we locked and cloned the extent buffer
+ * 'old', a new tree mod log operation may have been added.
+ * So look up a more recent one to make sure the number
+ * of mod log operations we replay is consistent with the
+ * number of items we have in the cloned extent buffer,
+ * otherwise we can hit a BUG_ON when rewinding the extent
+ * buffer.
+ */
+ tm2 = tree_mod_log_search(fs_info, logical, time_seq);
+ btrfs_tree_read_unlock(old);
free_extent_buffer(old);
+ ASSERT(tm2);
+ ASSERT(tm2 == tm || tm2->seq > tm->seq);
+ if (!tm2 || tm2->seq < tm->seq) {
+ free_extent_buffer(eb);
+ return NULL;
+ }
+ tm = tm2;
}
} else if (old_root) {
eb_root_owner = btrfs_header_owner(eb_root);
@@ -1421,8 +1453,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
if (!eb)
return NULL;
- extent_buffer_get(eb);
- btrfs_tree_read_lock(eb);
if (old_root) {
btrfs_set_header_bytenr(eb, eb->start);
btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
@@ -1430,6 +1460,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
btrfs_set_header_level(eb, old_root->level);
btrfs_set_header_generation(eb, old_generation);
}
+ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
+ btrfs_header_level(eb));
+ btrfs_tree_read_lock(eb);
if (tm)
__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
else
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 15cb96ad15d8..4d1c12faada8 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1459,6 +1459,21 @@ do { \
#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31)
+#define BTRFS_INODE_FLAG_MASK \
+ (BTRFS_INODE_NODATASUM | \
+ BTRFS_INODE_NODATACOW | \
+ BTRFS_INODE_READONLY | \
+ BTRFS_INODE_NOCOMPRESS | \
+ BTRFS_INODE_PREALLOC | \
+ BTRFS_INODE_SYNC | \
+ BTRFS_INODE_IMMUTABLE | \
+ BTRFS_INODE_APPEND | \
+ BTRFS_INODE_NODUMP | \
+ BTRFS_INODE_NOATIME | \
+ BTRFS_INODE_DIRSYNC | \
+ BTRFS_INODE_COMPRESS | \
+ BTRFS_INODE_ROOT_ITEM_INIT)
+
struct btrfs_map_token {
const struct extent_buffer *eb;
char *kaddr;
@@ -3271,6 +3286,8 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
unsigned long new_flags);
int btrfs_sync_fs(struct super_block *sb, int wait);
+char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
+ u64 subvol_objectid);
static inline __printf(2, 3) __cold
void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
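Note on the new BTRFS_INODE_FLAG_MASK above: a mask like this is typically used to reject on-disk inode items carrying unknown flag bits. A hypothetical check, assuming the ctree.h definitions are in scope (illustrative, not necessarily the exact call site added by this series):

/* Hypothetical validation helper: reject inode items whose flags fall
 * outside the set of flags known to this kernel. */
static int check_inode_flags(u64 flags)
{
	if (flags & ~BTRFS_INODE_FLAG_MASK)
		return -EUCLEAN;	/* corrupted or unknown flag bits */
	return 0;
}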
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index e9522f2f25cc..fea5ccfade5c 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -6,6 +6,7 @@
#include <linux/slab.h>
#include <linux/iversion.h>
+#include <linux/sched/mm.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
@@ -619,8 +620,7 @@ static int btrfs_delayed_inode_reserve_metadata(
*/
if (!src_rsv || (!trans->bytes_reserved &&
src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
- ret = btrfs_qgroup_reserve_meta_prealloc(root,
- fs_info->nodesize, true);
+ ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
if (ret < 0)
return ret;
ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
@@ -642,7 +642,7 @@ static int btrfs_delayed_inode_reserve_metadata(
btrfs_ino(inode),
num_bytes, 1);
} else {
- btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
+ btrfs_qgroup_free_meta_prealloc(root, num_bytes);
}
return ret;
}
@@ -801,11 +801,14 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
struct btrfs_delayed_item *delayed_item)
{
struct extent_buffer *leaf;
+ unsigned int nofs_flag;
char *ptr;
int ret;
+ nofs_flag = memalloc_nofs_save();
ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
delayed_item->data_len);
+ memalloc_nofs_restore(nofs_flag);
if (ret < 0 && ret != -EEXIST)
return ret;
@@ -933,6 +936,7 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_delayed_node *node)
{
struct btrfs_delayed_item *curr, *prev;
+ unsigned int nofs_flag;
int ret = 0;
do_again:
@@ -941,7 +945,9 @@ do_again:
if (!curr)
goto delete_fail;
+ nofs_flag = memalloc_nofs_save();
ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
+ memalloc_nofs_restore(nofs_flag);
if (ret < 0)
goto delete_fail;
else if (ret > 0) {
@@ -1008,6 +1014,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
struct btrfs_key key;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
+ unsigned int nofs_flag;
int mod;
int ret;
@@ -1020,7 +1027,9 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
else
mod = 1;
+ nofs_flag = memalloc_nofs_save();
ret = btrfs_lookup_inode(trans, root, path, &key, mod);
+ memalloc_nofs_restore(nofs_flag);
if (ret > 0) {
btrfs_release_path(path);
return -ENOENT;
@@ -1071,7 +1080,10 @@ search:
key.type = BTRFS_INODE_EXTREF_KEY;
key.offset = -1;
+
+ nofs_flag = memalloc_nofs_save();
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ memalloc_nofs_restore(nofs_flag);
if (ret < 0)
goto err_out;
ASSERT(ret);
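Note on the delayed-inode hunks above: each btree operation is wrapped in memalloc_nofs_save()/memalloc_nofs_restore() so that allocations made inside implicitly behave as GFP_NOFS and reclaim cannot recurse back into the filesystem. The general shape of that scoped-NOFS pattern, shown with a placeholder operation rather than btrfs code:

#include <linux/sched/mm.h>

/* Run op() in a scope where all allocations behave as GFP_NOFS, so
 * direct reclaim triggered inside cannot re-enter the filesystem. */
static int run_with_nofs_scope(int (*op)(void *), void *arg)
{
	unsigned int nofs_flag;
	int ret;

	nofs_flag = memalloc_nofs_save();	/* enter NOFS scope */
	ret = op(arg);				/* may allocate internally */
	memalloc_nofs_restore(nofs_flag);	/* leave NOFS scope */

	return ret;
}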
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 96763805787e..4d1d2657d70c 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -54,6 +54,17 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
if (ret) {
no_valid_dev_replace_entry_found:
+ /*
+ * We don't have a replace item or it's corrupted. If there is
+ * a replace target, fail the mount.
+ */
+ if (btrfs_find_device(fs_info->fs_devices,
+ BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) {
+ btrfs_err(fs_info,
+ "found replace target device without a valid replace item");
+ ret = -EUCLEAN;
+ goto out;
+ }
ret = 0;
dev_replace->replace_state =
BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED;
@@ -107,16 +118,27 @@ no_valid_dev_replace_entry_found:
case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
- dev_replace->srcdev = NULL;
- dev_replace->tgtdev = NULL;
+ /*
+ * We don't have an active replace item but if there is a
+ * replace target, fail the mount.
+ */
+ if (btrfs_find_device(fs_info->fs_devices,
+ BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) {
+ btrfs_err(fs_info,
+ "replace devid present without an active replace item");
+ ret = -EUCLEAN;
+ } else {
+ dev_replace->srcdev = NULL;
+ dev_replace->tgtdev = NULL;
+ }
break;
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
- dev_replace->srcdev = btrfs_find_device(fs_info, src_devid,
- NULL, NULL);
- dev_replace->tgtdev = btrfs_find_device(fs_info,
+ dev_replace->srcdev = btrfs_find_device(fs_info->fs_devices,
+ src_devid, NULL, NULL, true);
+ dev_replace->tgtdev = btrfs_find_device(fs_info->fs_devices,
BTRFS_DEV_REPLACE_DEVID,
- NULL, NULL);
+ NULL, NULL, true);
/*
* allow 'btrfs dev replace_cancel' if src/tgt device is
* missing
@@ -190,7 +212,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
int ret = 0;
*device_out = NULL;
- if (fs_info->fs_devices->seeding) {
+ if (srcdev->fs_devices->seeding) {
btrfs_err(fs_info, "the filesystem is a seed filesystem!");
return -EINVAL;
}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b5039b16de93..cb21ffd3bba7 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -438,6 +438,16 @@ int btrfs_verify_level_key(struct btrfs_fs_info *fs_info,
*/
if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
return 0;
+
+ /* We have @first_key, so this @eb must have at least one item */
+ if (btrfs_header_nritems(eb) == 0) {
+ btrfs_err(fs_info,
+ "invalid tree nritems, bytenr=%llu nritems=0 expect >0",
+ eb->start);
+ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ return -EUCLEAN;
+ }
+
if (found_level)
btrfs_node_key_to_cpu(eb, &found_key, 0);
else
@@ -1490,9 +1500,16 @@ int btrfs_init_fs_root(struct btrfs_root *root)
spin_lock_init(&root->ino_cache_lock);
init_waitqueue_head(&root->ino_cache_wait);
- ret = get_anon_bdev(&root->anon_dev);
- if (ret)
- goto fail;
+ /*
+ * Don't assign an anonymous block device to roots that are not exposed
+ * to userspace, as the id pool is limited to 1M
+ */
+ if (is_fstree(root->root_key.objectid) &&
+ btrfs_root_refs(&root->root_item) > 0) {
+ ret = get_anon_bdev(&root->anon_dev);
+ if (ret)
+ goto fail;
+ }
mutex_lock(&root->objectid_mutex);
ret = btrfs_find_highest_objectid(root,
@@ -3007,6 +3024,18 @@ retry_root_backup:
fs_info->generation = generation;
fs_info->last_trans_committed = generation;
+ /*
+ * If we have a uuid root and we're not being told to rescan we need to
+ * check the generation here so we can set the
+ * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the
+ * transaction during a balance or the log replay without updating the
+ * uuid generation, and then if we crash we would rescan the uuid tree,
+ * even though it was perfectly fine.
+ */
+ if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
+ fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
+ set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
+
ret = btrfs_verify_dev_extents(fs_info);
if (ret) {
btrfs_err(fs_info,
@@ -3237,8 +3266,6 @@ retry_root_backup:
close_ctree(fs_info);
return ret;
}
- } else {
- set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
}
set_bit(BTRFS_FS_OPEN, &fs_info->flags);
@@ -3949,6 +3976,19 @@ void close_ctree(struct btrfs_fs_info *fs_info)
*/
btrfs_delete_unused_bgs(fs_info);
+ /*
+ * There might be existing delayed inode workers still running
+ * and holding an empty delayed inode item. We must wait for
+ * them to complete first because they can create a transaction.
+ * This happens when someone calls btrfs_balance_delayed_items()
+ * and then a transaction commit runs the same delayed nodes
+ * before any delayed worker has done something with the nodes.
+ * We must wait for any worker here and not at transaction
+ * commit time since that could cause a deadlock.
+ * This is a very rare case.
+ */
+ btrfs_flush_workqueue(fs_info->delayed_workers);
+
ret = btrfs_commit_super(fs_info);
if (ret)
btrfs_err(fs_info, "commit super ret %d", ret);
@@ -4404,6 +4444,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
cache->io_ctl.inode = NULL;
iput(inode);
}
+ ASSERT(cache->io_ctl.pages == NULL);
btrfs_put_block_group(cache);
}
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 1f3755b3a37a..665ec85cb09b 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -57,9 +57,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
return type;
}
-static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
- u64 root_objectid, u32 generation,
- int check_generation)
+struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
+ u64 root_objectid, u32 generation,
+ int check_generation)
{
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *root;
@@ -152,7 +152,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
}
-static struct dentry *btrfs_get_parent(struct dentry *child)
+struct dentry *btrfs_get_parent(struct dentry *child)
{
struct inode *dir = d_inode(child);
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
index 57488ecd7d4e..f32f4113c976 100644
--- a/fs/btrfs/export.h
+++ b/fs/btrfs/export.h
@@ -18,4 +18,9 @@ struct btrfs_fid {
u64 parent_root_objectid;
} __attribute__ ((packed));
+struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
+ u64 root_objectid, u32 generation,
+ int check_generation);
+struct dentry *btrfs_get_parent(struct dentry *child);
+
#endif
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 47ca1ebda056..ce5e0f6c6af4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1057,12 +1057,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
ASSERT(eb->fs_info);
/*
- * Every shared one has parent tree
- * block, which must be aligned to
- * nodesize.
+ * Every shared one has parent tree block,
+ * which must be aligned to sector size.
*/
if (offset &&
- IS_ALIGNED(offset, eb->fs_info->nodesize))
+ IS_ALIGNED(offset, eb->fs_info->sectorsize))
return type;
}
} else if (is_data == BTRFS_REF_TYPE_DATA) {
@@ -1071,12 +1070,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
if (type == BTRFS_SHARED_DATA_REF_KEY) {
ASSERT(eb->fs_info);
/*
- * Every shared one has parent tree
- * block, which must be aligned to
- * nodesize.
+ * Every shared one has parent tree block,
+ * which must be aligned to sector size.
*/
if (offset &&
- IS_ALIGNED(offset, eb->fs_info->nodesize))
+ IS_ALIGNED(offset, eb->fs_info->sectorsize))
return type;
}
} else {
@@ -1086,8 +1084,9 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
}
btrfs_print_leaf((struct extent_buffer *)eb);
- btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
- eb->start, type);
+ btrfs_err(eb->fs_info,
+ "eb %llu iref 0x%lx invalid extent inline ref type %d",
+ eb->start, (unsigned long)iref, type);
WARN_ON(1);
return BTRFS_REF_TYPE_INVALID;
@@ -9099,8 +9098,6 @@ out:
*/
if (!for_reloc && !root_dropped)
btrfs_add_dead_root(root);
- if (err && err != -EAGAIN)
- btrfs_handle_fs_error(fs_info, err, NULL);
return err;
}
@@ -10361,6 +10358,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
&fs_info->block_group_cache_tree);
RB_CLEAR_NODE(&block_group->cache_node);
+ /* Once for the block groups rbtree */
+ btrfs_put_block_group(block_group);
+
if (fs_info->first_logical_byte == block_group->key.objectid)
fs_info->first_logical_byte = (u64)-1;
spin_unlock(&fs_info->block_group_cache_lock);
@@ -10496,9 +10496,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- btrfs_put_block_group(block_group);
- btrfs_put_block_group(block_group);
-
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
ret = -EIO;
@@ -10524,7 +10521,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
/* once for the tree */
free_extent_map(em);
}
+
out:
+ /* Once for the lookup reference */
+ btrfs_put_block_group(block_group);
btrfs_free_path(path);
return ret;
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 11efb4f5041c..dabf153843e9 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -138,7 +138,61 @@ static int add_extent_changeset(struct extent_state *state, unsigned bits,
return ret;
}
-static void flush_write_bio(struct extent_page_data *epd);
+static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
+ unsigned long bio_flags)
+{
+ blk_status_t ret = 0;
+ struct bio_vec *bvec = bio_last_bvec_all(bio);
+ struct page *page = bvec->bv_page;
+ struct extent_io_tree *tree = bio->bi_private;
+ u64 start;
+
+ start = page_offset(page) + bvec->bv_offset;
+
+ bio->bi_private = NULL;
+
+ if (tree->ops)
+ ret = tree->ops->submit_bio_hook(tree->private_data, bio,
+ mirror_num, bio_flags, start);
+ else
+ btrfsic_submit_bio(bio);
+
+ return blk_status_to_errno(ret);
+}
+
+/* Cleanup unsubmitted bios */
+static void end_write_bio(struct extent_page_data *epd, int ret)
+{
+ if (epd->bio) {
+ epd->bio->bi_status = errno_to_blk_status(ret);
+ bio_endio(epd->bio);
+ epd->bio = NULL;
+ }
+}
+
+/*
+ * Submit bio from extent page data via submit_one_bio
+ *
+ * Return 0 if everything is OK.
+ * Return <0 for error.
+ */
+static int __must_check flush_write_bio(struct extent_page_data *epd)
+{
+ int ret = 0;
+
+ if (epd->bio) {
+ ret = submit_one_bio(epd->bio, 0, 0);
+ /*
+ * Cleanup of epd->bio is handled by its endio function, which is
+ * triggered either by successful bio execution or by the error
+ * handler of the submit bio hook. So at this point, no matter
+ * what happened, we don't need to clean up epd->bio.
+ */
+ epd->bio = NULL;
+ }
+ return ret;
+}
int __init extent_io_init(void)
{
@@ -1707,7 +1761,8 @@ static int __process_pages_contig(struct address_space *mapping,
if (!PageDirty(pages[i]) ||
pages[i]->mapping != mapping) {
unlock_page(pages[i]);
- put_page(pages[i]);
+ for (; i < ret; i++)
+ put_page(pages[i]);
err = -EAGAIN;
goto out;
}
@@ -2709,28 +2764,6 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
return bio;
}
-static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
- unsigned long bio_flags)
-{
- blk_status_t ret = 0;
- struct bio_vec *bvec = bio_last_bvec_all(bio);
- struct page *page = bvec->bv_page;
- struct extent_io_tree *tree = bio->bi_private;
- u64 start;
-
- start = page_offset(page) + bvec->bv_offset;
-
- bio->bi_private = NULL;
-
- if (tree->ops)
- ret = tree->ops->submit_bio_hook(tree->private_data, bio,
- mirror_num, bio_flags, start);
- else
- btrfsic_submit_bio(bio);
-
- return blk_status_to_errno(ret);
-}
-
/*
* @opf: bio REQ_OP_* and REQ_* flags as one value
* @tree: tree so we can call our merge_bio hook
@@ -3438,6 +3471,9 @@ done:
* records are inserted to lock ranges in the tree, and as dirty areas
* are found, they are marked writeback. Then the lock bits are removed
* and the end_io handler clears the writeback ranges
+ *
+ * Return 0 if everything goes well.
+ * Return <0 for error.
*/
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
struct extent_page_data *epd)
@@ -3505,6 +3541,7 @@ done:
end_extent_writepage(page, ret, start, page_end);
}
unlock_page(page);
+ ASSERT(ret <= 0);
return ret;
done_unlocked:
@@ -3517,18 +3554,34 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
TASK_UNINTERRUPTIBLE);
}
+static void end_extent_buffer_writeback(struct extent_buffer *eb)
+{
+ clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+ smp_mb__after_atomic();
+ wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
+}
+
+/*
+ * Lock eb pages and flush the bio if we can't get the locks
+ *
+ * Return 0 if nothing went wrong
+ * Return >0 is the same as 0, except the bio is not submitted
+ * Return <0 if something went wrong, no page is locked
+ */
static noinline_for_stack int
lock_extent_buffer_for_io(struct extent_buffer *eb,
struct btrfs_fs_info *fs_info,
struct extent_page_data *epd)
{
- int i, num_pages;
+ int i, num_pages, failed_page_nr;
int flush = 0;
int ret = 0;
if (!btrfs_try_tree_write_lock(eb)) {
+ ret = flush_write_bio(epd);
+ if (ret < 0)
+ return ret;
flush = 1;
- flush_write_bio(epd);
btrfs_tree_lock(eb);
}
@@ -3537,7 +3590,9 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
if (!epd->sync_io)
return 0;
if (!flush) {
- flush_write_bio(epd);
+ ret = flush_write_bio(epd);
+ if (ret < 0)
+ return ret;
flush = 1;
}
while (1) {
@@ -3578,7 +3633,14 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
if (!trylock_page(p)) {
if (!flush) {
- flush_write_bio(epd);
+ int err;
+
+ err = flush_write_bio(epd);
+ if (err < 0) {
+ ret = err;
+ failed_page_nr = i;
+ goto err_unlock;
+ }
flush = 1;
}
lock_page(p);
@@ -3586,13 +3648,25 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
}
return ret;
-}
-
-static void end_extent_buffer_writeback(struct extent_buffer *eb)
-{
- clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
- smp_mb__after_atomic();
- wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
+err_unlock:
+ /* Unlock already locked pages */
+ for (i = 0; i < failed_page_nr; i++)
+ unlock_page(eb->pages[i]);
+ /*
+ * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
+ * Also set back EXTENT_BUFFER_DIRTY so that future attempts on this eb
+ * can be made, undoing everything done before.
+ */
+ btrfs_tree_lock(eb);
+ spin_lock(&eb->refs_lock);
+ set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
+ end_extent_buffer_writeback(eb);
+ spin_unlock(&eb->refs_lock);
+ percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
+ fs_info->dirty_metadata_batch);
+ btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+ btrfs_tree_unlock(eb);
+ return ret;
}
static void set_btree_ioerr(struct page *page)
@@ -3839,6 +3913,10 @@ retry:
if (!ret) {
free_extent_buffer(eb);
continue;
+ } else if (ret < 0) {
+ done = 1;
+ free_extent_buffer(eb);
+ break;
}
ret = write_one_eb(eb, fs_info, wbc, &epd);
@@ -3868,7 +3946,44 @@ retry:
index = 0;
goto retry;
}
- flush_write_bio(&epd);
+ ASSERT(ret <= 0);
+ if (ret < 0) {
+ end_write_bio(&epd, ret);
+ return ret;
+ }
+ /*
+ * If something went wrong, don't allow any metadata write bio to be
+ * submitted.
+ *
+ * This would prevent use-after-free if we had dirty pages not
+ * cleaned up, which can still happen with fuzzed images.
+ *
+ * - Bad extent tree
+ * Allowing existing tree block to be allocated for other trees.
+ *
+ * - Log tree operations
+ * Existing tree blocks get allocated to the log tree, which bumps
+ * their generation; they then get cleaned in tree re-balance.
+ * Such tree blocks will not be written back, since they are clean,
+ * thus no WRITTEN flag is set.
+ * And after the log is written back, these tree blocks are not traced
+ * by any dirty extent_io_tree.
+ *
+ * - Offending tree block gets re-dirtied from its original owner
+ * Since it has bumped generation, no WRITTEN flag, it can be
+ * reused without COWing. This tree block will not be traced
+ * by btrfs_transaction::dirty_pages.
+ *
+ * Now such a dirty tree block will not be cleaned by any dirty
+ * extent io tree. Thus we don't want to submit such a wild eb
+ * if the fs already has errors.
+ */
+ if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
+ ret = flush_write_bio(&epd);
+ } else {
+ ret = -EUCLEAN;
+ end_write_bio(&epd, ret);
+ }
return ret;
}
@@ -3965,7 +4080,8 @@ retry:
* tmpfs file mapping
*/
if (!trylock_page(page)) {
- flush_write_bio(epd);
+ ret = flush_write_bio(epd);
+ BUG_ON(ret < 0);
lock_page(page);
}
@@ -3975,8 +4091,10 @@ retry:
}
if (wbc->sync_mode != WB_SYNC_NONE) {
- if (PageWriteback(page))
- flush_write_bio(epd);
+ if (PageWriteback(page)) {
+ ret = flush_write_bio(epd);
+ BUG_ON(ret < 0);
+ }
wait_on_page_writeback(page);
}
@@ -4021,8 +4139,9 @@ retry:
* page in our current bio, and thus deadlock, so flush the
* write bio here.
*/
- flush_write_bio(epd);
- goto retry;
+ ret = flush_write_bio(epd);
+ if (!ret)
+ goto retry;
}
if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
@@ -4032,17 +4151,6 @@ retry:
return ret;
}
-static void flush_write_bio(struct extent_page_data *epd)
-{
- if (epd->bio) {
- int ret;
-
- ret = submit_one_bio(epd->bio, 0, 0);
- BUG_ON(ret < 0); /* -ENOMEM */
- epd->bio = NULL;
- }
-}
-
int extent_write_full_page(struct page *page, struct writeback_control *wbc)
{
int ret;
@@ -4054,8 +4162,14 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
};
ret = __extent_writepage(page, wbc, &epd);
+ ASSERT(ret <= 0);
+ if (ret < 0) {
+ end_write_bio(&epd, ret);
+ return ret;
+ }
- flush_write_bio(&epd);
+ ret = flush_write_bio(&epd);
+ ASSERT(ret <= 0);
return ret;
}
@@ -4063,6 +4177,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
int mode)
{
int ret = 0;
+ int flush_ret;
struct address_space *mapping = inode->i_mapping;
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct page *page;
@@ -4097,7 +4212,8 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
start += PAGE_SIZE;
}
- flush_write_bio(&epd);
+ flush_ret = flush_write_bio(&epd);
+ BUG_ON(flush_ret < 0);
return ret;
}
@@ -4105,6 +4221,7 @@ int extent_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
int ret = 0;
+ int flush_ret;
struct extent_page_data epd = {
.bio = NULL,
.tree = &BTRFS_I(mapping->host)->io_tree,
@@ -4113,7 +4230,8 @@ int extent_writepages(struct address_space *mapping,
};
ret = extent_write_cache_pages(mapping, wbc, &epd);
- flush_write_bio(&epd);
+ flush_ret = flush_write_bio(&epd);
+ BUG_ON(flush_ret < 0);
return ret;
}
@@ -4269,6 +4387,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
/* once for us */
free_extent_map(em);
+
+ cond_resched(); /* Allow large-extent preemption. */
}
}
return try_release_extent_state(tree, page, mask);
@@ -4801,25 +4921,28 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
static void check_buffer_tree_ref(struct extent_buffer *eb)
{
int refs;
- /* the ref bit is tricky. We have to make sure it is set
- * if we have the buffer dirty. Otherwise the
- * code to free a buffer can end up dropping a dirty
- * page
+ /*
+ * The TREE_REF bit is first set when the extent_buffer is added
+ * to the radix tree. It is also reset, if unset, when a new reference
+ * is created by find_extent_buffer.
*
- * Once the ref bit is set, it won't go away while the
- * buffer is dirty or in writeback, and it also won't
- * go away while we have the reference count on the
- * eb bumped.
+ * It is only cleared in two cases: freeing the last non-tree
+ * reference to the extent_buffer when its STALE bit is set or
+ * calling releasepage when the tree reference is the only reference.
*
- * We can't just set the ref bit without bumping the
- * ref on the eb because free_extent_buffer might
- * see the ref bit and try to clear it. If this happens
- * free_extent_buffer might end up dropping our original
- * ref by mistake and freeing the page before we are able
- * to add one more ref.
+ * In both cases, care is taken to ensure that the extent_buffer's
+ * pages are not under io. However, releasepage can be concurrently
+ * called with creating new references, which is prone to race
+ * conditions between the calls to check_buffer_tree_ref in those
+ * codepaths and clearing TREE_REF in try_release_extent_buffer.
*
- * So bump the ref count first, then set the bit. If someone
- * beat us to it, drop the ref we added.
+ * The actual lifetime of the extent_buffer in the radix tree is
+ * adequately protected by the refcount, but the TREE_REF bit and
+ * its corresponding reference are not. To protect against this
+ * class of races, we call check_buffer_tree_ref from the codepaths
+ * which trigger io after they set eb->io_pages. Note that once io is
+ * initiated, TREE_REF can no longer be cleared, so that is the
+ * moment at which any such race is best fixed.
*/
refs = atomic_read(&eb->refs);
if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
@@ -5273,6 +5396,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb->read_mirror = 0;
atomic_set(&eb->io_pages, num_reads);
+ /*
+ * It is possible for releasepage to clear the TREE_REF bit before we
+ * set io_pages. See check_buffer_tree_ref for a more detailed comment.
+ */
+ check_buffer_tree_ref(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
@@ -5366,9 +5494,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
}
}
-int read_extent_buffer_to_user(const struct extent_buffer *eb,
- void __user *dstv,
- unsigned long start, unsigned long len)
+int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
+ void __user *dstv,
+ unsigned long start, unsigned long len)
{
size_t cur;
size_t offset;
@@ -5389,7 +5517,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb,
cur = min(len, (PAGE_SIZE - offset));
kaddr = page_address(page);
- if (copy_to_user(dst, kaddr + offset, cur)) {
+ if (probe_user_write(dst, kaddr + offset, cur)) {
ret = -EFAULT;
break;
}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index a3598b24441e..d5089cadd7c4 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -448,9 +448,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
unsigned long start,
unsigned long len);
-int read_extent_buffer_to_user(const struct extent_buffer *eb,
- void __user *dst, unsigned long start,
- unsigned long len);
+int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
+ void __user *dst, unsigned long start,
+ unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
const void *src);
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index f9e280d0b44f..1b8a04b767ff 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -785,10 +785,12 @@ again:
nritems = btrfs_header_nritems(path->nodes[0]);
if (!nritems || (path->slots[0] >= nritems - 1)) {
ret = btrfs_next_leaf(root, path);
- if (ret == 1)
+ if (ret < 0) {
+ goto out;
+ } else if (ret > 0) {
found_next = 1;
- if (ret != 0)
goto insert;
+ }
slot = path->slots[0];
}
btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c2c93fe9d7fd..a202f2f12b1c 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2074,6 +2074,16 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
btrfs_init_log_ctx(&ctx, inode);
/*
+ * Set the range to full if the NO_HOLES feature is not enabled.
+ * This is to avoid missing file extent items representing holes after
+ * replaying the log.
+ */
+ if (!btrfs_fs_incompat(fs_info, NO_HOLES)) {
+ start = 0;
+ end = LLONG_MAX;
+ }
+
+ /*
* We write the dirty pages in the range and wait until they complete
* out of the ->i_mutex. If so, we can flush the dirty pages by
* multi-task, and make the performance up. See
@@ -2127,6 +2137,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
ret = start_ordered_ops(inode, start, end);
if (ret) {
+ up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
goto out;
}
@@ -2999,14 +3010,17 @@ reserve_space:
if (ret < 0)
goto out;
space_reserved = true;
- ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
- alloc_start, bytes_to_reserve);
- if (ret)
- goto out;
ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
&cached_state);
if (ret)
goto out;
+ ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
+ alloc_start, bytes_to_reserve);
+ if (ret) {
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, &cached_state);
+ goto out;
+ }
ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
alloc_end - alloc_start,
i_blocksize(inode),
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index c9965e89097f..6511cb71986c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -743,8 +743,10 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
while (num_entries) {
e = kmem_cache_zalloc(btrfs_free_space_cachep,
GFP_NOFS);
- if (!e)
+ if (!e) {
+ ret = -ENOMEM;
goto free_cache;
+ }
ret = io_ctl_read_entry(&io_ctl, e, &type);
if (ret) {
@@ -753,6 +755,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
}
if (!e->bytes) {
+ ret = -1;
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
}
@@ -773,6 +776,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
e->bitmap = kmem_cache_zalloc(
btrfs_free_space_bitmap_cachep, GFP_NOFS);
if (!e->bitmap) {
+ ret = -ENOMEM;
kmem_cache_free(
btrfs_free_space_cachep, e);
goto free_cache;
@@ -1167,7 +1171,6 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root,
ret = update_cache_item(trans, root, inode, path, offset,
io_ctl->entries, io_ctl->bitmaps);
out:
- io_ctl_free(io_ctl);
if (ret) {
invalidate_inode_pages2(inode->i_mapping);
BTRFS_I(inode)->generation = 0;
@@ -1332,6 +1335,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
* them out later
*/
io_ctl_drop_pages(io_ctl);
+ io_ctl_free(io_ctl);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
i_size_read(inode) - 1, &cached_state);
@@ -2169,7 +2173,7 @@ out:
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, bool update_stat)
{
- struct btrfs_free_space *left_info;
+ struct btrfs_free_space *left_info = NULL;
struct btrfs_free_space *right_info;
bool merged = false;
u64 offset = info->offset;
@@ -2184,7 +2188,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
if (right_info && rb_prev(&right_info->offset_index))
left_info = rb_entry(rb_prev(&right_info->offset_index),
struct btrfs_free_space, offset_index);
- else
+ else if (!right_info)
left_info = tree_search_offset(ctl, offset - 1, 0, 0);
if (right_info && !right_info->bitmap) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c69e5b255745..8c6f619c9ee6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -628,7 +628,21 @@ cont:
PAGE_SET_WRITEBACK |
page_error_op |
PAGE_END_WRITEBACK);
- goto free_pages_out;
+
+ /*
+ * Ensure we only free the compressed pages if we have
+ * them allocated, as we can still reach here with
+ * inode_need_compress() == false.
+ */
+ if (pages) {
+ for (i = 0; i < nr_pages; i++) {
+ WARN_ON(pages[i]->mapping);
+ put_page(pages[i]);
+ }
+ kfree(pages);
+ }
+
+ return;
}
}
@@ -706,13 +720,6 @@ cleanup_and_bail_uncompressed:
*num_added += 1;
return;
-
-free_pages_out:
- for (i = 0; i < nr_pages; i++) {
- WARN_ON(pages[i]->mapping);
- put_page(pages[i]);
- }
- kfree(pages);
}
static void free_async_extent_pages(struct async_extent *async_extent)
@@ -977,6 +984,7 @@ static noinline int cow_file_range(struct inode *inode,
u64 num_bytes;
unsigned long ram_size;
u64 cur_alloc_size = 0;
+ u64 min_alloc_size;
u64 blocksize = fs_info->sectorsize;
struct btrfs_key ins;
struct extent_map *em;
@@ -1028,10 +1036,26 @@ static noinline int cow_file_range(struct inode *inode,
btrfs_drop_extent_cache(BTRFS_I(inode), start,
start + num_bytes - 1, 0);
+ /*
+ * Relocation relies on the relocated extents to have exactly the same
+ * size as the original extents. Normally writeback for relocation data
+ * extents follows a NOCOW path because relocation preallocates the
+ * extents. However, due to an operation such as scrub turning a block
+ * group to RO mode, it may fall back to COW mode, so we must make sure
+ * an extent allocated during COW has exactly the requested size and can
+ * not be split into smaller extents, otherwise relocation breaks and
+ * fails during the stage where it updates the bytenr of file extent
+ * items.
+ */
+ if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
+ min_alloc_size = num_bytes;
+ else
+ min_alloc_size = fs_info->sectorsize;
+
while (num_bytes > 0) {
cur_alloc_size = num_bytes;
ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
- fs_info->sectorsize, 0, alloc_hint,
+ min_alloc_size, 0, alloc_hint,
&ins, 1, 1);
if (ret < 0)
goto out_unlock;
@@ -1136,8 +1160,8 @@ out_unlock:
*/
if (extent_reserved) {
extent_clear_unlock_delalloc(inode, start,
- start + cur_alloc_size,
- start + cur_alloc_size,
+ start + cur_alloc_size - 1,
+ start + cur_alloc_size - 1,
locked_page,
clear_bits,
page_ops);
@@ -4441,6 +4465,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
}
}
+ free_anon_bdev(dest->anon_dev);
+ dest->anon_dev = 0;
out_end_trans:
trans->block_rsv = NULL;
trans->bytes_reserved = 0;
@@ -5536,12 +5562,14 @@ no_delete:
}
/*
- * this returns the key found in the dir entry in the location pointer.
+ * Store the key found in the dir entry in the location pointer, fill @type
+ * with BTRFS_FT_*, and return 0.
+ *
* If no dir entries were found, returns -ENOENT.
* If found a corrupted location in dir entry, returns -EUCLEAN.
*/
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
- struct btrfs_key *location)
+ struct btrfs_key *location, u8 *type)
{
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
@@ -5574,6 +5602,8 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
__func__, name, btrfs_ino(BTRFS_I(dir)),
location->objectid, location->type, location->offset);
}
+ if (!ret)
+ *type = btrfs_dir_type(path->nodes[0], di);
out:
btrfs_free_path(path);
return ret;
@@ -5809,6 +5839,11 @@ static struct inode *new_simple_dir(struct super_block *s,
return inode;
}
+static inline u8 btrfs_inode_type(struct inode *inode)
+{
+ return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
+}
+
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
@@ -5816,18 +5851,31 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *sub_root = root;
struct btrfs_key location;
+ u8 di_type = 0;
int index;
int ret = 0;
if (dentry->d_name.len > BTRFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
- ret = btrfs_inode_by_name(dir, dentry, &location);
+ ret = btrfs_inode_by_name(dir, dentry, &location, &di_type);
if (ret < 0)
return ERR_PTR(ret);
if (location.type == BTRFS_INODE_ITEM_KEY) {
inode = btrfs_iget(dir->i_sb, &location, root, NULL);
+ if (IS_ERR(inode))
+ return inode;
+
+ /* Do extra check against inode mode with di_type */
+ if (btrfs_inode_type(inode) != di_type) {
+ btrfs_crit(fs_info,
+"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
+ inode->i_mode, btrfs_inode_type(inode),
+ di_type);
+ iput(inode);
+ return ERR_PTR(-EUCLEAN);
+ }
return inode;
}
@@ -6438,11 +6486,6 @@ fail:
return ERR_PTR(ret);
}
-static inline u8 btrfs_inode_type(struct inode *inode)
-{
- return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
-}
-
/*
* utility function to add 'inode' into 'parent_inode' with
* a give name and a given sequence number.
@@ -6976,6 +7019,14 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
extent_start = found_key.offset;
if (found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ /* Only regular file could have regular/prealloc extent */
+ if (!S_ISREG(inode->vfs_inode.i_mode)) {
+ err = -EUCLEAN;
+ btrfs_crit(fs_info,
+ "regular/prealloc extent found for non-regular inode %llu",
+ btrfs_ino(inode));
+ goto out;
+ }
extent_end = extent_start +
btrfs_file_extent_num_bytes(leaf, item);
@@ -8399,7 +8450,6 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip)
/* bio split */
ASSERT(map_length <= INT_MAX);
- atomic_inc(&dip->pending_bios);
do {
clone_len = min_t(int, submit_len, map_length);
@@ -8450,7 +8500,8 @@ submit:
if (!status)
return 0;
- bio_put(bio);
+ if (bio != orig_bio)
+ bio_put(bio);
out_err:
dip->errors = 1;
/*
@@ -8491,7 +8542,7 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
bio->bi_private = dip;
dip->orig_bio = bio;
dip->dio_bio = dio_bio;
- atomic_set(&dip->pending_bios, 0);
+ atomic_set(&dip->pending_bios, 1);
io_bio = btrfs_io_bio(bio);
io_bio->logical = file_offset;
@@ -8639,9 +8690,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
dio_data.overwrite = 1;
inode_unlock(inode);
relock = true;
- } else if (iocb->ki_flags & IOCB_NOWAIT) {
- ret = -EAGAIN;
- goto out;
}
ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
offset, count);
@@ -8865,20 +8913,17 @@ again:
/*
* Qgroup reserved space handler
* Page here will be either
- * 1) Already written to disk
- * In this case, its reserved space is released from data rsv map
- * and will be freed by delayed_ref handler finally.
- * So even we call qgroup_free_data(), it won't decrease reserved
- * space.
- * 2) Not written to disk
- * This means the reserved space should be freed here. However,
- * if a truncate invalidates the page (by clearing PageDirty)
- * and the page is accounted for while allocating extent
- * in btrfs_check_data_free_space() we let delayed_ref to
- * free the entire extent.
+ * 1) Already written to disk or ordered extent already submitted
+ * Then its QGROUP_RESERVED bit in io_tree is already cleaned.
+ * Qgroup will be handled by its qgroup_record then.
+ * btrfs_qgroup_free_data() call will do nothing here.
+ *
+ * 2) Not written to disk yet
+ * Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED
+ * bit of its io_tree, and free the qgroup reserved data space.
+ * Since the IO will never happen for this page.
*/
- if (PageDirty(page))
- btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
+ btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
if (!inode_evicting) {
clear_extent_bit(tree, page_start, page_end,
EXTENT_LOCKED | EXTENT_DIRTY |
@@ -9427,7 +9472,7 @@ int __init btrfs_init_cachep(void)
btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
PAGE_SIZE, PAGE_SIZE,
- SLAB_RED_ZONE, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_free_space_bitmap_cachep)
goto fail;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 199c70b8f7d8..717385b7f66f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -656,8 +656,6 @@ static noinline int create_subvol(struct inode *dir,
btrfs_set_root_otransid(root_item, trans->transid);
btrfs_tree_unlock(leaf);
- free_extent_buffer(leaf);
- leaf = NULL;
btrfs_set_root_dirid(root_item, new_dirid);
@@ -666,8 +664,22 @@ static noinline int create_subvol(struct inode *dir,
key.type = BTRFS_ROOT_ITEM_KEY;
ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
root_item);
- if (ret)
+ if (ret) {
+ /*
+ * Since we don't abort the transaction in this case, free the
+ * tree block so that we don't leak space and leave the
+ * filesystem in an inconsistent state (an extent item in the
+ * extent tree without backreferences). Also no need to have
+ * the tree block locked since it is not in any tree at this
+ * point, so no other task can find it and use it.
+ */
+ btrfs_free_tree_block(trans, root, leaf, 0, 1);
+ free_extent_buffer(leaf);
goto fail;
+ }
+
+ free_extent_buffer(leaf);
+ leaf = NULL;
key.offset = (u64)-1;
new_root = btrfs_read_fs_root_no_name(fs_info, &key);
@@ -1239,6 +1251,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
u64 page_start;
u64 page_end;
u64 page_cnt;
+ u64 start = (u64)start_index << PAGE_SHIFT;
int ret;
int i;
int i_done;
@@ -1255,8 +1268,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
- start_index << PAGE_SHIFT,
- page_cnt << PAGE_SHIFT);
+ start, page_cnt << PAGE_SHIFT);
if (ret)
return ret;
i_done = 0;
@@ -1346,8 +1358,7 @@ again:
btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
spin_unlock(&BTRFS_I(inode)->lock);
btrfs_delalloc_release_space(inode, data_reserved,
- start_index << PAGE_SHIFT,
- (page_cnt - i_done) << PAGE_SHIFT, true);
+ start, (page_cnt - i_done) << PAGE_SHIFT, true);
}
@@ -1374,8 +1385,7 @@ out:
put_page(pages[i]);
}
btrfs_delalloc_release_space(inode, data_reserved,
- start_index << PAGE_SHIFT,
- page_cnt << PAGE_SHIFT, true);
+ start, page_cnt << PAGE_SHIFT, true);
btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
extent_changeset_free(data_reserved);
return ret;
@@ -1642,7 +1652,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
btrfs_info(fs_info, "resizing devid %llu", devid);
}
- device = btrfs_find_device(fs_info, devid, NULL, NULL);
+ device = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
if (!device) {
btrfs_info(fs_info, "resizer unable to find device %llu",
devid);
@@ -1844,7 +1854,10 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
readonly = true;
if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
- if (vol_args->size > PAGE_SIZE) {
+ u64 nums;
+
+ if (vol_args->size < sizeof(*inherit) ||
+ vol_args->size > PAGE_SIZE) {
ret = -EINVAL;
goto free_args;
}
@@ -1853,6 +1866,20 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
ret = PTR_ERR(inherit);
goto free_args;
}
+
+ if (inherit->num_qgroups > PAGE_SIZE ||
+ inherit->num_ref_copies > PAGE_SIZE ||
+ inherit->num_excl_copies > PAGE_SIZE) {
+ ret = -EINVAL;
+ goto free_inherit;
+ }
+
+ nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
+ 2 * inherit->num_excl_copies;
+ if (vol_args->size != struct_size(inherit, qgroups, nums)) {
+ ret = -EINVAL;
+ goto free_inherit;
+ }
}
ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
@@ -2079,9 +2106,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
sh.len = item_len;
sh.transid = found_transid;
- /* copy search result header */
- if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
- ret = -EFAULT;
+ /*
+ * Copy the search result header. If the copy faults, return 0 so
+ * the caller loops again: the next pass will fault the pages in
+ * (and return -EFAULT there if there is a real problem), after
+ * which the copy will go through properly.
+ */
+ if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) {
+ ret = 0;
goto out;
}
@@ -2089,10 +2121,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
if (item_len) {
char __user *up = ubuf + *sk_offset;
- /* copy the item */
- if (read_extent_buffer_to_user(leaf, up,
- item_off, item_len)) {
- ret = -EFAULT;
+ /*
+ * Copy the item, same behavior as above, but reset the
+ * sk_offset so we copy the full thing again.
+ */
+ if (read_extent_buffer_to_user_nofault(leaf, up,
+ item_off, item_len)) {
+ ret = 0;
+ *sk_offset -= sizeof(sh);
goto out;
}
@@ -2180,6 +2216,11 @@ static noinline int search_ioctl(struct inode *inode,
key.offset = sk->min_offset;
while (1) {
+ ret = fault_in_pages_writeable(ubuf + sk_offset,
+ *buf_size - sk_offset);
+ if (ret)
+ break;
+
ret = btrfs_search_forward(root, &key, path, sk->min_transid);
if (ret != 0) {
if (ret > 0)
@@ -3178,7 +3219,8 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
s_uuid = di_args->uuid;
rcu_read_lock();
- dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL);
+ dev = btrfs_find_device(fs_info->fs_devices, di_args->devid, s_uuid,
+ NULL, true);
if (!dev) {
ret = -ENODEV;
@@ -4202,6 +4244,8 @@ process_slot:
ret = -EINTR;
goto out;
}
+
+ cond_resched();
}
ret = 0;
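Note on the search_ioctl()/copy_to_sk() hunks above: the copy to the user buffer happens while tree locks are held, so it must not fault; a failed nofault copy just drops the locks and the outer loop prefaults the destination before retrying. A kernel-style sketch of that pattern (take_tree_lock(), drop_tree_lock(), fill_header() and struct result_header are placeholders; fault_in_pages_writeable() and probe_user_write() are the helpers the hunks themselves use):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pagemap.h>	/* fault_in_pages_writeable() */
#include <linux/uaccess.h>	/* probe_user_write() */

struct result_header { u64 objectid; u32 len; };	/* placeholder result */

/* Placeholders standing in for the real locking and data gathering. */
static void take_tree_lock(void);
static void drop_tree_lock(void);
static void fill_header(struct result_header *hdr);

static int copy_header_to_user(char __user *ubuf)
{
	struct result_header hdr;

	for (;;) {
		/* Prefault the destination while no locks are held. */
		if (fault_in_pages_writeable(ubuf, sizeof(hdr)))
			return -EFAULT;	/* genuinely bad user pointer */

		take_tree_lock();
		fill_header(&hdr);
		if (probe_user_write(ubuf, &hdr, sizeof(hdr)) == 0) {
			drop_tree_lock();
			return 0;	/* copied without faulting */
		}
		drop_tree_lock();
		/*
		 * The nofault copy hit a page that was not resident; loop so
		 * fault_in_pages_writeable() can bring it in before retrying.
		 */
	}
}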
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index df49931ffe92..4b217e9a581c 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -95,9 +95,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
* offset is supposed to be a tree block which
* must be aligned to nodesize.
*/
- if (!IS_ALIGNED(offset, eb->fs_info->nodesize))
- pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n",
- offset, (unsigned long long)eb->fs_info->nodesize);
+ if (!IS_ALIGNED(offset, eb->fs_info->sectorsize))
+ pr_info(
+ "\t\t\t(parent %llu not aligned to sectorsize %u)\n",
+ offset, eb->fs_info->sectorsize);
break;
case BTRFS_EXTENT_DATA_REF_KEY:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
@@ -112,8 +113,9 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
* must be aligned to nodesize.
*/
if (!IS_ALIGNED(offset, eb->fs_info->nodesize))
- pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n",
- offset, (unsigned long long)eb->fs_info->nodesize);
+ pr_info(
+ "\t\t\t(parent %llu not aligned to sectorsize %u)\n",
+ offset, eb->fs_info->sectorsize);
break;
default:
pr_cont("(extent %llu has INVALID ref type %d)\n",
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 0cd043f03081..7bda5586e433 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -488,13 +488,13 @@ next2:
break;
}
out:
+ btrfs_free_path(path);
fs_info->qgroup_flags |= flags;
if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
ret >= 0)
ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
- btrfs_free_path(path);
if (ret < 0) {
ulist_free(fs_info->qgroup_ulist);
@@ -1032,6 +1032,7 @@ out_add_root:
ret = qgroup_rescan_init(fs_info, 0, 1);
if (!ret) {
qgroup_rescan_zero_tracking(fs_info);
+ fs_info->qgroup_rescan_running = true;
btrfs_queue_work(fs_info->qgroup_rescan_workers,
&fs_info->qgroup_rescan_work);
}
@@ -2258,6 +2259,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
struct btrfs_root *quota_root;
struct btrfs_qgroup *srcgroup;
struct btrfs_qgroup *dstgroup;
+ bool need_rescan = false;
u32 level_size = 0;
u64 nums;
@@ -2401,6 +2403,13 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
goto unlock;
}
++i_qgroups;
+
+ /*
+ * If we're doing a snapshot, and adding the snapshot to a new
+ * qgroup, the numbers are guaranteed to be incorrect.
+ */
+ if (srcid)
+ need_rescan = true;
}
for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
@@ -2420,6 +2429,9 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
dst->rfer = src->rfer - level_size;
dst->rfer_cmpr = src->rfer_cmpr - level_size;
+
+ /* Manually tweaking numbers certainly needs a rescan */
+ need_rescan = true;
}
for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
struct btrfs_qgroup *src;
@@ -2438,6 +2450,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
dst->excl = src->excl + level_size;
dst->excl_cmpr = src->excl_cmpr + level_size;
+ need_rescan = true;
}
unlock:
@@ -2445,6 +2458,8 @@ unlock:
out:
if (!committing)
mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ if (need_rescan)
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
return ret;
}
@@ -2759,6 +2774,12 @@ out:
return ret;
}
+static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
+{
+ return btrfs_fs_closing(fs_info) ||
+ test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+}
+
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
@@ -2767,6 +2788,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
struct btrfs_trans_handle *trans = NULL;
int err = -ENOMEM;
int ret = 0;
+ bool stopped = false;
path = btrfs_alloc_path();
if (!path)
@@ -2779,7 +2801,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
path->skip_locking = 1;
err = 0;
- while (!err && !btrfs_fs_closing(fs_info)) {
+ while (!err && !(stopped = rescan_should_stop(fs_info))) {
trans = btrfs_start_transaction(fs_info->fs_root, 0);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
@@ -2822,7 +2844,7 @@ out:
}
mutex_lock(&fs_info->qgroup_rescan_lock);
- if (!btrfs_fs_closing(fs_info))
+ if (!stopped)
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
if (trans) {
ret = update_qgroup_status_item(trans);
@@ -2841,7 +2863,7 @@ out:
btrfs_end_transaction(trans);
- if (btrfs_fs_closing(fs_info)) {
+ if (stopped) {
btrfs_info(fs_info, "qgroup scan paused");
} else if (err >= 0) {
btrfs_info(fs_info, "qgroup scan completed%s",
@@ -2906,7 +2928,6 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
sizeof(fs_info->qgroup_rescan_progress));
fs_info->qgroup_rescan_progress.objectid = progress_objectid;
init_completion(&fs_info->qgroup_rescan_completion);
- fs_info->qgroup_rescan_running = true;
spin_unlock(&fs_info->qgroup_lock);
mutex_unlock(&fs_info->qgroup_rescan_lock);
@@ -2972,8 +2993,11 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
qgroup_rescan_zero_tracking(fs_info);
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ fs_info->qgroup_rescan_running = true;
btrfs_queue_work(fs_info->qgroup_rescan_workers,
&fs_info->qgroup_rescan_work);
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
return 0;
}
@@ -3009,9 +3033,13 @@ int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
- if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ fs_info->qgroup_rescan_running = true;
btrfs_queue_work(fs_info->qgroup_rescan_workers,
&fs_info->qgroup_rescan_work);
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
+ }
}
/*
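The qgroup changes above stop setting qgroup_rescan_running inside qgroup_rescan_init() and instead set it under qgroup_rescan_lock right before the work item is queued, so a concurrent btrfs_qgroup_wait_for_completion() can never observe the flag as clear while a rescan is about to start. A user-space sketch of that ordering, with pthreads standing in for the kernel's mutex and work queue; all names are invented for the sketch:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t rescan_lock = PTHREAD_MUTEX_INITIALIZER;
static bool rescan_running;

static void *rescan_worker(void *arg)
{
	(void)arg;
	/* ... the actual scan would run here ... */
	pthread_mutex_lock(&rescan_lock);
	rescan_running = false;		/* only cleared once the work is done */
	pthread_mutex_unlock(&rescan_lock);
	return NULL;
}

/* Set the flag under the same lock the readers take, *before* the worker is
 * started, so a waiter can never see "not running" for work that is queued. */
static void queue_rescan(pthread_t *worker)
{
	pthread_mutex_lock(&rescan_lock);
	rescan_running = true;
	pthread_create(worker, NULL, rescan_worker, NULL);
	pthread_mutex_unlock(&rescan_lock);
}

static bool rescan_is_running(void)
{
	bool running;

	pthread_mutex_lock(&rescan_lock);
	running = rescan_running;
	pthread_mutex_unlock(&rescan_lock);
	return running;
}

int main(void)
{
	pthread_t worker;

	queue_rescan(&worker);
	printf("rescan running: %d\n", rescan_is_running());
	pthread_join(worker, NULL);
	printf("rescan running: %d\n", rescan_is_running());
	return 0;
}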
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 927f9f3daddb..a91f74cf5cd1 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1182,22 +1182,19 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
int nr_data = rbio->nr_data;
int stripe;
int pagenr;
- int p_stripe = -1;
- int q_stripe = -1;
+ bool has_qstripe;
struct bio_list bio_list;
struct bio *bio;
int ret;
bio_list_init(&bio_list);
- if (rbio->real_stripes - rbio->nr_data == 1) {
- p_stripe = rbio->real_stripes - 1;
- } else if (rbio->real_stripes - rbio->nr_data == 2) {
- p_stripe = rbio->real_stripes - 2;
- q_stripe = rbio->real_stripes - 1;
- } else {
+ if (rbio->real_stripes - rbio->nr_data == 1)
+ has_qstripe = false;
+ else if (rbio->real_stripes - rbio->nr_data == 2)
+ has_qstripe = true;
+ else
BUG();
- }
/* at this point we either have a full stripe,
* or we've read the full stripe from the drive.
@@ -1241,7 +1238,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
SetPageUptodate(p);
pointers[stripe++] = kmap(p);
- if (q_stripe != -1) {
+ if (has_qstripe) {
/*
* raid6, add the qstripe and call the
@@ -2340,8 +2337,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
int nr_data = rbio->nr_data;
int stripe;
int pagenr;
- int p_stripe = -1;
- int q_stripe = -1;
+ bool has_qstripe;
struct page *p_page = NULL;
struct page *q_page = NULL;
struct bio_list bio_list;
@@ -2351,14 +2347,12 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
bio_list_init(&bio_list);
- if (rbio->real_stripes - rbio->nr_data == 1) {
- p_stripe = rbio->real_stripes - 1;
- } else if (rbio->real_stripes - rbio->nr_data == 2) {
- p_stripe = rbio->real_stripes - 2;
- q_stripe = rbio->real_stripes - 1;
- } else {
+ if (rbio->real_stripes - rbio->nr_data == 1)
+ has_qstripe = false;
+ else if (rbio->real_stripes - rbio->nr_data == 2)
+ has_qstripe = true;
+ else
BUG();
- }
if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
is_replace = 1;
@@ -2380,17 +2374,22 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
goto cleanup;
SetPageUptodate(p_page);
- if (q_stripe != -1) {
+ if (has_qstripe) {
+ /* RAID6, allocate and map temp space for the Q stripe */
q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!q_page) {
__free_page(p_page);
goto cleanup;
}
SetPageUptodate(q_page);
+ pointers[rbio->real_stripes - 1] = kmap(q_page);
}
atomic_set(&rbio->error, 0);
+ /* Map the parity stripe just once */
+ pointers[nr_data] = kmap(p_page);
+
for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
struct page *p;
void *parity;
@@ -2400,17 +2399,8 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
pointers[stripe] = kmap(p);
}
- /* then add the parity stripe */
- pointers[stripe++] = kmap(p_page);
-
- if (q_stripe != -1) {
-
- /*
- * raid6, add the qstripe and call the
- * library function to fill in our p/q
- */
- pointers[stripe++] = kmap(q_page);
-
+ if (has_qstripe) {
+ /* RAID6, call the library function to fill in our P/Q */
raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
pointers);
} else {
@@ -2431,12 +2421,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
for (stripe = 0; stripe < nr_data; stripe++)
kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
- kunmap(p_page);
}
+ kunmap(p_page);
__free_page(p_page);
- if (q_page)
+ if (q_page) {
+ kunmap(q_page);
__free_page(q_page);
+ }
writeback:
/*
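In the finish_rmw()/finish_parity_scrub() paths above, the RAID5 case (has_qstripe == false) fills the P page with the plain XOR of the data stripes, while the RAID6 case hands all mapped pages to raid6_call.gen_syndrome() to produce P and Q. A tiny runnable user-space sketch of the XOR relationship, with made-up stripe contents:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STRIPE_LEN 8

int main(void)
{
	uint8_t data[2][STRIPE_LEN] = {
		{ 1, 2, 3, 4, 5, 6, 7, 8 },
		{ 9, 8, 7, 6, 5, 4, 3, 2 },
	};
	uint8_t parity[STRIPE_LEN] = { 0 };	/* the P stripe */
	uint8_t rebuilt[STRIPE_LEN];

	/* P is the XOR of all data stripes */
	for (int s = 0; s < 2; s++)
		for (int i = 0; i < STRIPE_LEN; i++)
			parity[i] ^= data[s][i];

	/* if stripe 0 is lost, it can be rebuilt from P and the other stripe */
	for (int i = 0; i < STRIPE_LEN; i++)
		rebuilt[i] = parity[i] ^ data[1][i];

	printf("rebuilt matches original: %d\n",
	       memcmp(rebuilt, data[0], STRIPE_LEN) == 0);
	return 0;
}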
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 4c81ffe12385..368c349c5669 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -442,6 +442,8 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
}
have_zone = 1;
}
+ if (!have_zone)
+ radix_tree_delete(&fs_info->reada_tree, index);
spin_unlock(&fs_info->reada_lock);
btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index dbc685ca017f..b26739d0e991 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -297,6 +297,8 @@ static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
exist_re = insert_root_entry(&exist->roots, re);
if (exist_re)
kfree(re);
+ } else {
+ kfree(re);
}
kfree(be);
return exist;
@@ -852,6 +854,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
"dropping a ref for a root that doesn't have a ref on the block");
dump_block_entry(fs_info, be);
dump_ref_action(fs_info, ra);
+ kfree(ref);
kfree(ra);
goto out_unlock;
}
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index f98913061a40..06c6a66a991f 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -525,8 +525,8 @@ static int should_ignore_root(struct btrfs_root *root)
if (!reloc_root)
return 0;
- if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
- root->fs_info->running_transaction->transid - 1)
+ if (btrfs_header_generation(reloc_root->commit_root) ==
+ root->fs_info->running_transaction->transid)
return 0;
/*
* if there is reloc tree and it was created in previous
@@ -1141,7 +1141,7 @@ out:
free_backref_node(cache, lower);
}
- free_backref_node(cache, node);
+ remove_backref_node(cache, node);
return ERR_PTR(err);
}
ASSERT(!node || !node->detached);
@@ -1253,7 +1253,7 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
if (!node)
return -ENOMEM;
- node->bytenr = root->node->start;
+ node->bytenr = root->commit_root->start;
node->data = root;
spin_lock(&rc->reloc_root_tree.lock);
@@ -1284,15 +1284,14 @@ static void __del_reloc_root(struct btrfs_root *root)
if (rc && root->node) {
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
- root->node->start);
+ root->commit_root->start);
if (rb_node) {
node = rb_entry(rb_node, struct mapping_node, rb_node);
rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+ RB_CLEAR_NODE(&node->rb_node);
}
spin_unlock(&rc->reloc_root_tree.lock);
- if (!node)
- return;
- BUG_ON((struct btrfs_root *)node->data != root);
+ ASSERT(!node || (struct btrfs_root *)node->data == root);
}
spin_lock(&fs_info->trans_lock);
@@ -1305,7 +1304,7 @@ static void __del_reloc_root(struct btrfs_root *root)
* helper to update the 'address of tree root -> reloc tree'
* mapping
*/
-static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
+static int __update_reloc_root(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node;
@@ -1314,7 +1313,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
- root->node->start);
+ root->commit_root->start);
if (rb_node) {
node = rb_entry(rb_node, struct mapping_node, rb_node);
rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
@@ -1326,7 +1325,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
BUG_ON((struct btrfs_root *)node->data != root);
spin_lock(&rc->reloc_root_tree.lock);
- node->bytenr = new_bytenr;
+ node->bytenr = root->node->start;
rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
@@ -1471,6 +1470,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
}
if (reloc_root->commit_root != reloc_root->node) {
+ __update_reloc_root(reloc_root);
btrfs_set_root_node(root_item, reloc_root->node);
free_extent_buffer(reloc_root->commit_root);
reloc_root->commit_root = btrfs_root_node(reloc_root);
@@ -1755,8 +1755,8 @@ int replace_path(struct btrfs_trans_handle *trans,
int ret;
int slot;
- BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
- BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
+ ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
+ ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
@@ -1790,7 +1790,7 @@ again:
struct btrfs_key first_key;
level = btrfs_header_level(parent);
- BUG_ON(level < lowest_level);
+ ASSERT(level >= lowest_level);
ret = btrfs_bin_search(parent, &key, level, &slot);
if (ret && slot > 0)
@@ -2434,7 +2434,21 @@ out:
free_reloc_roots(&reloc_roots);
}
- BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
+ /*
+ * We used to have
+ *
+ * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
+ *
+ * here, but it's wrong. If we fail to start the transaction in
+ * prepare_to_merge() we will have only 0 ref reloc roots, none of which
+ * have actually been removed from the reloc_root_tree rb tree. This is
+ * fine because we're bailing here, and we hold a reference on the root
+ * for the list that holds it, so these roots will be cleaned up when we
+ * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root
+ * will be cleaned up on unmount.
+ *
+ * The remaining nodes will be cleaned up by free_reloc_control.
+ */
}
static void free_block_list(struct rb_root *blocks)
@@ -4585,11 +4599,6 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
- if (buf == root->node)
- __update_reloc_root(root, cow->start);
- }
-
level = btrfs_header_level(buf);
if (btrfs_header_generation(buf) <=
btrfs_root_last_snapshot(&root->root_item))
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6b6008db3e03..fee8995c9a0c 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3835,7 +3835,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
return PTR_ERR(sctx);
mutex_lock(&fs_info->fs_devices->device_list_mutex);
- dev = btrfs_find_device(fs_info, devid, NULL, NULL);
+ dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
!is_dev_replace)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -4019,7 +4019,7 @@ int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
struct scrub_ctx *sctx = NULL;
mutex_lock(&fs_info->fs_devices->device_list_mutex);
- dev = btrfs_find_device(fs_info, devid, NULL, NULL);
+ dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
if (dev)
sctx = dev->scrub_ctx;
if (sctx)
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 931a7d1ddc95..128398dde081 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -23,6 +23,7 @@
#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
+#include "xattr.h"
/*
* Maximum number of references an extent can have in order for us to attempt to
@@ -237,6 +238,7 @@ struct waiting_dir_move {
* after this directory is moved, we can try to rmdir the ino rmdir_ino.
*/
u64 rmdir_ino;
+ u64 rmdir_gen;
bool orphanized;
};
@@ -307,7 +309,7 @@ static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
-static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
+static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
static int need_send_hole(struct send_ctx *sctx)
{
@@ -2303,7 +2305,7 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
fs_path_reset(name);
- if (is_waiting_for_rm(sctx, ino)) {
+ if (is_waiting_for_rm(sctx, ino, gen)) {
ret = gen_unique_name(sctx, ino, gen, name);
if (ret < 0)
goto out;
@@ -2862,8 +2864,8 @@ out:
return ret;
}
-static struct orphan_dir_info *
-add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
+static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
+ u64 dir_ino, u64 dir_gen)
{
struct rb_node **p = &sctx->orphan_dirs.rb_node;
struct rb_node *parent = NULL;
@@ -2872,20 +2874,23 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
while (*p) {
parent = *p;
entry = rb_entry(parent, struct orphan_dir_info, node);
- if (dir_ino < entry->ino) {
+ if (dir_ino < entry->ino)
p = &(*p)->rb_left;
- } else if (dir_ino > entry->ino) {
+ else if (dir_ino > entry->ino)
p = &(*p)->rb_right;
- } else {
+ else if (dir_gen < entry->gen)
+ p = &(*p)->rb_left;
+ else if (dir_gen > entry->gen)
+ p = &(*p)->rb_right;
+ else
return entry;
- }
}
odi = kmalloc(sizeof(*odi), GFP_KERNEL);
if (!odi)
return ERR_PTR(-ENOMEM);
odi->ino = dir_ino;
- odi->gen = 0;
+ odi->gen = dir_gen;
odi->last_dir_index_offset = 0;
rb_link_node(&odi->node, parent, p);
@@ -2893,8 +2898,8 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
return odi;
}
-static struct orphan_dir_info *
-get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
+static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
+ u64 dir_ino, u64 gen)
{
struct rb_node *n = sctx->orphan_dirs.rb_node;
struct orphan_dir_info *entry;
@@ -2905,15 +2910,19 @@ get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
n = n->rb_left;
else if (dir_ino > entry->ino)
n = n->rb_right;
+ else if (gen < entry->gen)
+ n = n->rb_left;
+ else if (gen > entry->gen)
+ n = n->rb_right;
else
return entry;
}
return NULL;
}
-static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
+static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
{
- struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
+ struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
return odi != NULL;
}
@@ -2958,7 +2967,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
key.type = BTRFS_DIR_INDEX_KEY;
key.offset = 0;
- odi = get_orphan_dir_info(sctx, dir);
+ odi = get_orphan_dir_info(sctx, dir, dir_gen);
if (odi)
key.offset = odi->last_dir_index_offset;
@@ -2989,7 +2998,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
dm = get_waiting_dir_move(sctx, loc.objectid);
if (dm) {
- odi = add_orphan_dir_info(sctx, dir);
+ odi = add_orphan_dir_info(sctx, dir, dir_gen);
if (IS_ERR(odi)) {
ret = PTR_ERR(odi);
goto out;
@@ -2997,12 +3006,13 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
odi->gen = dir_gen;
odi->last_dir_index_offset = found_key.offset;
dm->rmdir_ino = dir;
+ dm->rmdir_gen = dir_gen;
ret = 0;
goto out;
}
if (loc.objectid > send_progress) {
- odi = add_orphan_dir_info(sctx, dir);
+ odi = add_orphan_dir_info(sctx, dir, dir_gen);
if (IS_ERR(odi)) {
ret = PTR_ERR(odi);
goto out;
@@ -3042,6 +3052,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
return -ENOMEM;
dm->ino = ino;
dm->rmdir_ino = 0;
+ dm->rmdir_gen = 0;
dm->orphanized = orphanized;
while (*p) {
@@ -3187,7 +3198,7 @@ static int path_loop(struct send_ctx *sctx, struct fs_path *name,
while (ino != BTRFS_FIRST_FREE_OBJECTID) {
fs_path_reset(name);
- if (is_waiting_for_rm(sctx, ino))
+ if (is_waiting_for_rm(sctx, ino, gen))
break;
if (is_waiting_for_move(sctx, ino)) {
if (*ancestor_ino == 0)
@@ -3227,6 +3238,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
u64 parent_ino, parent_gen;
struct waiting_dir_move *dm = NULL;
u64 rmdir_ino = 0;
+ u64 rmdir_gen;
u64 ancestor;
bool is_orphan;
int ret;
@@ -3241,6 +3253,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
dm = get_waiting_dir_move(sctx, pm->ino);
ASSERT(dm);
rmdir_ino = dm->rmdir_ino;
+ rmdir_gen = dm->rmdir_gen;
is_orphan = dm->orphanized;
free_waiting_dir_move(sctx, dm);
@@ -3277,6 +3290,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
dm = get_waiting_dir_move(sctx, pm->ino);
ASSERT(dm);
dm->rmdir_ino = rmdir_ino;
+ dm->rmdir_gen = rmdir_gen;
}
goto out;
}
@@ -3295,7 +3309,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
struct orphan_dir_info *odi;
u64 gen;
- odi = get_orphan_dir_info(sctx, rmdir_ino);
+ odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
if (!odi) {
/* already deleted */
goto finish;
@@ -3810,6 +3824,72 @@ static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
}
/*
+ * When processing the new references for an inode we may orphanize an existing
+ * directory inode because its old name conflicts with one of the new references
+ * of the current inode. Later, when processing another new reference of our
+ * inode, we might need to orphanize another inode, but the path we have in the
+ * reference reflects the pre-orphanization name of the directory we previously
+ * orphanized. For example:
+ *
+ * parent snapshot looks like:
+ *
+ * . (ino 256)
+ * |----- f1 (ino 257)
+ * |----- f2 (ino 258)
+ * |----- d1/ (ino 259)
+ * |----- d2/ (ino 260)
+ *
+ * send snapshot looks like:
+ *
+ * . (ino 256)
+ * |----- d1 (ino 258)
+ * |----- f2/ (ino 259)
+ * |----- f2_link/ (ino 260)
+ * | |----- f1 (ino 257)
+ * |
+ * |----- d2 (ino 258)
+ *
+ * When processing inode 257 we compute the name for inode 259 as "d1", and we
+ * cache it in the name cache. Later when we start processing inode 258, when
+ * collecting all its new references we set a full path of "d1/d2" for its new
+ * reference with name "d2". When we start processing the new references we
+ * start by processing the new reference with name "d1", and this results in
+ * orphanizing inode 259, since its old reference causes a conflict. Then we
+ * move on to the next new reference, with name "d2", and we find out we must
+ * orphanize inode 260, as its old reference conflicts with ours - but for the
+ * orphanization we use a source path corresponding to the path we stored in the
+ * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the
+ * receiver fail since the path component "d1/" no longer exists, it was renamed
+ * to "o259-6-0/" when processing the previous new reference. So in this case we
+ * must recompute the path in the new reference and use it for the new
+ * orphanization operation.
+ */
+static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
+{
+ char *name;
+ int ret;
+
+ name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ fs_path_reset(ref->full_path);
+ ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
+ if (ret < 0)
+ goto out;
+
+ ret = fs_path_add(ref->full_path, name, ref->name_len);
+ if (ret < 0)
+ goto out;
+
+ /* Update the reference's base name pointer. */
+ set_ref_path(ref, ref->full_path);
+out:
+ kfree(name);
+ return ret;
+}
+
+/*
* This does all the move/link/unlink/rmdir magic.
*/
static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
@@ -3939,6 +4019,12 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
struct name_cache_entry *nce;
struct waiting_dir_move *wdm;
+ if (orphanized_dir) {
+ ret = refresh_ref_path(sctx, cur);
+ if (ret < 0)
+ goto out;
+ }
+
ret = orphanize_inode(sctx, ow_inode, ow_gen,
cur->full_path);
if (ret < 0)
@@ -4543,6 +4629,10 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
struct fs_path *p;
struct posix_acl_xattr_header dummy_acl;
+ /* Capabilities are emitted by finish_inode_if_needed */
+ if (!strncmp(name, XATTR_NAME_CAPS, name_len))
+ return 0;
+
p = fs_path_alloc();
if (!p)
return -ENOMEM;
@@ -5105,6 +5195,64 @@ static int send_extent_data(struct send_ctx *sctx,
return 0;
}
+/*
+ * Search for a capability xattr related to sctx->cur_ino. If the capability is
+ * found, call send_set_xattr() to emit it.
+ *
+ * Return 0 if there isn't a capability, or when the capability was emitted
+ * successfully, or < 0 if an error occurred.
+ */
+static int send_capabilities(struct send_ctx *sctx)
+{
+ struct fs_path *fspath = NULL;
+ struct btrfs_path *path;
+ struct btrfs_dir_item *di;
+ struct extent_buffer *leaf;
+ unsigned long data_ptr;
+ char *buf = NULL;
+ int buf_len;
+ int ret = 0;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
+ XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
+ if (!di) {
+ /* There is no xattr for this inode */
+ goto out;
+ } else if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ }
+
+ leaf = path->nodes[0];
+ buf_len = btrfs_dir_data_len(leaf, di);
+
+ fspath = fs_path_alloc();
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!fspath || !buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
+ if (ret < 0)
+ goto out;
+
+ data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
+ read_extent_buffer(leaf, buf, data_ptr, buf_len);
+
+ ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
+ strlen(XATTR_NAME_CAPS), buf, buf_len);
+out:
+ kfree(buf);
+ fs_path_free(fspath);
+ btrfs_free_path(path);
+ return ret;
+}
+
static int clone_range(struct send_ctx *sctx,
struct clone_root *clone_root,
const u64 disk_byte,
@@ -5936,6 +6084,10 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
goto out;
}
+ ret = send_capabilities(sctx);
+ if (ret < 0)
+ goto out;
+
/*
* If other directory inodes depended on our current directory
* inode's move/rename, now do their move/rename operations.
@@ -6720,7 +6872,7 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);
- sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
+ sctx->clone_roots = kvzalloc(alloc_size, GFP_KERNEL);
if (!sctx->clone_roots) {
ret = -ENOMEM;
goto out;
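The send changes above key the orphan_dir_info rb-tree (and the is_waiting_for_rm() lookup) by both inode number and generation instead of inode number alone, so a directory whose inode number is reused in the send snapshot no longer matches a stale entry. A minimal user-space sketch of that two-level ordering; the struct and values are illustrative only:

#include <stdint.h>
#include <stdio.h>

struct odi_key {
	uint64_t ino;
	uint64_t gen;
};

/* <0: a sorts left of b, >0: right of b, 0: same entry */
static int odi_cmp(const struct odi_key *a, const struct odi_key *b)
{
	if (a->ino != b->ino)
		return a->ino < b->ino ? -1 : 1;
	if (a->gen != b->gen)
		return a->gen < b->gen ? -1 : 1;
	return 0;
}

int main(void)
{
	struct odi_key old_entry = { .ino = 260, .gen = 6 };
	struct odi_key new_entry = { .ino = 260, .gen = 7 };

	/* same inode number, different generation: two distinct tree entries */
	printf("cmp(old, new) = %d\n", odi_cmp(&old_entry, &new_entry));
	return 0;
}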
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 6a5b16a119ed..521f6c2091ad 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -432,6 +432,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
char *compress_type;
bool compress_force = false;
enum btrfs_compression_type saved_compress_type;
+ int saved_compress_level;
bool saved_compress_force;
int no_compress = 0;
@@ -514,6 +515,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
info->compress_type : BTRFS_COMPRESS_NONE;
saved_compress_force =
btrfs_test_opt(info, FORCE_COMPRESS);
+ saved_compress_level = info->compress_level;
if (token == Opt_compress ||
token == Opt_compress_force ||
strncmp(args[0].from, "zlib", 4) == 0) {
@@ -537,6 +539,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
} else if (strncmp(args[0].from, "lzo", 3) == 0) {
compress_type = "lzo";
info->compress_type = BTRFS_COMPRESS_LZO;
+ info->compress_level = 0;
btrfs_set_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM);
@@ -552,6 +555,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
no_compress = 0;
} else if (strncmp(args[0].from, "no", 2) == 0) {
compress_type = "no";
+ info->compress_level = 0;
+ info->compress_type = 0;
btrfs_clear_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
compress_force = false;
@@ -572,11 +577,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
*/
btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
}
- if ((btrfs_test_opt(info, COMPRESS) &&
- (info->compress_type != saved_compress_type ||
- compress_force != saved_compress_force)) ||
- (!btrfs_test_opt(info, COMPRESS) &&
- no_compress == 1)) {
+ if (no_compress == 1) {
+ btrfs_info(info, "use no compression");
+ } else if ((info->compress_type != saved_compress_type) ||
+ (compress_force != saved_compress_force) ||
+ (info->compress_level != saved_compress_level)) {
btrfs_info(info, "%s %s compression, level %d",
(compress_force) ? "force" : "use",
compress_type, info->compress_level);
@@ -996,8 +1001,8 @@ out:
return error;
}
-static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
- u64 subvol_objectid)
+char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
+ u64 subvol_objectid)
{
struct btrfs_root *root = fs_info->tree_root;
struct btrfs_root *fs_root;
@@ -1278,6 +1283,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
{
struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
const char *compress_type;
+ const char *subvol_name;
if (btrfs_test_opt(info, DEGRADED))
seq_puts(seq, ",degraded");
@@ -1362,8 +1368,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_puts(seq, ",ref_verify");
seq_printf(seq, ",subvolid=%llu",
BTRFS_I(d_inode(dentry))->root->root_key.objectid);
- seq_puts(seq, ",subvol=");
- seq_dentry(seq, dentry, " \t\n\\");
+ subvol_name = btrfs_get_subvol_name_from_objectid(info,
+ BTRFS_I(d_inode(dentry))->root->root_key.objectid);
+ if (!IS_ERR(subvol_name)) {
+ seq_puts(seq, ",subvol=");
+ seq_escape(seq, subvol_name, " \t\n\\");
+ kfree(subvol_name);
+ }
return 0;
}
@@ -1408,8 +1419,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
goto out;
}
}
- subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb),
- subvol_objectid);
+ subvol_name = btrfs_get_subvol_name_from_objectid(
+ btrfs_sb(mnt->mnt_sb), subvol_objectid);
if (IS_ERR(subvol_name)) {
root = ERR_CAST(subvol_name);
subvol_name = NULL;
@@ -1834,6 +1845,14 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
btrfs_scrub_cancel(fs_info);
btrfs_pause_balance(fs_info);
+ /*
+ * Pause the qgroup rescan worker if it is running. We don't want
+ * it to keep running once we are in RO mode, because by the time
+ * we unmount it might have left a transaction open, and we would
+ * leak that transaction and/or crash.
+ */
+ btrfs_qgroup_wait_for_completion(fs_info, false);
+
ret = btrfs_commit_super(fs_info);
if (ret)
goto restore;
@@ -2314,9 +2333,7 @@ static int btrfs_unfreeze(struct super_block *sb)
static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
{
struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
- struct btrfs_fs_devices *cur_devices;
struct btrfs_device *dev, *first_dev = NULL;
- struct list_head *head;
/*
* Lightweight locking of the devices. We should not need
@@ -2326,18 +2343,13 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
* least until the rcu_read_unlock.
*/
rcu_read_lock();
- cur_devices = fs_info->fs_devices;
- while (cur_devices) {
- head = &cur_devices->devices;
- list_for_each_entry_rcu(dev, head, dev_list) {
- if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
- continue;
- if (!dev->name)
- continue;
- if (!first_dev || dev->devid < first_dev->devid)
- first_dev = dev;
- }
- cur_devices = cur_devices->seed;
+ list_for_each_entry_rcu(dev, &fs_info->fs_devices->devices, dev_list) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
+ continue;
+ if (!dev->name)
+ continue;
+ if (!first_dev || dev->devid < first_dev->devid)
+ first_dev = dev;
}
if (first_dev)
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index aefb0169d46d..afec808a763b 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -10,6 +10,7 @@
#include <linux/kobject.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
+#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
@@ -766,7 +767,9 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
{
int error = 0;
struct btrfs_device *dev;
+ unsigned int nofs_flag;
+ nofs_flag = memalloc_nofs_save();
list_for_each_entry(dev, &fs_devices->devices, dev_list) {
struct hd_struct *disk;
struct kobject *disk_kobj;
@@ -785,6 +788,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
if (error)
break;
}
+ memalloc_nofs_restore(nofs_flag);
return error;
}
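The sysfs change above brackets device-link creation with memalloc_nofs_save()/memalloc_nofs_restore(), so allocations made inside (including by helpers that use GFP_KERNEL) cannot recurse into filesystem reclaim while btrfs locks are held. A runnable user-space sketch of the underlying save/set/restore idiom; the flag, helpers and allocator stub are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool no_fs_reclaim;	/* stand-in for the task flag */

static bool nofs_save(void)
{
	bool old = no_fs_reclaim;

	no_fs_reclaim = true;
	return old;
}

static void nofs_restore(bool old)
{
	no_fs_reclaim = old;
}

static void allocate_something(void)
{
	/* a real allocator would consult the flag here */
	printf("allocating, fs reclaim %s\n",
	       no_fs_reclaim ? "forbidden" : "allowed");
}

int main(void)
{
	bool old = nofs_save();		/* enter the NOFS scope */

	allocate_something();		/* implicitly "no fs reclaim" */
	nofs_restore(old);		/* leave the scope */
	allocate_something();		/* back to normal */
	return 0;
}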
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 2eec1dd3803a..82d874b10438 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -38,7 +38,13 @@ static struct file_system_type test_type = {
struct inode *btrfs_new_test_inode(void)
{
- return new_inode(test_mnt->mnt_sb);
+ struct inode *inode;
+
+ inode = new_inode(test_mnt->mnt_sb);
+ if (inode)
+ inode_init_owner(inode, NULL, S_IFREG);
+
+ return inode;
}
static int btrfs_init_test_fs(void)
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 64043f028820..648633aae968 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -232,6 +232,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
return ret;
}
+ inode->i_mode = S_IFREG;
BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 4b1491e1b803..1b52c960682d 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -572,10 +572,19 @@ again:
}
got_it:
- btrfs_record_root_in_trans(h, root);
-
if (!current->journal_info)
current->journal_info = h;
+
+ /*
+ * btrfs_record_root_in_trans() needs to alloc new extents, and may
+ * call btrfs_join_transaction() while we're also starting a
+ * transaction.
+ *
+ * Thus it needs to be called after current->journal_info is initialized,
+ * or we can deadlock.
+ */
+ btrfs_record_root_in_trans(h, root);
+
return h;
join_fail:
@@ -1240,7 +1249,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
struct btrfs_root *gang[8];
int i;
int ret;
- int err = 0;
spin_lock(&fs_info->fs_roots_radix_lock);
while (1) {
@@ -1252,6 +1260,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
break;
for (i = 0; i < ret; i++) {
struct btrfs_root *root = gang[i];
+ int ret2;
+
radix_tree_tag_clear(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
BTRFS_ROOT_TRANS_TAG);
@@ -1273,17 +1283,17 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
root->node);
}
- err = btrfs_update_root(trans, fs_info->tree_root,
+ ret2 = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key,
&root->root_item);
+ if (ret2)
+ return ret2;
spin_lock(&fs_info->fs_roots_radix_lock);
- if (err)
- break;
btrfs_qgroup_free_meta_all_pertrans(root);
}
}
spin_unlock(&fs_info->fs_roots_radix_lock);
- return err;
+ return 0;
}
/*
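The reordering above relies on current->journal_info acting as a per-task marker: once it points at the open transaction handle, a nested btrfs_join_transaction() call (for example from btrfs_record_root_in_trans()) reuses that handle instead of trying to start another transaction and deadlocking. A user-space sketch of that reuse pattern; every name here is invented for illustration:

#include <stdio.h>

struct txn { int id; };

static _Thread_local struct txn *journal_info;	/* per-task marker */

static struct txn *join_or_start(struct txn *fresh)
{
	if (journal_info)		/* nested call: reuse the open txn */
		return journal_info;
	journal_info = fresh;		/* outermost call: publish it first */
	return fresh;
}

static void helper_that_may_join(void)
{
	struct txn nested = { .id = -1 };
	struct txn *t = join_or_start(&nested);

	printf("helper uses txn %d\n", t->id);
}

int main(void)
{
	struct txn t = { .id = 1 };

	join_or_start(&t);		/* publish before calling helpers */
	helper_that_may_join();		/* sees txn 1, never starts txn -1 */
	journal_info = NULL;
	return 0;
}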
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 3ec712cba58e..9023e6b46396 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -448,6 +448,320 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
return 0;
}
+__printf(5, 6)
+__cold
+static void chunk_err(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *leaf,
+ const struct btrfs_chunk *chunk, u64 logical,
+ const char *fmt, ...)
+{
+ bool is_sb;
+ struct va_format vaf;
+ va_list args;
+ int i;
+ int slot = -1;
+
+ /* Only superblock eb is able to have such small offset */
+ is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);
+
+ if (!is_sb) {
+ /*
+ * Get the slot number by iterating through all slots; this
+ * provides better readability.
+ */
+ for (i = 0; i < btrfs_header_nritems(leaf); i++) {
+ if (btrfs_item_ptr_offset(leaf, i) ==
+ (unsigned long)chunk) {
+ slot = i;
+ break;
+ }
+ }
+ }
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (is_sb)
+ btrfs_crit(fs_info,
+ "corrupt superblock syschunk array: chunk_start=%llu, %pV",
+ logical, &vaf);
+ else
+ btrfs_crit(fs_info,
+ "corrupt leaf: root=%llu block=%llu slot=%d chunk_start=%llu, %pV",
+ BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot,
+ logical, &vaf);
+ va_end(args);
+}
+
+/*
+ * The common chunk check which could also work on super block sys chunk array.
+ *
+ * Return -EUCLEAN if anything is corrupted.
+ * Return 0 if everything is OK.
+ */
+int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf,
+ struct btrfs_chunk *chunk, u64 logical)
+{
+ u64 length;
+ u64 stripe_len;
+ u16 num_stripes;
+ u16 sub_stripes;
+ u64 type;
+ u64 features;
+ bool mixed = false;
+
+ length = btrfs_chunk_length(leaf, chunk);
+ stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+ num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+ sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+ type = btrfs_chunk_type(leaf, chunk);
+
+ if (!num_stripes) {
+ chunk_err(fs_info, leaf, chunk, logical,
+ "invalid chunk num_stripes, have %u", num_stripes);
+ return -EUCLEAN;
+ }
+ if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
+ chunk_err(fs_info, leaf, chunk, logical,
+ "invalid chunk logical, have %llu should aligned to %u",
+ logical, fs_info->sectorsize);
+ return -EUCLEAN;
+ }
+ if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
+ chunk_err(fs_info, leaf, chunk, logical,
+ "invalid chunk sectorsize, have %u expect %u",
+ btrfs_chunk_sector_size(leaf, chunk),
+ fs_info->sectorsize);
+ return -EUCLEAN;
+ }
+ if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
+ chunk_err(fs_info, leaf, chunk, logical,
+ "invalid chunk length, have %llu", length);
+ return -EUCLEAN;
+ }
+ if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
+ chunk_err(fs_info, leaf, chunk, logical,
+ "invalid chunk stripe length: %llu",
+ stripe_len);
+ return -EUCLEAN;
+ }
+ if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
+ type) {
+ chunk_err(fs_info, leaf, chunk, logical,
+ "unrecognized chunk type: 0x%llx",
+ ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
+ BTRFS_BLOCK_GROUP_PROFILE_MASK) &
+ btrfs_chunk_type(leaf, chunk));
+ return -EUCLEAN;
+ }
+
+ if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
+ (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
+ chunk_err(fs_info, leaf, chunk, logical,
+ "invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
+ type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
+ return -EUCLEAN;
+ }
+ if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
+ chunk_err(fs_info, leaf, chunk, logical,
+ "missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
+ type, BTRFS_BLOCK_GROUP_TYPE_MASK);
+ return -EUCLEAN;
+ }
+
+ if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
+ (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
+ chunk_err(fs_info, leaf, chunk, logical,
+ "system chunk with data or metadata type: 0x%llx",
+ type);
+ return -EUCLEAN;
+ }
+
+ features = btrfs_super_incompat_flags(fs_info->super_copy);
+ if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
+ mixed = true;
+
+ if (!mixed) {
+ if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
+ (type & BTRFS_BLOCK_GROUP_DATA)) {
+ chunk_err(fs_info, leaf, chunk, logical,
+ "mixed chunk type in non-mixed mode: 0x%llx", type);
+ return -EUCLEAN;
+ }
+ }
+
+ if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
+ (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
+ ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && num_stripes != 1)) {
+ chunk_err(fs_info, leaf, chunk, logical,
+ "invalid num_stripes:sub_stripes %u:%u for profile %llu",
+ num_stripes, sub_stripes,
+ type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
+ return -EUCLEAN;
+ }
+
+ return 0;
+}
+
+__printf(4, 5)
+__cold
+static void dev_item_err(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct btrfs_key key;
+ struct va_format vaf;
+ va_list args;
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d devid=%llu %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",
+ btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
+ key.objectid, &vaf);
+ va_end(args);
+}
+
+static int check_dev_item(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_dev_item *ditem;
+
+ if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) {
+ dev_item_err(fs_info, leaf, slot,
+ "invalid objectid: has=%llu expect=%llu",
+ key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
+ return -EUCLEAN;
+ }
+ ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
+ if (btrfs_device_id(leaf, ditem) != key->offset) {
+ dev_item_err(fs_info, leaf, slot,
+ "devid mismatch: key has=%llu item has=%llu",
+ key->offset, btrfs_device_id(leaf, ditem));
+ return -EUCLEAN;
+ }
+
+ /*
+ * For device total_bytes, we don't have a reliable way to check it,
+ * as it can be 0 during device removal. The device size can only be
+ * validated by checking its dev extents.
+ */
+ if (btrfs_device_bytes_used(leaf, ditem) >
+ btrfs_device_total_bytes(leaf, ditem)) {
+ dev_item_err(fs_info, leaf, slot,
+ "invalid bytes used: have %llu expect [0, %llu]",
+ btrfs_device_bytes_used(leaf, ditem),
+ btrfs_device_total_bytes(leaf, ditem));
+ return -EUCLEAN;
+ }
+ /*
+ * Remaining members like io_align/type/gen/dev_group aren't really
+ * utilized. Skip them to make later usage of them easier.
+ */
+ return 0;
+}
+
+/* Inode item error output has the same format as dir_item_err() */
+#define inode_item_err(fs_info, eb, slot, fmt, ...) \
+ dir_item_err(fs_info, eb, slot, fmt, __VA_ARGS__)
+
+static int check_inode_item(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_inode_item *iitem;
+ u64 super_gen = btrfs_super_generation(fs_info->super_copy);
+ u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
+ u32 mode;
+
+ if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID ||
+ key->objectid > BTRFS_LAST_FREE_OBJECTID) &&
+ key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID &&
+ key->objectid != BTRFS_FREE_INO_OBJECTID) {
+ generic_err(fs_info, leaf, slot,
+ "invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
+ key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
+ BTRFS_FIRST_FREE_OBJECTID,
+ BTRFS_LAST_FREE_OBJECTID,
+ BTRFS_FREE_INO_OBJECTID);
+ return -EUCLEAN;
+ }
+ if (key->offset != 0) {
+ inode_item_err(fs_info, leaf, slot,
+ "invalid key offset: has %llu expect 0",
+ key->offset);
+ return -EUCLEAN;
+ }
+ iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
+
+ /* Here we use super block generation + 1 to handle log tree */
+ if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) {
+ inode_item_err(fs_info, leaf, slot,
+ "invalid inode generation: has %llu expect (0, %llu]",
+ btrfs_inode_generation(leaf, iitem),
+ super_gen + 1);
+ return -EUCLEAN;
+ }
+ /* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
+ if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
+ inode_item_err(fs_info, leaf, slot,
+ "invalid inode transid: has %llu expect [0, %llu]",
+ btrfs_inode_transid(leaf, iitem), super_gen + 1);
+ return -EUCLEAN;
+ }
+
+ /*
+ * For size and nbytes it's better not to be too strict, as a dir
+ * item's size/nbytes can easily be wrong without affecting anything
+ * in the fs, so we skip the check here.
+ */
+ mode = btrfs_inode_mode(leaf, iitem);
+ if (mode & ~valid_mask) {
+ inode_item_err(fs_info, leaf, slot,
+ "unknown mode bit detected: 0x%x",
+ mode & ~valid_mask);
+ return -EUCLEAN;
+ }
+
+ /*
+ * S_IFMT is not bit mapped so we can't completely rely on is_power_of_2,
+ * but is_power_of_2() can save us from checking FIFO/CHR/DIR/REG.
+ * We only need to check BLK, LNK and SOCK explicitly.
+ */
+ if (!is_power_of_2(mode & S_IFMT)) {
+ if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) {
+ inode_item_err(fs_info, leaf, slot,
+ "invalid mode: has 0%o expect valid S_IF* bit(s)",
+ mode & S_IFMT);
+ return -EUCLEAN;
+ }
+ }
+ if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) {
+ inode_item_err(fs_info, leaf, slot,
+ "invalid nlink: has %u expect no more than 1 for dir",
+ btrfs_inode_nlink(leaf, iitem));
+ return -EUCLEAN;
+ }
+ if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) {
+ inode_item_err(fs_info, leaf, slot,
+ "unknown flags detected: 0x%llx",
+ btrfs_inode_flags(leaf, iitem) &
+ ~BTRFS_INODE_FLAG_MASK);
+ return -EUCLEAN;
+ }
+ return 0;
+}
+
/*
* Common point to switch the item-specific validation.
*/
@@ -456,6 +770,7 @@ static int check_leaf_item(struct btrfs_fs_info *fs_info,
struct btrfs_key *key, int slot)
{
int ret = 0;
+ struct btrfs_chunk *chunk;
switch (key->type) {
case BTRFS_EXTENT_DATA_KEY:
@@ -472,6 +787,17 @@ static int check_leaf_item(struct btrfs_fs_info *fs_info,
case BTRFS_BLOCK_GROUP_ITEM_KEY:
ret = check_block_group_item(fs_info, leaf, key, slot);
break;
+ case BTRFS_CHUNK_ITEM_KEY:
+ chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
+ ret = btrfs_check_chunk_valid(fs_info, leaf, chunk,
+ key->offset);
+ break;
+ case BTRFS_DEV_ITEM_KEY:
+ ret = check_dev_item(fs_info, leaf, key, slot);
+ break;
+ case BTRFS_INODE_ITEM_KEY:
+ ret = check_inode_item(fs_info, leaf, key, slot);
+ break;
}
return ret;
}
@@ -485,6 +811,13 @@ static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
u32 nritems = btrfs_header_nritems(leaf);
int slot;
+ if (btrfs_header_level(leaf) != 0) {
+ generic_err(fs_info, leaf, 0,
+ "invalid level for leaf, have %d expect 0",
+ btrfs_header_level(leaf));
+ return -EUCLEAN;
+ }
+
/*
* Extent buffers from a relocation tree have a owner field that
* corresponds to the subvolume tree they are based on. So just from an
@@ -509,6 +842,12 @@ static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
owner);
return -EUCLEAN;
}
+ /* Unknown tree */
+ if (owner == 0) {
+ generic_err(fs_info, leaf, 0,
+ "invalid owner, root 0 is not defined");
+ return -EUCLEAN;
+ }
key.objectid = owner;
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
@@ -643,9 +982,16 @@ int btrfs_check_node(struct btrfs_fs_info *fs_info, struct extent_buffer *node)
unsigned long nr = btrfs_header_nritems(node);
struct btrfs_key key, next_key;
int slot;
+ int level = btrfs_header_level(node);
u64 bytenr;
int ret = 0;
+ if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
+ generic_err(fs_info, node, 0,
+ "invalid level for node, have %d expect [1, %d]",
+ level, BTRFS_MAX_LEVEL - 1);
+ return -EUCLEAN;
+ }
if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info)) {
btrfs_crit(fs_info,
"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
diff --git a/fs/btrfs/tree-checker.h b/fs/btrfs/tree-checker.h
index ff043275b784..4df45e8a6659 100644
--- a/fs/btrfs/tree-checker.h
+++ b/fs/btrfs/tree-checker.h
@@ -25,4 +25,8 @@ int btrfs_check_leaf_relaxed(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf);
int btrfs_check_node(struct btrfs_fs_info *fs_info, struct extent_buffer *node);
+int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf,
+ struct btrfs_chunk *chunk, u64 logical);
+
#endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d4c86c6cbe39..1cd610ddbb24 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1770,8 +1770,6 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
ret = btrfs_update_inode(trans, root, inode);
} else if (ret == -EEXIST) {
ret = 0;
- } else {
- BUG(); /* Logic Error */
}
iput(inode);
@@ -3422,11 +3420,13 @@ fail:
btrfs_free_path(path);
out_unlock:
mutex_unlock(&dir->log_mutex);
- if (ret == -ENOSPC) {
+ if (err == -ENOSPC) {
btrfs_set_log_full_commit(root->fs_info, trans);
- ret = 0;
- } else if (ret < 0)
- btrfs_abort_transaction(trans, ret);
+ err = 0;
+ } else if (err < 0 && err != -ENOENT) {
+ /* ENOENT can be returned if the entry hasn't been fsynced yet */
+ btrfs_abort_transaction(trans, err);
+ }
btrfs_end_log_trans(root);
@@ -3587,6 +3587,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
* search and this search we'll not find the key again and can just
* bail.
*/
+search:
ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
if (ret != 0)
goto done;
@@ -3606,6 +3607,13 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
if (min_key.objectid != ino || min_key.type != key_type)
goto done;
+
+ if (need_resched()) {
+ btrfs_release_path(path);
+ cond_resched();
+ goto search;
+ }
+
ret = overwrite_item(trans, log, dst_path, src, i,
&min_key);
if (ret) {
@@ -3988,11 +3996,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
fs_info->csum_root,
ds + cs, ds + cs + cl - 1,
&ordered_sums, 0);
- if (ret) {
- btrfs_release_path(dst_path);
- kfree(ins_data);
- return ret;
- }
+ if (ret)
+ break;
}
}
}
@@ -4005,7 +4010,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
* we have to do this after the loop above to avoid changing the
* log tree while trying to change the log tree.
*/
- ret = 0;
while (!list_empty(&ordered_sums)) {
struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
struct btrfs_ordered_sum,
@@ -4182,6 +4186,9 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
const u64 ino = btrfs_ino(inode);
struct btrfs_path *dst_path = NULL;
bool dropped_extents = false;
+ u64 truncate_offset = i_size;
+ struct extent_buffer *leaf;
+ int slot;
int ins_nr = 0;
int start_slot;
int ret;
@@ -4196,9 +4203,43 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
if (ret < 0)
goto out;
+ /*
+ * We must check if there is a prealloc extent that starts before the
+ * i_size and crosses the i_size boundary. This is to ensure later we
+ * truncate down to the end of that extent and not to the i_size, as
+ * otherwise we end up losing part of the prealloc extent after a log
+ * replay and with an implicit hole if there is another prealloc extent
+ * that starts at an offset beyond i_size.
+ */
+ ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
+ if (ret < 0)
+ goto out;
+
+ if (ret == 0) {
+ struct btrfs_file_extent_item *ei;
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_type(leaf, ei) ==
+ BTRFS_FILE_EXTENT_PREALLOC) {
+ u64 extent_end;
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ extent_end = key.offset +
+ btrfs_file_extent_num_bytes(leaf, ei);
+
+ if (extent_end > i_size)
+ truncate_offset = extent_end;
+ }
+ } else {
+ ret = 0;
+ }
+
while (true) {
- struct extent_buffer *leaf = path->nodes[0];
- int slot = path->slots[0];
+ leaf = path->nodes[0];
+ slot = path->slots[0];
if (slot >= btrfs_header_nritems(leaf)) {
if (ins_nr > 0) {
@@ -4236,7 +4277,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
ret = btrfs_truncate_inode_items(trans,
root->log_root,
&inode->vfs_inode,
- i_size,
+ truncate_offset,
BTRFS_EXTENT_DATA_KEY);
} while (ret == -EAGAIN);
if (ret)
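The truncate_offset logic added above keeps a prealloc extent that straddles i_size intact in the log: if such an extent is found, the truncation boundary becomes the end of that extent rather than i_size. A tiny runnable sketch of the computation with made-up offsets:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t i_size = 5 * 1024 * 1024;		/* 5 MiB */
	uint64_t extent_start = 4 * 1024 * 1024;	/* prealloc at 4 MiB */
	uint64_t extent_len = 4 * 1024 * 1024;		/* 4 MiB long */
	uint64_t extent_end = extent_start + extent_len;	/* 8 MiB */
	uint64_t truncate_offset = i_size;

	if (extent_start < i_size && extent_end > i_size)
		truncate_offset = extent_end;

	printf("truncate the log at %" PRIu64 " (i_size %" PRIu64 ")\n",
	       truncate_offset, i_size);
	return 0;
}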
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 9c3b394b99fa..662711200eeb 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4,6 +4,7 @@
*/
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
@@ -27,6 +28,7 @@
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"
+#include "tree-checker.h"
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
[BTRFS_RAID_RAID10] = {
@@ -155,7 +157,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
*
* global::fs_devs - add, remove, updates to the global list
*
- * does not protect: manipulation of the fs_devices::devices list!
+ * does not protect: manipulation of the fs_devices::devices list in general,
+ * but in mount context it can be used to exclude list modifications by e.g.
+ * the scan ioctl
*
* btrfs_device::name - renames (write side), read is RCU
*
@@ -168,6 +172,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
* may be used to exclude some operations from running concurrently without any
* modifications to the list (see write_all_supers)
*
+ * Is not required at mount and close times, because our device list is
+ * protected by the uuid_mutex at that point.
+ *
* balance_mutex
* -------------
* protects balance structures (status, state) and context accessed from
@@ -347,27 +354,6 @@ static struct btrfs_device *__alloc_device(void)
return dev;
}
-/*
- * Find a device specified by @devid or @uuid in the list of @fs_devices, or
- * return NULL.
- *
- * If devid and uuid are both specified, the match must be exact, otherwise
- * only devid is used.
- */
-static struct btrfs_device *find_device(struct btrfs_fs_devices *fs_devices,
- u64 devid, const u8 *uuid)
-{
- struct btrfs_device *dev;
-
- list_for_each_entry(dev, &fs_devices->devices, dev_list) {
- if (dev->devid == devid &&
- (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
- return dev;
- }
- }
- return NULL;
-}
-
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
struct btrfs_fs_devices *fs_devices;
@@ -677,6 +663,11 @@ static void btrfs_free_stale_devices(const char *path,
}
}
+/*
+ * This is only used on mount, and we are protected from competing things
+ * messing with our fs_devices by the uuid_mutex, thus we do not need the
+ * fs_devices->device_list_mutex here.
+ */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
struct btrfs_device *device, fmode_t flags,
void *holder)
@@ -772,8 +763,8 @@ static noinline struct btrfs_device *device_list_add(const char *path,
device = NULL;
} else {
mutex_lock(&fs_devices->device_list_mutex);
- device = find_device(fs_devices, devid,
- disk_super->dev_item.uuid);
+ device = btrfs_find_device(fs_devices, devid,
+ disk_super->dev_item.uuid, NULL, false);
}
if (!device) {
@@ -866,17 +857,25 @@ static noinline struct btrfs_device *device_list_add(const char *path,
if (device->bdev != path_bdev) {
bdput(path_bdev);
mutex_unlock(&fs_devices->device_list_mutex);
- btrfs_warn_in_rcu(device->fs_info,
- "duplicate device fsid:devid for %pU:%llu old:%s new:%s",
- disk_super->fsid, devid,
- rcu_str_deref(device->name), path);
+ /*
+ * device->fs_info may not be reliable here, so
+ * pass in a NULL instead. This avoids a
+ * possible use-after-free when the fs_info and
+ * fs_info->sb are already torn down.
+ */
+ btrfs_warn_in_rcu(NULL,
+ "duplicate device %s devid %llu generation %llu scanned by %s (%d)",
+ path, devid, found_transid,
+ current->comm,
+ task_pid_nr(current));
return ERR_PTR(-EEXIST);
}
bdput(path_bdev);
btrfs_info_in_rcu(device->fs_info,
- "device fsid %pU devid %llu moved old:%s new:%s",
- disk_super->fsid, devid,
- rcu_str_deref(device->name), path);
+ "devid %llu device path %s changed to %s scanned by %s (%d)",
+ devid, rcu_str_deref(device->name),
+ path, current->comm,
+ task_pid_nr(current));
}
name = rcu_string_strdup(path, GFP_NOFS);
@@ -972,6 +971,8 @@ again:
&device->dev_state)) {
if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
&device->dev_state) &&
+ !test_bit(BTRFS_DEV_STATE_MISSING,
+ &device->dev_state) &&
(!latest_dev ||
device->generation > latest_dev->generation)) {
latest_dev = device;
@@ -979,22 +980,13 @@ again:
continue;
}
- if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
- /*
- * In the first step, keep the device which has
- * the correct fsid and the devid that is used
- * for the dev_replace procedure.
- * In the second step, the dev_replace state is
- * read from the device tree and it is known
- * whether the procedure is really active or
- * not, which means whether this device is
- * used or whether it should be removed.
- */
- if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
- &device->dev_state)) {
- continue;
- }
- }
+ /*
+ * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID
+ * in btrfs_init_dev_replace(), so just continue.
+ */
+ if (device->devid == BTRFS_DEV_REPLACE_DEVID)
+ continue;
+
if (device->bdev) {
blkdev_put(device->bdev, device->mode);
device->bdev = NULL;
@@ -1003,9 +995,6 @@ again:
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
list_del_init(&device->dev_alloc_list);
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
- if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
- &device->dev_state))
- fs_devices->rw_devices--;
}
list_del_init(&device->dev_list);
fs_devices->num_devices--;
@@ -1172,8 +1161,14 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
int ret;
lockdep_assert_held(&uuid_mutex);
+ /*
+ * The device_list_mutex cannot be taken here because opening the
+ * underlying device may take further locks, such as bd_mutex.
+ *
+ * We also don't need the lock here, as this is called during mount and
+ * exclusion is provided by the uuid_mutex.
+ */
- mutex_lock(&fs_devices->device_list_mutex);
if (fs_devices->opened) {
fs_devices->opened++;
ret = 0;
@@ -1181,7 +1176,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
list_sort(NULL, &fs_devices->devices, devid_cmp);
ret = open_fs_devices(fs_devices, flags, holder);
}
- mutex_unlock(&fs_devices->device_list_mutex);
return ret;
}
@@ -2144,7 +2138,8 @@ static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
disk_super = (struct btrfs_super_block *)bh->b_data;
devid = btrfs_stack_device_id(&disk_super->dev_item);
dev_uuid = disk_super->dev_item.uuid;
- *device = btrfs_find_device(fs_info, devid, dev_uuid, disk_super->fsid);
+ *device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
+ disk_super->fsid, true);
brelse(bh);
if (!*device)
ret = -ENOENT;
@@ -2190,7 +2185,8 @@ int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
if (devid) {
ret = 0;
- *device = btrfs_find_device(fs_info, devid, NULL, NULL);
+ *device = btrfs_find_device(fs_info->fs_devices, devid,
+ NULL, NULL, true);
if (!*device)
ret = -ENOENT;
} else {
@@ -2322,7 +2318,8 @@ next_slot:
BTRFS_UUID_SIZE);
read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
BTRFS_FSID_SIZE);
- device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
+ device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
+ fs_uuid, true);
BUG_ON(!device); /* Logic error */
if (device->fs_devices->seeding) {
@@ -2456,9 +2453,6 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
btrfs_set_super_num_devices(fs_info->super_copy,
orig_super_num_devices + 1);
- /* add sysfs device entry */
- btrfs_sysfs_add_device_link(fs_devices, device);
-
/*
* we've got more storage, clear any full flags on the space
* infos
@@ -2466,6 +2460,10 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
btrfs_clear_space_info_full(fs_info);
mutex_unlock(&fs_info->chunk_mutex);
+
+ /* Add sysfs device entry */
+ btrfs_sysfs_add_device_link(fs_devices, device);
+
mutex_unlock(&fs_devices->device_list_mutex);
if (seeding_dev) {
@@ -4013,6 +4011,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
btrfs_warn(fs_info,
"balance: cannot set exclusive op status, resume manually");
+ btrfs_release_path(path);
+
mutex_lock(&fs_info->balance_mutex);
BUG_ON(fs_info->balance_ctl);
spin_lock(&fs_info->balance_lock);
@@ -4173,6 +4173,7 @@ static int btrfs_uuid_scan_kthread(void *data)
goto skip;
}
update_tree:
+ btrfs_release_path(path);
if (!btrfs_is_empty_uuid(root_item.uuid)) {
ret = btrfs_uuid_tree_add(trans, root_item.uuid,
BTRFS_UUID_KEY_SUBVOL,
@@ -4197,6 +4198,7 @@ update_tree:
}
skip:
+ btrfs_release_path(path);
if (trans) {
ret = btrfs_end_transaction(trans);
trans = NULL;
@@ -4204,7 +4206,6 @@ skip:
break;
}
- btrfs_release_path(path);
if (key.offset < (u64)-1) {
key.offset++;
} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
@@ -4602,15 +4603,6 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
btrfs_set_fs_incompat(info, RAID56);
}
-#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info) \
- - sizeof(struct btrfs_chunk)) \
- / sizeof(struct btrfs_stripe) + 1)
-
-#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \
- - 2 * sizeof(struct btrfs_disk_key) \
- - 2 * sizeof(struct btrfs_chunk)) \
- / sizeof(struct btrfs_stripe) + 1)
-
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
u64 start, u64 type)
{
@@ -6254,21 +6246,36 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
return BLK_STS_OK;
}
-struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
- u8 *uuid, u8 *fsid)
+/*
+ * Find a device specified by @devid or @uuid in the list of @fs_devices, or
+ * return NULL.
+ *
+ * If devid and uuid are both specified, the match must be exact, otherwise
+ * only devid is used.
+ *
+ * If @seed is true, traverse through the seed devices.
+ */
+struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
+ u64 devid, u8 *uuid, u8 *fsid,
+ bool seed)
{
struct btrfs_device *device;
- struct btrfs_fs_devices *cur_devices;
- cur_devices = fs_info->fs_devices;
- while (cur_devices) {
+ while (fs_devices) {
if (!fsid ||
- !memcmp(cur_devices->fsid, fsid, BTRFS_FSID_SIZE)) {
- device = find_device(cur_devices, devid, uuid);
- if (device)
- return device;
+ !memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) {
+ list_for_each_entry(device, &fs_devices->devices,
+ dev_list) {
+ if (device->devid == devid &&
+ (!uuid || memcmp(device->uuid, uuid,
+ BTRFS_UUID_SIZE) == 0))
+ return device;
+ }
}
- cur_devices = cur_devices->seed;
+ if (seed)
+ fs_devices = fs_devices->seed;
+ else
+ return NULL;
}
return NULL;
}
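
To illustrate the new calling convention, a short snippet mirroring the call sites converted elsewhere in this diff (variable names are taken from those call sites):

    struct btrfs_device *dev;

    /* devid-only lookup, descending into seed devices */
    dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);

    /* exact devid + uuid + fsid match, without traversing seeds */
    dev = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, fs_uuid, false);
    if (!dev)
            return -ENOENT;
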
@@ -6277,8 +6284,17 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
u64 devid, u8 *dev_uuid)
{
struct btrfs_device *device;
+ unsigned int nofs_flag;
+ /*
+ * We call this under the chunk_mutex, so we want to use NOFS for this
+ * allocation; however, we don't want to change btrfs_alloc_device() to
+ * always do NOFS, because we use it in a lot of other GFP_KERNEL-safe
+ * places.
+ */
+ nofs_flag = memalloc_nofs_save();
device = btrfs_alloc_device(NULL, &devid, dev_uuid);
+ memalloc_nofs_restore(nofs_flag);
if (IS_ERR(device))
return device;
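
The memalloc_nofs_save()/memalloc_nofs_restore() pair used above is the generic way to make every allocation inside a region behave as GFP_NOFS without changing the callee's gfp flags. A minimal sketch of the pattern, with an illustrative helper that is not part of the patch:

    #include <linux/slab.h>
    #include <linux/sched/mm.h>

    static void *alloc_while_holding_fs_lock(size_t size)
    {
            unsigned int nofs_flag;
            void *p;

            nofs_flag = memalloc_nofs_save();   /* GFP_KERNEL now avoids FS reclaim */
            p = kmalloc(size, GFP_KERNEL);      /* callee stays GFP_KERNEL-friendly */
            memalloc_nofs_restore(nofs_flag);
            return p;
    }
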
@@ -6343,99 +6359,6 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
return dev;
}
-/* Return -EIO if any error, otherwise return 0. */
-static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
- struct extent_buffer *leaf,
- struct btrfs_chunk *chunk, u64 logical)
-{
- u64 length;
- u64 stripe_len;
- u16 num_stripes;
- u16 sub_stripes;
- u64 type;
- u64 features;
- bool mixed = false;
-
- length = btrfs_chunk_length(leaf, chunk);
- stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
- num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
- sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
- type = btrfs_chunk_type(leaf, chunk);
-
- if (!num_stripes) {
- btrfs_err(fs_info, "invalid chunk num_stripes: %u",
- num_stripes);
- return -EIO;
- }
- if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
- btrfs_err(fs_info, "invalid chunk logical %llu", logical);
- return -EIO;
- }
- if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
- btrfs_err(fs_info, "invalid chunk sectorsize %u",
- btrfs_chunk_sector_size(leaf, chunk));
- return -EIO;
- }
- if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
- btrfs_err(fs_info, "invalid chunk length %llu", length);
- return -EIO;
- }
- if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
- btrfs_err(fs_info, "invalid chunk stripe length: %llu",
- stripe_len);
- return -EIO;
- }
- if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
- type) {
- btrfs_err(fs_info, "unrecognized chunk type: %llu",
- ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
- BTRFS_BLOCK_GROUP_PROFILE_MASK) &
- btrfs_chunk_type(leaf, chunk));
- return -EIO;
- }
-
- if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
- btrfs_err(fs_info, "missing chunk type flag: 0x%llx", type);
- return -EIO;
- }
-
- if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
- (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
- btrfs_err(fs_info,
- "system chunk with data or metadata type: 0x%llx", type);
- return -EIO;
- }
-
- features = btrfs_super_incompat_flags(fs_info->super_copy);
- if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
- mixed = true;
-
- if (!mixed) {
- if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
- (type & BTRFS_BLOCK_GROUP_DATA)) {
- btrfs_err(fs_info,
- "mixed chunk type in non-mixed mode: 0x%llx", type);
- return -EIO;
- }
- }
-
- if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
- (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
- (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
- (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
- (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
- ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
- num_stripes != 1)) {
- btrfs_err(fs_info,
- "invalid num_stripes:sub_stripes %u:%u for profile %llu",
- num_stripes, sub_stripes,
- type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
- return -EIO;
- }
-
- return 0;
-}
-
static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
u64 devid, u8 *uuid, bool error)
{
@@ -6466,9 +6389,15 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
length = btrfs_chunk_length(leaf, chunk);
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
- ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
- if (ret)
- return ret;
+ /*
+ * We only need to verify the chunk item if we're reading from the sys
+ * chunk array, as a chunk item in a tree block is already verified by
+ * the tree-checker.
+ */
+ if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
+ ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
+ if (ret)
+ return ret;
+ }
read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
@@ -6513,8 +6442,8 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
read_extent_buffer(leaf, uuid, (unsigned long)
btrfs_stripe_dev_uuid_nr(chunk, i),
BTRFS_UUID_SIZE);
- map->stripes[i].dev = btrfs_find_device(fs_info, devid,
- uuid, NULL);
+ map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
+ devid, uuid, NULL, true);
if (!map->stripes[i].dev &&
!btrfs_test_opt(fs_info, DEGRADED)) {
free_extent_map(em);
@@ -6653,7 +6582,8 @@ static int read_one_dev(struct btrfs_fs_info *fs_info,
return PTR_ERR(fs_devices);
}
- device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
+ device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
+ fs_uuid, true);
if (!device) {
if (!btrfs_test_opt(fs_info, DEGRADED)) {
btrfs_report_missing_device(fs_info, devid,
@@ -6935,6 +6865,14 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
mutex_lock(&fs_info->chunk_mutex);
/*
+ * It is possible for mount and umount to race in such a way that
+ * we execute this code path, but open_fs_devices failed to clear
+ * total_rw_bytes. We certainly want it cleared before reading the
+ * device items, so clear it here.
+ */
+ fs_info->fs_devices->total_rw_bytes = 0;
+
+ /*
* Read all device items, and then all the chunk items. All
* device items are found before any chunk item (their object id
* is smaller than the lowest possible object id for a chunk
@@ -7243,7 +7181,8 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
int i;
mutex_lock(&fs_devices->device_list_mutex);
- dev = btrfs_find_device(fs_info, stats->devid, NULL, NULL);
+ dev = btrfs_find_device(fs_info->fs_devices, stats->devid,
+ NULL, NULL, true);
mutex_unlock(&fs_devices->device_list_mutex);
if (!dev) {
@@ -7460,7 +7399,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
}
/* Make sure no dev extent is beyond device boundary */
- dev = btrfs_find_device(fs_info, devid, NULL, NULL);
+ dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
if (!dev) {
btrfs_err(fs_info, "failed to find devid %llu", devid);
ret = -EUCLEAN;
@@ -7469,7 +7408,8 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
/* It's possible this device is a dummy for seed device */
if (dev->disk_total_bytes == 0) {
- dev = find_device(fs_info->fs_devices->seed, devid, NULL);
+ dev = btrfs_find_device(fs_info->fs_devices->seed, devid,
+ NULL, NULL, false);
if (!dev) {
btrfs_err(fs_info, "failed to find seed devid %llu",
devid);
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index ac703b15d679..65cd023b097c 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -257,6 +257,15 @@ struct btrfs_fs_devices {
#define BTRFS_BIO_INLINE_CSUM_SIZE 64
+#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info) \
+ - sizeof(struct btrfs_chunk)) \
+ / sizeof(struct btrfs_stripe) + 1)
+
+#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \
+ - 2 * sizeof(struct btrfs_disk_key) \
+ - 2 * sizeof(struct btrfs_chunk)) \
+ / sizeof(struct btrfs_stripe) + 1)
+
/*
* we need the mirror number and stripe index to be passed around
* the call chain while we are processing end_io (especially errors).
@@ -430,8 +439,8 @@ void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 new_size);
-struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
- u8 *uuid, u8 *fsid);
+struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
+ u64 devid, u8 *uuid, u8 *fsid, bool seed);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
int btrfs_balance(struct btrfs_fs_info *fs_info,
diff --git a/fs/buffer.c b/fs/buffer.c
index a550e0d8e965..356e289d19f2 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1336,6 +1336,17 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
}
EXPORT_SYMBOL(__breadahead);
+void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
+ gfp_t gfp)
+{
+ struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
+ if (likely(bh)) {
+ ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
+ brelse(bh);
+ }
+}
+EXPORT_SYMBOL(__breadahead_gfp);
+
/**
* __bread_gfp() - reads a specified block and returns the bh
* @bdev: the block_device to read from
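
A hypothetical caller of the new __breadahead_gfp() helper; the gfp mask here is only an example of why a caller might want control over the buffer-head allocation:

    #include <linux/buffer_head.h>

    /* Kick off readahead of a metadata block without __GFP_FS reclaim. */
    static void readahead_meta_block(struct super_block *sb, sector_t block)
    {
            __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, GFP_NOFS);
    }
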
@@ -2731,16 +2742,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
/* Is the page fully outside i_size? (truncate in progress) */
offset = i_size & (PAGE_SIZE-1);
if (page->index >= end_index+1 || !offset) {
- /*
- * The page may have dirty, unmapped buffers. For example,
- * they may have been added in ext3_writepage(). Make them
- * freeable here, so the page does not leak.
- */
-#if 0
- /* Not really sure about this - do we need this ? */
- if (page->mapping->a_ops->invalidatepage)
- page->mapping->a_ops->invalidatepage(page, offset);
-#endif
unlock_page(page);
return 0; /* don't care */
}
@@ -2935,12 +2936,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
/* Is the page fully outside i_size? (truncate in progress) */
offset = i_size & (PAGE_SIZE-1);
if (page->index >= end_index+1 || !offset) {
- /*
- * The page may have dirty, unmapped buffers. For example,
- * they may have been added in ext3_writepage(). Make them
- * freeable here, so the page does not leak.
- */
- do_invalidatepage(page, 0, PAGE_SIZE);
unlock_page(page);
return 0; /* don't care */
}
@@ -3182,6 +3177,15 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
WARN_ON(atomic_read(&bh->b_count) < 1);
lock_buffer(bh);
if (test_clear_buffer_dirty(bh)) {
+ /*
+ * The bh should be mapped, but it might not be if the
+ * device was hot-removed. Not much we can do but fail the I/O.
+ */
+ if (!buffer_mapped(bh)) {
+ unlock_buffer(bh);
+ return -EIO;
+ }
+
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 8a577409d030..f5bf10729a87 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -64,9 +64,9 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
object = container_of(op->op.object, struct cachefiles_object, fscache);
spin_lock(&object->work_lock);
list_add_tail(&monitor->op_link, &op->to_do);
+ fscache_enqueue_retrieval(op);
spin_unlock(&object->work_lock);
- fscache_enqueue_retrieval(op);
fscache_put_retrieval(op);
return 0;
}
@@ -125,7 +125,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object,
_debug("reissue read");
ret = bmapping->a_ops->readpage(NULL, backpage);
if (ret < 0)
- goto unlock_discard;
+ goto discard;
}
/* but the page may have been read before the monitor was installed, so
@@ -142,6 +142,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object,
unlock_discard:
unlock_page(backpage);
+discard:
spin_lock_irq(&object->work_lock);
list_del(&monitor->op_link);
spin_unlock_irq(&object->work_lock);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 476728bdae8c..e59b2f53a81f 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1437,7 +1437,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
struct page *pinned_page = NULL;
- loff_t off = vmf->pgoff << PAGE_SHIFT;
+ loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
int want, got, err;
sigset_t oldset;
vm_fault_t ret = VM_FAULT_SIGBUS;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 4c0b220e20ba..918781c51f0b 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1047,12 +1047,19 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
struct ceph_mds_session *session = cap->session;
struct ceph_inode_info *ci = cap->ci;
- struct ceph_mds_client *mdsc =
- ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
+ struct ceph_mds_client *mdsc;
int removed = 0;
+ /* 'ci' being NULL means the remove has already occurred */
+ if (!ci) {
+ dout("%s: cap inode is NULL\n", __func__);
+ return;
+ }
+
dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
+ mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc;
+
/* remove from inode's cap rbtree, and clear auth cap */
rb_erase(&cap->ci_node, &ci->i_caps);
if (ci->i_auth_cap == cap)
@@ -1772,6 +1779,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
u32 invalidating_gen = ci->i_rdcache_gen;
spin_unlock(&ci->i_ceph_lock);
+ ceph_fscache_invalidate(inode);
invalidate_mapping_pages(&inode->i_data, 0, -1);
spin_lock(&ci->i_ceph_lock);
@@ -1972,8 +1980,12 @@ retry_locked:
}
/* want more caps from mds? */
- if (want & ~(cap->mds_wanted | cap->issued))
- goto ack;
+ if (want & ~cap->mds_wanted) {
+ if (want & ~(cap->mds_wanted | cap->issued))
+ goto ack;
+ if (!__cap_is_valid(cap))
+ goto ack;
+ }
/* things we might delay */
if ((cap->issued & ~retain) == 0 &&
@@ -2011,12 +2023,24 @@ ack:
if (mutex_trylock(&session->s_mutex) == 0) {
dout("inverting session/ino locks on %p\n",
session);
+ session = ceph_get_mds_session(session);
spin_unlock(&ci->i_ceph_lock);
if (took_snap_rwsem) {
up_read(&mdsc->snap_rwsem);
took_snap_rwsem = 0;
}
- mutex_lock(&session->s_mutex);
+ if (session) {
+ mutex_lock(&session->s_mutex);
+ ceph_put_mds_session(session);
+ } else {
+ /*
+ * Because we take the reference while
+ * holding the i_ceph_lock, it should
+ * never be NULL. Throw a warning if it
+ * ever is.
+ */
+ WARN_ON_ONCE(true);
+ }
goto retry;
}
}
@@ -3628,6 +3652,7 @@ retry:
WARN_ON(1);
tsession = NULL;
target = -1;
+ mutex_lock(&session->s_mutex);
}
goto retry;
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 3c59ad180ef0..4cfe1154d4c7 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -151,6 +151,11 @@ static struct dentry *__get_parent(struct super_block *sb,
req->r_num_caps = 1;
err = ceph_mdsc_do_request(mdsc, NULL, req);
+ if (err) {
+ ceph_mdsc_put_request(req);
+ return ERR_PTR(err);
+ }
+
inode = req->r_target_inode;
if (inode)
ihold(inode);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index faca455bd3c6..4ce2752c8b71 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1819,6 +1819,7 @@ const struct file_operations ceph_file_fops = {
.mmap = ceph_mmap,
.fsync = ceph_fsync,
.lock = ceph_lock,
+ .setlease = simple_nosetlease,
.flock = ceph_flock,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 1e438e0faf77..5f041fede7aa 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -764,8 +764,11 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
info_caps = le32_to_cpu(info->cap.caps);
/* prealloc new cap struct */
- if (info_caps && ceph_snap(inode) == CEPH_NOSNAP)
+ if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
new_cap = ceph_get_cap(mdsc, caps_reservation);
+ if (!new_cap)
+ return -ENOMEM;
+ }
/*
* prealloc xattr data, if it looks like we'll need it. only
@@ -1820,6 +1823,7 @@ static void ceph_invalidate_work(struct work_struct *work)
orig_gen = ci->i_rdcache_gen;
spin_unlock(&ci->i_ceph_lock);
+ ceph_fscache_invalidate(inode);
if (invalidate_inode_pages2(inode->i_mapping) < 0) {
pr_err("invalidate_pages %p fails\n", inode);
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a2e903203bf9..5f3707a90e7f 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -3615,6 +3615,9 @@ static void delayed_work(struct work_struct *work)
dout("mdsc delayed_work\n");
ceph_check_delayed_caps(mdsc);
+ if (mdsc->stopping)
+ return;
+
mutex_lock(&mdsc->mutex);
renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
renew_caps = time_after_eq(jiffies, HZ*renew_interval +
@@ -3682,7 +3685,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
return -ENOMEM;
}
- fsc->mdsc = mdsc;
init_completion(&mdsc->safe_umount_waiters);
init_waitqueue_head(&mdsc->session_close_wq);
INIT_LIST_HEAD(&mdsc->waiting_for_map);
@@ -3723,6 +3725,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
strscpy(mdsc->nodename, utsname()->nodename,
sizeof(mdsc->nodename));
+
+ fsc->mdsc = mdsc;
return 0;
}
@@ -3949,7 +3953,16 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
dout("stop\n");
- cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
+ /*
+ * Make sure the delayed work has stopped before releasing
+ * the resources.
+ *
+ * cancel_delayed_work_sync() would only guarantee that the
+ * currently executing work finishes; the delayed work can
+ * re-arm itself again after that, so flush it instead.
+ */
+ flush_delayed_work(&mdsc->delayed_work);
+
if (mdsc->mdsmap)
ceph_mdsmap_destroy(mdsc->mdsmap);
kfree(mdsc->sessions);
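
The combination used here (a stopping check inside the work function, added earlier in this diff, plus flush_delayed_work() at teardown) is the usual pattern for delayed work that re-arms itself. A generic sketch, not ceph-specific:

    #include <linux/workqueue.h>

    struct periodic_ctx {
            struct delayed_work dwork;
            bool stopping;
    };

    static void periodic_fn(struct work_struct *w)
    {
            struct periodic_ctx *ctx = container_of(w, struct periodic_ctx, dwork.work);

            /* ... do the periodic work ... */

            if (ctx->stopping)
                    return;         /* do not re-arm once teardown has begun */
            schedule_delayed_work(&ctx->dwork, 5 * HZ);
    }

    static void periodic_stop(struct periodic_ctx *ctx)
    {
            ctx->stopping = true;
            /* flush, not just cancel: a run in flight may already have re-armed */
            flush_delayed_work(&ctx->dwork);
    }
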
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 3d19595eb352..4a9b53229fba 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -541,8 +541,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
- cls, con, tag, end, *end);
+ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
+ cls, con, tag, end);
return 0;
}
@@ -552,8 +552,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
- cls, con, tag, end, *end);
+ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 1\n",
+ cls, con, tag, end);
return 0;
}
@@ -563,8 +563,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
- cls, con, tag, end, *end);
+ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
+ cls, con, tag, end);
return 0;
}
@@ -575,8 +575,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
- cls, con, tag, end, *end);
+ cifs_dbg(FYI, "cls = %d con = %d tag = %d sequence_end = %p exit 1\n",
+ cls, con, tag, sequence_end);
return 0;
}
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index a2b2355e7f01..9986817532b1 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -501,7 +501,13 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
else if (map_chars == SFM_MAP_UNI_RSVD) {
bool end_of_string;
- if (i == srclen - 1)
+ /**
+ * Remap spaces and periods found at the end of every
+ * component of the path. The special cases of '.' and
+ * '..' do not need to be dealt with explicitly because
+ * they are addressed in namei.c:link_path_walk().
+ **/
+ if ((i == srclen - 1) || (source[i+1] == '\\'))
end_of_string = true;
else
end_of_string = false;
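
A worked example of the new end-of-component test (the path value is hypothetical):

    /*
     * source = "a \\b."  i.e.  a<space>\b.   (srclen == 5)
     *
     *   i == 1 (' ') : source[2] == '\\'  -> end_of_string = true
     *   i == 4 ('.') : i == srclen - 1    -> end_of_string = true
     *
     * So the trailing space of "a " and the trailing dot of "b." are the
     * characters considered for SFM remapping, one per path component.
     */
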
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index d5457015801d..bc906fcf3f6d 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -229,7 +229,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
rc = server->ops->queryfs(xid, tcon, buf);
free_xid(xid);
- return 0;
+ return rc;
}
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 71c2dd0c7f03..2c632793c88c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -259,8 +259,9 @@ struct smb_version_operations {
int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
- void (*downgrade_oplock)(struct TCP_Server_Info *,
- struct cifsInodeInfo *, bool);
+ void (*downgrade_oplock)(struct TCP_Server_Info *server,
+ struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache);
/* process transaction2 response */
bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
char *, int);
@@ -1160,6 +1161,8 @@ struct cifsFileInfo {
unsigned int f_flags;
bool invalidHandle:1; /* file closed via session abend */
bool oplock_break_cancelled:1;
+ unsigned int oplock_epoch; /* epoch from the lease break */
+ __u32 oplock_level; /* oplock/lease level from the lease break */
int count;
spinlock_t file_info_lock; /* protects four flag/count fields above */
struct mutex fh_mutex; /* prevents reopen race after dead ses*/
@@ -1300,7 +1303,7 @@ struct cifsInodeInfo {
unsigned int epoch; /* used to track lease state changes */
#define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */
#define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */
-#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */
+#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */
#define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */
#define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
#define CIFS_INO_LOCK (5) /* lock bit for synchronization */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 8b9471904f67..cb70f0c6aa1b 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -2051,8 +2051,8 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
}
}
+ kref_put(&wdata2->refcount, cifs_writedata_release);
if (rc) {
- kref_put(&wdata2->refcount, cifs_writedata_release);
if (is_retryable_error(rc))
continue;
i += nr_pages;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 975f800b9dd4..6285085195c1 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -353,8 +353,10 @@ static int reconn_set_ipaddr(struct TCP_Server_Info *server)
return rc;
}
+ spin_lock(&cifs_tcp_ses_lock);
rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
strlen(ipaddr));
+ spin_unlock(&cifs_tcp_ses_lock);
kfree(ipaddr);
return !rc ? -1 : 0;
@@ -775,6 +777,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
+ cancel_delayed_work_sync(&server->echo);
+
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
@@ -4599,9 +4603,14 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
vol_info->nocase = master_tcon->nocase;
vol_info->nohandlecache = master_tcon->nohandlecache;
vol_info->local_lease = master_tcon->local_lease;
+ vol_info->no_lease = master_tcon->no_lease;
+ vol_info->resilient = master_tcon->use_resilient;
+ vol_info->persistent = master_tcon->use_persistent;
vol_info->no_linux_ext = !master_tcon->unix_ext;
+ vol_info->linux_ext = master_tcon->posix_extensions;
vol_info->sectype = master_tcon->ses->sectype;
vol_info->sign = master_tcon->ses->sign;
+ vol_info->seal = master_tcon->seal;
rc = cifs_set_vol_auth(vol_info, master_tcon->ses);
if (rc) {
@@ -4627,10 +4636,6 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
goto out;
}
- /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
- if (tcon->posix_extensions)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
-
if (cap_unix(ses))
reset_cifs_unix_caps(0, tcon, NULL, vol_info);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f6e3c0089825..c7e162c9383d 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -840,6 +840,7 @@ static int
cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
{
struct inode *inode;
+ int rc;
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -849,8 +850,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
CIFS_I(inode)->time = 0; /* force reval */
- if (cifs_revalidate_dentry(direntry))
- return 0;
+ rc = cifs_revalidate_dentry(direntry);
+ if (rc) {
+ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc);
+ switch (rc) {
+ case -ENOENT:
+ case -ESTALE:
+ /*
+ * Those errors mean the dentry is invalid
+ * (file was deleted or recreated)
+ */
+ return 0;
+ default:
+ /*
+ * Otherwise some unexpected error happened;
+ * report it as-is to the VFS layer.
+ */
+ return rc;
+ }
+ }
else {
/*
* If the inode wasn't known to be a dfs entry when
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ad93b063f866..7b482489bd22 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -163,6 +163,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
goto posix_open_ret;
}
} else {
+ cifs_revalidate_mapping(*pinode);
cifs_fattr_to_inode(*pinode, &fattr);
}
@@ -3339,7 +3340,7 @@ again:
if (rc == -ENODATA)
rc = 0;
- ctx->rc = (rc == 0) ? ctx->total_len : rc;
+ ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
mutex_unlock(&ctx->aio_mutex);
@@ -3532,7 +3533,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
* than it negotiated since it will refuse the read
* then.
*/
- if ((tcon->ses) && !(tcon->ses->capabilities &
+ if (!(tcon->ses->capabilities &
tcon->ses->server->vals->cap_large_files)) {
current_read_size = min_t(uint,
current_read_size, CIFSMaxBufSize);
@@ -3804,7 +3805,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
break;
__SetPageLocked(page);
- if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
+ rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
+ if (rc) {
__ClearPageLocked(page);
break;
}
@@ -3820,6 +3822,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
struct list_head *page_list, unsigned num_pages)
{
int rc;
+ int err = 0;
struct list_head tmplist;
struct cifsFileInfo *open_file = file->private_data;
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
@@ -3860,7 +3863,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
* the order of declining indexes. When we put the pages in
* the rdata->pages, then we want them in increasing order.
*/
- while (!list_empty(page_list)) {
+ while (!list_empty(page_list) && !err) {
unsigned int i, nr_pages, bytes, rsize;
loff_t offset;
struct page *page, *tpage;
@@ -3883,9 +3886,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
return 0;
}
- rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
+ nr_pages = 0;
+ err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
&nr_pages, &offset, &bytes);
- if (rc) {
+ if (!nr_pages) {
add_credits_and_wake_if(server, credits, 0);
break;
}
@@ -4185,12 +4189,13 @@ void cifs_oplock_break(struct work_struct *work)
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
int rc = 0;
+ bool purge_cache = false;
wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
TASK_UNINTERRUPTIBLE);
- server->ops->downgrade_oplock(server, cinode,
- test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
+ server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
+ cfile->oplock_epoch, &purge_cache);
if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
cifs_has_mand_locks(cinode)) {
@@ -4205,18 +4210,21 @@ void cifs_oplock_break(struct work_struct *work)
else
break_lease(inode, O_WRONLY);
rc = filemap_fdatawrite(inode->i_mapping);
- if (!CIFS_CACHE_READ(cinode)) {
+ if (!CIFS_CACHE_READ(cinode) || purge_cache) {
rc = filemap_fdatawait(inode->i_mapping);
mapping_set_error(inode->i_mapping, rc);
cifs_zap_mapping(inode);
}
cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
+ if (CIFS_CACHE_WRITE(cinode))
+ goto oplock_break_ack;
}
rc = cifs_push_locks(cfile);
if (rc)
cifs_dbg(VFS, "Push locks rc = %d\n", rc);
+oplock_break_ack:
/*
* releasing stale oplock after recent reconnect of smb session using
* a now incorrect file handle is not a data integrity issue but do
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 51d410c6f6a4..d30eb4350656 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2219,6 +2219,15 @@ set_size_out:
if (rc == 0) {
cifsInode->server_eof = attrs->ia_size;
cifs_setsize(inode, attrs->ia_size);
+
+ /*
+ * The truncate(2) man page says that if the size changes,
+ * the st_ctime and st_mtime fields for the file are
+ * updated.
+ */
+ attrs->ia_ctime = attrs->ia_mtime = current_time(inode);
+ attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME;
+
cifs_truncate_page(inode->i_mapping, inode->i_size);
}
@@ -2541,13 +2550,18 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
+ int rc, retries = 0;
- if (pTcon->unix_ext)
- return cifs_setattr_unix(direntry, attrs);
-
- return cifs_setattr_nounix(direntry, attrs);
+ do {
+ if (pTcon->unix_ext)
+ rc = cifs_setattr_unix(direntry, attrs);
+ else
+ rc = cifs_setattr_nounix(direntry, attrs);
+ retries++;
+ } while (is_retryable_error(rc) && retries < 2);
/* BB: add cifs_setattr_legacy for really old servers */
+ return rc;
}
#if 0
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index e45f8e321371..dd67f56ea61e 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -477,21 +477,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
&pCifsInode->flags);
- /*
- * Set flag if the server downgrades the oplock
- * to L2 else clear.
- */
- if (pSMB->OplockLevel)
- set_bit(
- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &pCifsInode->flags);
- else
- clear_bit(
- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &pCifsInode->flags);
-
- cifs_queue_oplock_break(netfile);
+ netfile->oplock_epoch = 0;
+ netfile->oplock_level = pSMB->OplockLevel;
netfile->oplock_break_cancelled = false;
+ cifs_queue_oplock_break(netfile);
spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index c7f0c8566442..0b7f92451284 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -381,12 +381,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
static void
cifs_downgrade_oplock(struct TCP_Server_Info *server,
- struct cifsInodeInfo *cinode, bool set_level2)
+ struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache)
{
- if (set_level2)
- cifs_set_oplock_level(cinode, OPLOCK_READ);
- else
- cifs_set_oplock_level(cinode, 0);
+ cifs_set_oplock_level(cinode, oplock);
}
static bool
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 14265b4bbcc0..7177720e822e 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -509,15 +509,31 @@ cifs_ses_oplock_break(struct work_struct *work)
kfree(lw);
}
+static void
+smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key,
+ __le32 new_lease_state)
+{
+ struct smb2_lease_break_work *lw;
+
+ lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
+ if (!lw) {
+ cifs_put_tlink(tlink);
+ return;
+ }
+
+ INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
+ lw->tlink = tlink;
+ lw->lease_state = new_lease_state;
+ memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE);
+ queue_work(cifsiod_wq, &lw->lease_break);
+}
+
static bool
-smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
- struct smb2_lease_break_work *lw)
+smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
{
- bool found;
__u8 lease_state;
struct list_head *tmp;
struct cifsFileInfo *cfile;
- struct cifs_pending_open *open;
struct cifsInodeInfo *cinode;
int ack_req = le32_to_cpu(rsp->Flags &
SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
@@ -534,7 +550,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
cifs_dbg(FYI, "found in the open list\n");
cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
- le32_to_cpu(rsp->NewLeaseState));
+ lease_state);
if (ack_req)
cfile->oplock_break_cancelled = false;
@@ -543,40 +559,38 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
- /*
- * Set or clear flags depending on the lease state being READ.
- * HANDLE caching flag should be added when the client starts
- * to defer closing remote file handles with HANDLE leases.
- */
- if (lease_state & SMB2_LEASE_READ_CACHING_HE)
- set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &cinode->flags);
- else
- clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &cinode->flags);
+ cfile->oplock_epoch = le16_to_cpu(rsp->Epoch);
+ cfile->oplock_level = lease_state;
cifs_queue_oplock_break(cfile);
- kfree(lw);
return true;
}
- found = false;
+ return false;
+}
+
+static struct cifs_pending_open *
+smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
+ struct smb2_lease_break *rsp)
+{
+ __u8 lease_state = le32_to_cpu(rsp->NewLeaseState);
+ int ack_req = le32_to_cpu(rsp->Flags &
+ SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
+ struct cifs_pending_open *open;
+ struct cifs_pending_open *found = NULL;
+
list_for_each_entry(open, &tcon->pending_opens, olist) {
if (memcmp(open->lease_key, rsp->LeaseKey,
SMB2_LEASE_KEY_SIZE))
continue;
if (!found && ack_req) {
- found = true;
- memcpy(lw->lease_key, open->lease_key,
- SMB2_LEASE_KEY_SIZE);
- lw->tlink = cifs_get_tlink(open->tlink);
- queue_work(cifsiod_wq, &lw->lease_break);
+ found = open;
}
cifs_dbg(FYI, "found in the pending open list\n");
cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
- le32_to_cpu(rsp->NewLeaseState));
+ lease_state);
open->oplock = lease_state;
}
@@ -592,14 +606,7 @@ smb2_is_valid_lease_break(char *buffer)
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
- struct smb2_lease_break_work *lw;
-
- lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
- if (!lw)
- return false;
-
- INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
- lw->lease_state = rsp->NewLeaseState;
+ struct cifs_pending_open *open;
cifs_dbg(FYI, "Checking for lease break\n");
@@ -617,9 +624,25 @@ smb2_is_valid_lease_break(char *buffer)
spin_lock(&tcon->open_file_lock);
cifs_stats_inc(
&tcon->stats.cifs_stats.num_oplock_brks);
- if (smb2_tcon_has_lease(tcon, rsp, lw)) {
+ if (smb2_tcon_has_lease(tcon, rsp)) {
+ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return true;
+ }
+ open = smb2_tcon_find_pending_open_lease(tcon,
+ rsp);
+ if (open) {
+ __u8 lease_key[SMB2_LEASE_KEY_SIZE];
+ struct tcon_link *tlink;
+
+ tlink = cifs_get_tlink(open->tlink);
+ memcpy(lease_key, open->lease_key,
+ SMB2_LEASE_KEY_SIZE);
spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
+ smb2_queue_pending_open_break(tlink,
+ lease_key,
+ rsp->NewLeaseState);
return true;
}
spin_unlock(&tcon->open_file_lock);
@@ -639,7 +662,6 @@ smb2_is_valid_lease_break(char *buffer)
}
}
spin_unlock(&cifs_tcp_ses_lock);
- kfree(lw);
cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
return false;
}
@@ -701,18 +723,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
&cinode->flags);
- /*
- * Set flag if the server downgrades the oplock
- * to L2 else clear.
- */
- if (rsp->OplockLevel)
- set_bit(
- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &cinode->flags);
- else
- clear_bit(
- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &cinode->flags);
+ cfile->oplock_epoch = 0;
+ cfile->oplock_level = rsp->OplockLevel;
+
spin_unlock(&cfile->file_info_lock);
cifs_queue_oplock_break(cfile);
@@ -725,8 +738,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
}
}
spin_unlock(&cifs_tcp_ses_lock);
- cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
- return false;
+ cifs_dbg(FYI, "No file id matched, oplock break ignored\n");
+ return true;
}
void
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 6fc16329ceb4..5a14f518cd97 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -366,7 +366,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
goto out;
}
- if (bytes_left || p->Next)
+ /* Azure rounds the buffer size up by 8, to a 16-byte boundary */
+ if ((bytes_left > 8) || p->Next)
cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
@@ -950,7 +951,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
- len = sizeof(ea) + ea_name_len + ea_value_len + 1;
+ len = sizeof(*ea) + ea_name_len + ea_value_len + 1;
ea = kzalloc(len, GFP_KERNEL);
if (ea == NULL) {
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
@@ -1173,6 +1174,8 @@ smb2_copychunk_range(const unsigned int xid,
cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
/* Request server copy to target from src identified by key */
+ kfree(retbuf);
+ retbuf = NULL;
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
true /* is_fsctl */, (char *)pcchunk,
@@ -2180,6 +2183,12 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
inode = d_inode(cfile->dentry);
cifsi = CIFS_I(inode);
+ /*
+ * We zero the range through an ioctl, so we need to remove the page
+ * cache first; otherwise the data may be inconsistent with the server.
+ */
+ truncate_pagecache_range(inode, offset, offset + len - 1);
+
/* if file not oplocked can't be sure whether asking to extend size */
if (!CIFS_CACHE_READ(cifsi))
if (keep_size == false) {
@@ -2248,6 +2257,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
return rc;
}
+ /*
+ * We implement punch hole through an ioctl, so we need to remove the
+ * page cache first; otherwise the data may be inconsistent with the server.
+ */
+ truncate_pagecache_range(inode, offset, offset + len - 1);
+
cifs_dbg(FYI, "offset %lld len %lld", offset, len);
fsctl_buf.FileOffset = cpu_to_le64(offset);
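
A small note on the helper used in both hunks above: truncate_pagecache_range() takes an inclusive end offset, hence the "offset + len - 1" argument. For example, punching 4096 bytes at offset 0 passes an lend of 4095, so the cached pages covering the punched range are invalidated before the server-side operation.
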
@@ -2346,22 +2361,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
static void
smb2_downgrade_oplock(struct TCP_Server_Info *server,
- struct cifsInodeInfo *cinode, bool set_level2)
+ struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache)
{
- if (set_level2)
- server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
- 0, NULL);
- else
- server->ops->set_oplock_level(cinode, 0, 0, NULL);
+ server->ops->set_oplock_level(cinode, oplock, 0, NULL);
}
static void
-smb21_downgrade_oplock(struct TCP_Server_Info *server,
- struct cifsInodeInfo *cinode, bool set_level2)
+smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache);
+
+static void
+smb3_downgrade_oplock(struct TCP_Server_Info *server,
+ struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache)
{
- server->ops->set_oplock_level(cinode,
- set_level2 ? SMB2_LEASE_READ_CACHING_HE :
- 0, 0, NULL);
+ unsigned int old_state = cinode->oplock;
+ unsigned int old_epoch = cinode->epoch;
+ unsigned int new_state;
+
+ if (epoch > old_epoch) {
+ smb21_set_oplock_level(cinode, oplock, 0, NULL);
+ cinode->epoch = epoch;
+ }
+
+ new_state = cinode->oplock;
+ *purge_cache = false;
+
+ if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
+ (new_state & CIFS_CACHE_READ_FLG) == 0)
+ *purge_cache = true;
+ else if (old_state == new_state && (epoch - old_epoch > 1))
+ *purge_cache = true;
}
static void
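
Two hypothetical lease-break sequences, to make the purge decision in smb3_downgrade_oplock() above concrete:

    /*
     * old_state has READ (and HANDLE) caching, old_epoch == 3.
     * A break arrives with no READ caching, epoch == 4:
     *   epoch > old_epoch, so the cached state is updated; READ was lost,
     *   so *purge_cache = true.
     *
     * old_state has READ caching, old_epoch == 3.
     * A break arrives with the same state but epoch == 6 (epochs 4-5 missed):
     *   old_state == new_state and epoch - old_epoch > 1,
     *   so *purge_cache = true.
     */
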
@@ -2671,7 +2702,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
}
spin_unlock(&cifs_tcp_ses_lock);
- return 1;
+ return -EAGAIN;
}
/*
* Encrypt or decrypt @rqst message. @rqst[0] has the following format:
@@ -2702,7 +2733,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
if (rc) {
cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
enc ? "en" : "de");
- return 0;
+ return rc;
}
rc = smb3_crypto_aead_allocate(server);
@@ -3437,7 +3468,7 @@ struct smb_version_operations smb21_operations = {
.print_stats = smb2_print_stats,
.is_oplock_break = smb2_is_valid_oplock_break,
.handle_cancelled_mid = smb2_handle_cancelled_mid,
- .downgrade_oplock = smb21_downgrade_oplock,
+ .downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.negotiate_wsize = smb2_negotiate_wsize,
@@ -3534,7 +3565,7 @@ struct smb_version_operations smb30_operations = {
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
.handle_cancelled_mid = smb2_handle_cancelled_mid,
- .downgrade_oplock = smb21_downgrade_oplock,
+ .downgrade_oplock = smb3_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.negotiate_wsize = smb2_negotiate_wsize,
@@ -3639,7 +3670,7 @@ struct smb_version_operations smb311_operations = {
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
.handle_cancelled_mid = smb2_handle_cancelled_mid,
- .downgrade_oplock = smb21_downgrade_oplock,
+ .downgrade_oplock = smb3_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.negotiate_wsize = smb2_negotiate_wsize,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index e2d2b749c8f3..43478ec6fd67 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -406,8 +406,8 @@ build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
pneg_ctxt->DataLength = cpu_to_le16(38);
pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
- pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
- get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
+ pneg_ctxt->SaltLength = cpu_to_le16(SMB311_LINUX_CLIENT_SALT_SIZE);
+ get_random_bytes(pneg_ctxt->Salt, SMB311_LINUX_CLIENT_SALT_SIZE);
pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
}
@@ -461,6 +461,9 @@ static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
printk_once(KERN_WARNING "server sent bad preauth context\n");
return;
+ } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
+ pr_warn_once("server sent invalid SaltLength\n");
+ return;
}
if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
printk_once(KERN_WARNING "illegal SMB3 hash algorithm count\n");
@@ -788,6 +791,13 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
/* Internal types */
server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
+ /*
+ * SMB3.0 supports only one cipher and doesn't have an encryption negotiate context.
+ * Set the cipher type manually.
+ */
+ if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+ server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
+
security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
(struct smb2_sync_hdr *)rsp);
/*
@@ -1132,6 +1142,8 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
spnego_key = cifs_get_spnego_key(ses);
if (IS_ERR(spnego_key)) {
rc = PTR_ERR(spnego_key);
+ if (rc == -ENOKEY)
+ cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
spnego_key = NULL;
goto out;
}
@@ -3112,10 +3124,10 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
* Related requests use info from previous read request
* in chain.
*/
- shdr->SessionId = 0xFFFFFFFF;
+ shdr->SessionId = 0xFFFFFFFFFFFFFFFF;
shdr->TreeId = 0xFFFFFFFF;
- req->PersistentFileId = 0xFFFFFFFF;
- req->VolatileFileId = 0xFFFFFFFF;
+ req->PersistentFileId = 0xFFFFFFFFFFFFFFFF;
+ req->VolatileFileId = 0xFFFFFFFFFFFFFFFF;
}
}
if (remaining_bytes > io_parms->length)
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 308c682fa4d3..8a44d59947b7 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -222,7 +222,7 @@ struct smb2_negotiate_req {
__le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
__le16 NegotiateContextCount; /* SMB3.1.1 only. MBZ earlier */
__le16 Reserved2;
- __le16 Dialects[1]; /* One dialect (vers=) at a time for now */
+ __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
} __packed;
/* Dialects */
@@ -257,12 +257,20 @@ struct smb2_neg_context {
/* Followed by array of data */
} __packed;
-#define SMB311_SALT_SIZE 32
+#define SMB311_LINUX_CLIENT_SALT_SIZE 32
/* Hash Algorithm Types */
#define SMB2_PREAUTH_INTEGRITY_SHA512 cpu_to_le16(0x0001)
#define SMB2_PREAUTH_HASH_SIZE 64
-#define MIN_PREAUTH_CTXT_DATA_LEN (SMB311_SALT_SIZE + 6)
+/*
+ * The SaltLength that the server sends can be zero, so only the three
+ * required fields (all __le16) remain, six bytes in total. The minimum
+ * context data length in the response is therefore six bytes, which
+ * accounts for
+ *
+ * HashAlgorithmCount, SaltLength, and one HashAlgorithm.
+ */
+#define MIN_PREAUTH_CTXT_DATA_LEN 6
+
struct smb2_preauth_neg_context {
__le16 ContextType; /* 1 */
__le16 DataLength;
@@ -270,7 +278,7 @@ struct smb2_preauth_neg_context {
__le16 HashAlgorithmCount; /* 1 */
__le16 SaltLength;
__le16 HashAlgorithms; /* HashAlgorithms[0] since only one defined */
- __u8 Salt[SMB311_SALT_SIZE];
+ __u8 Salt[SMB311_LINUX_CLIENT_SALT_SIZE];
} __packed;
/* Encryption Algorithms Ciphers */
@@ -1209,7 +1217,7 @@ struct smb2_oplock_break {
struct smb2_lease_break {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 44 */
- __le16 Reserved;
+ __le16 Epoch;
__le32 Flags;
__u8 LeaseKey[16];
__le32 CurrentLeaseState;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 0c4df56c825a..59643acb6d67 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -392,7 +392,7 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
struct smb_rqst *rqst, int flags)
{
struct kvec iov;
- struct smb2_transform_hdr tr_hdr;
+ struct smb2_transform_hdr *tr_hdr;
struct smb_rqst cur_rqst[MAX_COMPOUND];
int rc;
@@ -402,28 +402,34 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
if (num_rqst > MAX_COMPOUND - 1)
return -ENOMEM;
- memset(&cur_rqst[0], 0, sizeof(cur_rqst));
- memset(&iov, 0, sizeof(iov));
- memset(&tr_hdr, 0, sizeof(tr_hdr));
-
- iov.iov_base = &tr_hdr;
- iov.iov_len = sizeof(tr_hdr);
- cur_rqst[0].rq_iov = &iov;
- cur_rqst[0].rq_nvec = 1;
-
if (!server->ops->init_transform_rq) {
cifs_dbg(VFS, "Encryption requested but transform callback "
"is missing\n");
return -EIO;
}
+ tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
+ if (!tr_hdr)
+ return -ENOMEM;
+
+ memset(&cur_rqst[0], 0, sizeof(cur_rqst));
+ memset(&iov, 0, sizeof(iov));
+ memset(tr_hdr, 0, sizeof(*tr_hdr));
+
+ iov.iov_base = tr_hdr;
+ iov.iov_len = sizeof(*tr_hdr);
+ cur_rqst[0].rq_iov = &iov;
+ cur_rqst[0].rq_nvec = 1;
+
rc = server->ops->init_transform_rq(server, num_rqst + 1,
&cur_rqst[0], rqst);
if (rc)
- return rc;
+ goto out;
rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
+out:
+ kfree(tr_hdr);
return rc;
}
@@ -885,9 +891,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
/*
* Compounding is never used during session establish.
*/
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
+ mutex_lock(&ses->server->srv_mutex);
smb311_update_preauth_hash(ses, rqst[0].rq_iov,
rqst[0].rq_nvec);
+ mutex_unlock(&ses->server->srv_mutex);
+ }
if (timeout == CIFS_ASYNC_OP)
goto out;
@@ -958,7 +967,9 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
.iov_base = resp_iov[0].iov_base,
.iov_len = resp_iov[0].iov_len
};
+ mutex_lock(&ses->server->srv_mutex);
smb311_update_preauth_hash(ses, &iov, 1);
+ mutex_unlock(&ses->server->srv_mutex);
}
out:
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 2cc6b1c49d34..f9628fc20fec 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1537,6 +1537,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
spin_lock(&configfs_dirent_lock);
configfs_detach_rollback(dentry);
spin_unlock(&configfs_dirent_lock);
+ config_item_put(parent_item);
return -EINTR;
}
frag->frag_dead = true;
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index bb0a427517e9..50b7c4c4310e 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -392,7 +392,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
attr = to_attr(dentry);
if (!attr)
- goto out_put_item;
+ goto out_free_buffer;
if (type & CONFIGFS_ITEM_BIN_ATTR) {
buffer->bin_attr = to_bin_attr(dentry);
@@ -405,7 +405,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
/* Grab the module reference for this attribute if we have one */
error = -ENODEV;
if (!try_module_get(buffer->owner))
- goto out_put_item;
+ goto out_free_buffer;
error = -EACCES;
if (!buffer->item->ci_type)
@@ -449,8 +449,6 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
out_put_module:
module_put(buffer->owner);
-out_put_item:
- config_item_put(buffer->item);
out_free_buffer:
up_read(&frag->frag_sem);
kfree(buffer);
diff --git a/fs/coredump.c b/fs/coredump.c
index 1e2c87acac9b..ef7ed64947e9 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -753,6 +753,14 @@ void do_coredump(const siginfo_t *siginfo)
if (displaced)
put_files_struct(displaced);
if (!dump_interrupted()) {
+ /*
+ * The usermode helper disabled via CONFIG_STATIC_USERMODEHELPER_PATH=""
+ * leaves cprm.file set to NULL here.
+ */
+ if (!cprm.file) {
+ pr_info("Core dump to |%s disabled\n", cn.corename);
+ goto close_fail;
+ }
file_start_write(cprm.file);
core_dumped = binfmt->core_dump(&cprm);
file_end_write(cprm.file);
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index c83ddff3ff4a..04a3c2c92b21 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -314,45 +314,47 @@ int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
EXPORT_SYMBOL(fscrypt_decrypt_page);
/*
- * Validate dentries for encrypted directories to make sure we aren't
- * potentially caching stale data after a key has been added or
- * removed.
+ * Validate dentries in encrypted directories to make sure we aren't potentially
+ * caching stale dentries after a key has been added.
*/
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
struct dentry *dir;
- int dir_has_key, cached_with_key;
+ int err;
+ int valid;
+
+ /*
+ * Plaintext names are always valid, since fscrypt doesn't support
+ * reverting to ciphertext names without evicting the directory's inode
+ * -- which implies eviction of the dentries in the directory.
+ */
+ if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME))
+ return 1;
+
+ /*
+ * Ciphertext name; valid if the directory's key is still unavailable.
+ *
+ * Although fscrypt forbids rename() on ciphertext names, we still must
+ * use dget_parent() here rather than use ->d_parent directly. That's
+ * because a corrupted fs image may contain directory hard links, which
+ * the VFS handles by moving the directory's dentry tree in the dcache
+ * each time ->lookup() finds the directory and it already has a dentry
+ * elsewhere. Thus ->d_parent can be changing, and we must safely grab
+ * a reference to some ->d_parent to prevent it from being freed.
+ */
if (flags & LOOKUP_RCU)
return -ECHILD;
dir = dget_parent(dentry);
- if (!IS_ENCRYPTED(d_inode(dir))) {
- dput(dir);
- return 0;
- }
-
- spin_lock(&dentry->d_lock);
- cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
- spin_unlock(&dentry->d_lock);
- dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
+ err = fscrypt_get_encryption_info(d_inode(dir));
+ valid = !fscrypt_has_encryption_key(d_inode(dir));
dput(dir);
- /*
- * If the dentry was cached without the key, and it is a
- * negative dentry, it might be a valid name. We can't check
- * if the key has since been made available due to locking
- * reasons, so we fail the validation so ext4_lookup() can do
- * this check.
- *
- * We also fail the validation if the dentry was created with
- * the key present, but we no longer have the key, or vice versa.
- */
- if ((!cached_with_key && d_is_negative(dentry)) ||
- (!cached_with_key && dir_has_key) ||
- (cached_with_key && !dir_has_key))
- return 0;
- return 1;
+ if (err < 0)
+ return err;
+
+ return valid;
}
const struct dentry_operations fscrypt_d_ops = {
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index d7a0f682ca12..17bb9a3fc0b0 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -354,6 +354,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
}
if (!lookup)
return -ENOKEY;
+ fname->is_ciphertext_name = true;
/*
* We don't have the key and we are doing a lookup; decode the
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index 926e5df20ec3..aa86cb2db823 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -49,7 +49,8 @@ int fscrypt_file_open(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL_GPL(fscrypt_file_open);
-int __fscrypt_prepare_link(struct inode *inode, struct inode *dir)
+int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
+ struct dentry *dentry)
{
int err;
@@ -57,8 +58,12 @@ int __fscrypt_prepare_link(struct inode *inode, struct inode *dir)
if (err)
return err;
+ /* ... in case we looked up no-key name before key was added */
+ if (fscrypt_is_nokey_name(dentry))
+ return -ENOKEY;
+
if (!fscrypt_has_permitted_context(dir, inode))
- return -EPERM;
+ return -EXDEV;
return 0;
}
@@ -78,37 +83,42 @@ int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
if (err)
return err;
+ /* ... in case we looked up no-key name(s) before key was added */
+ if (fscrypt_is_nokey_name(old_dentry) ||
+ fscrypt_is_nokey_name(new_dentry))
+ return -ENOKEY;
+
if (old_dir != new_dir) {
if (IS_ENCRYPTED(new_dir) &&
!fscrypt_has_permitted_context(new_dir,
d_inode(old_dentry)))
- return -EPERM;
+ return -EXDEV;
if ((flags & RENAME_EXCHANGE) &&
IS_ENCRYPTED(old_dir) &&
!fscrypt_has_permitted_context(old_dir,
d_inode(new_dentry)))
- return -EPERM;
+ return -EXDEV;
}
return 0;
}
EXPORT_SYMBOL_GPL(__fscrypt_prepare_rename);
-int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry)
+int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ struct fscrypt_name *fname)
{
- int err = fscrypt_get_encryption_info(dir);
+ int err = fscrypt_setup_filename(dir, &dentry->d_name, 1, fname);
- if (err)
+ if (err && err != -ENOENT)
return err;
- if (fscrypt_has_encryption_key(dir)) {
+ if (fname->is_ciphertext_name) {
spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
+ dentry->d_flags |= DCACHE_ENCRYPTED_NAME;
spin_unlock(&dentry->d_lock);
+ d_set_d_op(dentry, &fscrypt_d_ops);
}
-
- d_set_d_op(dentry, &fscrypt_d_ops);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(__fscrypt_prepare_lookup);
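
Switching these failures from -EPERM to -EXDEV lets userspace treat a link or rename between incompatible encryption contexts like a cross-device rename. A hedged sketch of that fallback, roughly what mv(1) does; the helpers below are illustrative and not part of the patch:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Minimal copy+unlink fallback; real tools also preserve metadata. */
static int copy_then_unlink(const char *src, const char *dst)
{
	char buf[4096];
	ssize_t n;
	int in = open(src, O_RDONLY);
	int out = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0600);

	if (in < 0 || out < 0)
		goto fail;
	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n)
			goto fail;
	if (n < 0)
		goto fail;
	close(in);
	close(out);
	return unlink(src);
fail:
	if (in >= 0)
		close(in);
	if (out >= 0)
		close(out);
	return -1;
}

static int move_path(const char *src, const char *dst)
{
	if (rename(src, dst) == 0)
		return 0;
	if (errno == EXDEV)	/* cross-mount, or now cross-policy */
		return copy_then_unlink(src, dst);
	return -1;
}
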
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 4288839501e9..e9d975f39f46 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -153,8 +153,7 @@ EXPORT_SYMBOL(fscrypt_ioctl_get_policy);
* malicious offline violations of this constraint, while the link and rename
* checks are needed to prevent online violations of this constraint.
*
- * Return: 1 if permitted, 0 if forbidden. If forbidden, the caller must fail
- * the filesystem operation with EPERM.
+ * Return: 1 if permitted, 0 if forbidden.
*/
int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
{
diff --git a/fs/dcache.c b/fs/dcache.c
index 6e0022326afe..1897833a4668 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -864,17 +864,19 @@ struct dentry *dget_parent(struct dentry *dentry)
{
int gotref;
struct dentry *ret;
+ unsigned seq;
/*
* Do optimistic parent lookup without any
* locking.
*/
rcu_read_lock();
+ seq = raw_seqcount_begin(&dentry->d_seq);
ret = READ_ONCE(dentry->d_parent);
gotref = lockref_get_not_zero(&ret->d_lockref);
rcu_read_unlock();
if (likely(gotref)) {
- if (likely(ret == READ_ONCE(dentry->d_parent)))
+ if (!read_seqcount_retry(&dentry->d_seq, seq))
return ret;
dput(ret);
}
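
The change above is the standard seqcount read pattern: sample the counter, read the data optimistically, then retry if a writer raced. A minimal userspace analog using C11 atomics (single writer assumed; the struct and function names here are illustrative):

#include <stdatomic.h>

struct seq_protected {
	atomic_uint seq;	/* even: stable, odd: update in progress */
	atomic_int value;
};

static void writer_update(struct seq_protected *p, int v)
{
	unsigned int s = atomic_load_explicit(&p->seq, memory_order_relaxed);

	atomic_store_explicit(&p->seq, s + 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&p->value, v, memory_order_relaxed);
	atomic_store_explicit(&p->seq, s + 2, memory_order_release);
}

static int reader_read(struct seq_protected *p)
{
	unsigned int s1, s2;
	int v;

	do {
		s1 = atomic_load_explicit(&p->seq, memory_order_acquire);
		v = atomic_load_explicit(&p->value, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		s2 = atomic_load_explicit(&p->seq, memory_order_relaxed);
	} while ((s1 & 1) || s1 != s2);	/* writer active or raced: retry */

	return v;
}
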
@@ -2711,6 +2713,20 @@ static void copy_name(struct dentry *dentry, struct dentry *target)
}
/*
+ * When d_splice_alias() moves a directory's encrypted alias to its decrypted
+ * alias as a result of the encryption key being added, DCACHE_ENCRYPTED_NAME
+ * must be cleared. Note that we don't have to support arbitrary moves of this
+ * flag because fscrypt doesn't allow encrypted aliases to be the source or
+ * target of a rename().
+ */
+static inline void fscrypt_handle_d_move(struct dentry *dentry)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ dentry->d_flags &= ~DCACHE_ENCRYPTED_NAME;
+#endif
+}
+
+/*
* __d_move - move a dentry
* @dentry: entry to move
* @target: new dentry
@@ -2785,6 +2801,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
__d_rehash(dentry);
fsnotify_update_flags(dentry);
+ fscrypt_handle_d_move(dentry);
write_seqcount_end(&target->d_seq);
write_seqcount_end(&dentry->d_seq);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1abb7634b2d5..ec7c7d1c0329 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -856,6 +856,7 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
struct buffer_head *map_bh)
{
int ret = 0;
+ int boundary = sdio->boundary; /* dio_send_cur_page may clear it */
if (dio->op == REQ_OP_WRITE) {
/*
@@ -894,10 +895,10 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
out:
/*
- * If sdio->boundary then we want to schedule the IO now to
+ * If boundary then we want to schedule the IO now to
* avoid metadata seeks.
*/
- if (sdio->boundary) {
+ if (boundary) {
ret = dio_send_cur_page(dio, sdio, map_bh);
if (sdio->bio)
dio_bio_submit(dio, sdio);
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 1270551d24e3..f13d86524450 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -218,6 +218,7 @@ struct dlm_space {
struct list_head members;
struct mutex members_lock;
int members_count;
+ struct dlm_nodes *nds;
};
struct dlm_comms {
@@ -426,6 +427,7 @@ static struct config_group *make_space(struct config_group *g, const char *name)
INIT_LIST_HEAD(&sp->members);
mutex_init(&sp->members_lock);
sp->members_count = 0;
+ sp->nds = nds;
return &sp->group;
fail:
@@ -447,6 +449,7 @@ static void drop_space(struct config_group *g, struct config_item *i)
static void release_space(struct config_item *i)
{
struct dlm_space *sp = config_item_to_space(i);
+ kfree(sp->nds);
kfree(sp);
}
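
The leak fix above follows a simple ownership rule: record the secondary allocation in the object that owns it at creation time, and free it in that object's release path. A generic userspace sketch of the same pattern (names are illustrative, not dlm's):

#include <stdlib.h>

struct nodes { int count; };

struct space {
	struct nodes *nds;	/* owned; freed together with the space */
};

static struct space *make_space(void)
{
	struct space *sp = calloc(1, sizeof(*sp));
	struct nodes *nds = calloc(1, sizeof(*nds));

	if (!sp || !nds) {
		free(sp);
		free(nds);
		return NULL;
	}
	sp->nds = nds;
	return sp;
}

static void release_space(struct space *sp)
{
	free(sp->nds);
	free(sp);
}
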
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index fa08448e35dd..bb87dad03cd4 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -544,6 +544,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
if (bucket >= ls->ls_rsbtbl_size) {
kfree(ri);
+ ++*pos;
return NULL;
}
tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
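
The one-line fix above enforces the seq_file iteration contract: the next step must advance the position even when it reports the end, otherwise the read loop resumes from a stale position and repeats output. A small userspace sketch of that contract (table contents are made up):

#include <stdio.h>

static const char *const table[] = { "alpha", "beta", "gamma" };

static const char *seq_start(long long pos)
{
	if (pos < 0 || pos >= (long long)(sizeof(table) / sizeof(table[0])))
		return NULL;
	return table[pos];
}

static const char *seq_next(long long *pos)
{
	++*pos;			/* advance even when returning NULL */
	return seq_start(*pos);
}

int main(void)
{
	long long pos = 0;

	for (const char *v = seq_start(pos); v; v = seq_next(&pos))
		puts(v);
	return 0;
}
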
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 748e8d59e611..cb287df13a7a 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -99,7 +99,6 @@ do { \
__LINE__, __FILE__, #x, jiffies); \
{do} \
printk("\n"); \
- BUG(); \
panic("DLM: Record message above and reboot.\n"); \
} \
}
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index f1261fa0af8a..244b87e4dfe7 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -633,6 +633,9 @@ static int new_lockspace(const char *name, const char *cluster,
wait_event(ls->ls_recover_lock_wait,
test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags));
+ /* let kobject handle freeing of ls if there's an error */
+ do_unreg = 1;
+
ls->ls_kobj.kset = dlm_kset;
error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
"%s", ls->ls_name);
@@ -640,9 +643,6 @@ static int new_lockspace(const char *name, const char *cluster,
goto out_recoverd;
kobject_uevent(&ls->ls_kobj, KOBJ_ADD);
- /* let kobject handle freeing of ls if there's an error */
- do_unreg = 1;
-
/* This uevent triggers dlm_controld in userspace to add us to the
group of nodes that are members of this lockspace (managed by the
cluster infrastructure.) Once it's done that, it tells us who the
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 8e5353bd72cf..708f931c36f1 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -325,10 +325,8 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
struct extent_crypt_result ecr;
int rc = 0;
- if (!crypt_stat || !crypt_stat->tfm
- || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
- return -EINVAL;
-
+ BUG_ON(!crypt_stat || !crypt_stat->tfm
+ || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
crypt_stat->key_size);
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 025d66a705db..b86a9b3a39c0 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -506,6 +506,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
goto out;
}
+ if (!dev_name) {
+ rc = -EINVAL;
+ err = "Device name cannot be null";
+ goto out;
+ }
+
rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid);
if (rc) {
err = "Error parsing options";
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 8c6ab6c95727..7f40343b39b0 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -10,6 +10,7 @@
#include <linux/efi.h>
#include <linux/fs.h>
#include <linux/ctype.h>
+#include <linux/kmemleak.h>
#include <linux/slab.h>
#include <linux/uuid.h>
@@ -106,6 +107,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
var->var.VariableName[i] = '\0';
inode->i_private = var;
+ kmemleak_ignore(var);
err = efivar_entry_add(var, &efivarfs_list);
if (err)
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 5b68e4294faa..834615f13f3e 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -145,6 +145,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
+ /* replace invalid slashes like kobject_set_name_vargs does for /sys/firmware/efi/vars. */
+ strreplace(name, '/', '!');
+
inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
is_removable);
if (!inode)
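
strreplace() above rewrites '/' to '!' in place so variable names containing slashes cannot escape the directory, matching what kobject_set_name_vargs() does under /sys/firmware/efi/vars. A userspace sketch of the same character replacement (the in-kernel helper returns a pointer to the terminating NUL instead; the example variable name is made up):

#include <stdio.h>

static char *replace_char(char *s, char old, char new)
{
	for (char *p = s; *p; p++)
		if (*p == old)
			*p = new;
	return s;
}

int main(void)
{
	char name[] = "Boot0001-with/slash-8be4df61-93ca-11d2-aa0d-00e098032b8c";

	puts(replace_char(name, '/', '!'));	/* '/' becomes '!' */
	return 0;
}
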
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 58f48ea0db23..a4a32b79e832 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -222,8 +222,7 @@ struct eventpoll {
struct file *file;
/* used to optimize loop detection check */
- int visited;
- struct list_head visited_list_link;
+ u64 gen;
#ifdef CONFIG_NET_RX_BUSY_POLL
/* used to track busy poll napi_id */
@@ -273,6 +272,8 @@ static long max_user_watches __read_mostly;
*/
static DEFINE_MUTEX(epmutex);
+static u64 loop_check_gen = 0;
+
/* Used to check for epoll file descriptor inclusion loops */
static struct nested_calls poll_loop_ncalls;
@@ -282,9 +283,6 @@ static struct kmem_cache *epi_cache __read_mostly;
/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;
-/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
-static LIST_HEAD(visited_list);
-
/*
* List of files with newly added links, where we may need to limit the number
* of emanating paths. Protected by the epmutex.
@@ -1378,7 +1376,7 @@ static int reverse_path_check(void)
static int ep_create_wakeup_source(struct epitem *epi)
{
- const char *name;
+ struct name_snapshot n;
struct wakeup_source *ws;
if (!epi->ep->ws) {
@@ -1387,8 +1385,9 @@ static int ep_create_wakeup_source(struct epitem *epi)
return -ENOMEM;
}
- name = epi->ffd.file->f_path.dentry->d_name.name;
- ws = wakeup_source_register(name);
+ take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
+ ws = wakeup_source_register(n.name);
+ release_dentry_name_snapshot(&n);
if (!ws)
return -ENOMEM;
@@ -1450,6 +1449,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
RCU_INIT_POINTER(epi->ws, NULL);
}
+ /* Add the current item to the list of active epoll hook for this file */
+ spin_lock(&tfile->f_lock);
+ list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
+ spin_unlock(&tfile->f_lock);
+
+ /*
+ * Add the current item to the RB tree. All RB tree operations are
+ * protected by "mtx", and ep_insert() is called with "mtx" held.
+ */
+ ep_rbtree_insert(ep, epi);
+
+ /* now check if we've created too many backpaths */
+ error = -EINVAL;
+ if (full_check && reverse_path_check())
+ goto error_remove_epi;
+
/* Initialize the poll table using the queue callback */
epq.epi = epi;
init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
@@ -1472,22 +1487,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
if (epi->nwait < 0)
goto error_unregister;
- /* Add the current item to the list of active epoll hook for this file */
- spin_lock(&tfile->f_lock);
- list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
- spin_unlock(&tfile->f_lock);
-
- /*
- * Add the current item to the RB tree. All RB tree operations are
- * protected by "mtx", and ep_insert() is called with "mtx" held.
- */
- ep_rbtree_insert(ep, epi);
-
- /* now check if we've created too many backpaths */
- error = -EINVAL;
- if (full_check && reverse_path_check())
- goto error_remove_epi;
-
/* We have to drop the new item inside our item list to keep track of it */
spin_lock_irq(&ep->wq.lock);
@@ -1516,6 +1515,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
return 0;
+error_unregister:
+ ep_unregister_pollwait(ep, epi);
error_remove_epi:
spin_lock(&tfile->f_lock);
list_del_rcu(&epi->fllink);
@@ -1523,9 +1524,6 @@ error_remove_epi:
rb_erase_cached(&epi->rbn, &ep->rbr);
-error_unregister:
- ep_unregister_pollwait(ep, epi);
-
/*
* We need to do this because an event could have been arrived on some
* allocated wait queue. Note that we don't care about the ep->ovflist
@@ -1868,13 +1866,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
struct epitem *epi;
mutex_lock_nested(&ep->mtx, call_nests + 1);
- ep->visited = 1;
- list_add(&ep->visited_list_link, &visited_list);
+ ep->gen = loop_check_gen;
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn);
if (unlikely(is_file_epoll(epi->ffd.file))) {
ep_tovisit = epi->ffd.file->private_data;
- if (ep_tovisit->visited)
+ if (ep_tovisit->gen == loop_check_gen)
continue;
error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
ep_loop_check_proc, epi->ffd.file,
@@ -1890,9 +1887,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
* not already there, and calling reverse_path_check()
* during ep_insert().
*/
- if (list_empty(&epi->ffd.file->f_tfile_llink))
- list_add(&epi->ffd.file->f_tfile_llink,
- &tfile_check_list);
+ if (list_empty(&epi->ffd.file->f_tfile_llink)) {
+ if (get_file_rcu(epi->ffd.file))
+ list_add(&epi->ffd.file->f_tfile_llink,
+ &tfile_check_list);
+ }
}
}
mutex_unlock(&ep->mtx);
@@ -1913,18 +1912,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
*/
static int ep_loop_check(struct eventpoll *ep, struct file *file)
{
- int ret;
- struct eventpoll *ep_cur, *ep_next;
-
- ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+ return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
ep_loop_check_proc, file, ep, current);
- /* clear visited list */
- list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
- visited_list_link) {
- ep_cur->visited = 0;
- list_del(&ep_cur->visited_list_link);
- }
- return ret;
}
static void clear_tfile_check_list(void)
@@ -1936,6 +1925,7 @@ static void clear_tfile_check_list(void)
file = list_first_entry(&tfile_check_list, struct file,
f_tfile_llink);
list_del_init(&file->f_tfile_llink);
+ fput(file);
}
INIT_LIST_HEAD(&tfile_check_list);
}
@@ -2085,19 +2075,20 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
mutex_lock_nested(&ep->mtx, 0);
if (op == EPOLL_CTL_ADD) {
if (!list_empty(&f.file->f_ep_links) ||
+ ep->gen == loop_check_gen ||
is_file_epoll(tf.file)) {
full_check = 1;
mutex_unlock(&ep->mtx);
mutex_lock(&epmutex);
if (is_file_epoll(tf.file)) {
error = -ELOOP;
- if (ep_loop_check(ep, tf.file) != 0) {
- clear_tfile_check_list();
+ if (ep_loop_check(ep, tf.file) != 0)
goto error_tgt_fput;
- }
- } else
+ } else {
+ get_file(tf.file);
list_add(&tf.file->f_tfile_llink,
&tfile_check_list);
+ }
mutex_lock_nested(&ep->mtx, 0);
if (is_file_epoll(tf.file)) {
tep = tf.file->private_data;
@@ -2121,8 +2112,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
error = ep_insert(ep, &epds, tf.file, fd, full_check);
} else
error = -EEXIST;
- if (full_check)
- clear_tfile_check_list();
break;
case EPOLL_CTL_DEL:
if (epi)
@@ -2145,8 +2134,11 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
mutex_unlock(&ep->mtx);
error_tgt_fput:
- if (full_check)
+ if (full_check) {
+ clear_tfile_check_list();
+ loop_check_gen++;
mutex_unlock(&epmutex);
+ }
fdput(tf);
error_fput:
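
With the visited list replaced by a generation count, the loop check above still rejects cycles of epoll instances watching each other; userspace sees that as -ELOOP from EPOLL_CTL_ADD. A minimal demonstration:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>

int main(void)
{
	int ep1 = epoll_create1(0);
	int ep2 = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };

	if (ep1 < 0 || ep2 < 0)
		return 1;

	ev.data.fd = ep2;
	if (epoll_ctl(ep1, EPOLL_CTL_ADD, ep2, &ev))	/* ep1 watches ep2: fine */
		perror("add ep2 to ep1");

	ev.data.fd = ep1;
	if (epoll_ctl(ep2, EPOLL_CTL_ADD, ep1, &ev))	/* would form a cycle */
		printf("second add rejected: %s\n", strerror(errno));

	return 0;
}
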
diff --git a/fs/exec.c b/fs/exec.c
index 561ea64829ec..6eea921a7e72 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1011,7 +1011,7 @@ static int exec_mmap(struct mm_struct *mm)
/* Notify parent that we're no longer interested in the old VM */
tsk = current;
old_mm = current->mm;
- mm_release(tsk, old_mm);
+ exec_mm_release(tsk, old_mm);
if (old_mm) {
sync_mm_rss(old_mm);
@@ -1028,10 +1028,23 @@ static int exec_mmap(struct mm_struct *mm)
}
}
task_lock(tsk);
+
+ local_irq_disable();
active_mm = tsk->active_mm;
- tsk->mm = mm;
tsk->active_mm = mm;
+ tsk->mm = mm;
+ /*
+ * This prevents preemption while active_mm is being loaded and
+ * it and mm are being updated, which could cause problems for
+ * lazy tlb mm refcounting when these are updated by context
+ * switches. Not all architectures can handle irqs off over
+ * activate_mm yet.
+ */
+ if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
+ local_irq_enable();
activate_mm(active_mm, mm);
+ if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
+ local_irq_enable();
tsk->mm->vmacache_seqnum = 0;
vmacache_flush(tsk);
task_unlock(tsk);
@@ -1269,6 +1282,8 @@ int flush_old_exec(struct linux_binprm * bprm)
*/
set_mm_exe_file(bprm->mm, bprm->file);
+ would_dump(bprm, bprm->file);
+
/*
* Release all of the old mmap stuff
*/
@@ -1378,7 +1393,7 @@ void setup_new_exec(struct linux_binprm * bprm)
/* An exec changes our domain. We are no longer part of the thread
group */
- current->self_exec_id++;
+ WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1);
flush_signal_handlers(current, 0);
}
EXPORT_SYMBOL(setup_new_exec);
@@ -1814,8 +1829,6 @@ static int __do_execve_file(int fd, struct filename *filename,
if (retval < 0)
goto out;
- would_dump(bprm, bprm->file);
-
retval = exec_binprm(bprm);
if (retval < 0)
goto out;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 28b2609f25c1..d39d90c1b670 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -93,8 +93,10 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
struct inode *inode = file_inode(vmf->vma->vm_file);
struct ext2_inode_info *ei = EXT2_I(inode);
vm_fault_t ret;
+ bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
+ (vmf->vma->vm_flags & VM_SHARED);
- if (vmf->flags & FAULT_FLAG_WRITE) {
+ if (write) {
sb_start_pagefault(inode->i_sb);
file_update_time(vmf->vma->vm_file);
}
@@ -103,7 +105,7 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
up_read(&ei->dax_sem);
- if (vmf->flags & FAULT_FLAG_WRITE)
+ if (write)
sb_end_pagefault(inode->i_sb);
return ret;
}
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 5c3d7b7e4975..d8a03b1afbc3 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -80,6 +80,7 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir)
if (dir)
le16_add_cpu(&desc->bg_used_dirs_count, -1);
spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
+ percpu_counter_inc(&EXT2_SB(sb)->s_freeinodes_counter);
if (dir)
percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter);
mark_buffer_dirty(bh);
@@ -531,7 +532,7 @@ got:
goto fail;
}
- percpu_counter_add(&sbi->s_freeinodes_counter, -1);
+ percpu_counter_dec(&sbi->s_freeinodes_counter);
if (S_ISDIR(mode))
percpu_counter_inc(&sbi->s_dirs_counter);
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index dd8f10db82e9..bd1d68ff3a9f 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -56,6 +56,7 @@
#include <linux/buffer_head.h>
#include <linux/init.h>
+#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
@@ -84,8 +85,8 @@
printk("\n"); \
} while (0)
#else
-# define ea_idebug(f...)
-# define ea_bdebug(f...)
+# define ea_idebug(inode, f...) no_printk(f)
+# define ea_bdebug(bh, f...) no_printk(f)
#endif
static int ext2_xattr_set2(struct inode *, struct buffer_head *,
@@ -838,8 +839,7 @@ ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr, 1);
if (error) {
if (error == -EBUSY) {
- ea_bdebug(bh, "already in cache (%d cache entries)",
- atomic_read(&ext2_xattr_cache->c_entry_count));
+ ea_bdebug(bh, "already in cache");
error = 0;
}
} else
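
Switching the stubbed ea_idebug()/ea_bdebug() to no_printk() keeps the format strings and arguments compile-checked (and evaluated) even when debugging is off, which silences unused-variable warnings without emitting any output. A userspace sketch of the same trick (macro names are illustrative; ##__VA_ARGS__ is a GCC/clang extension):

#include <stdio.h>

static inline __attribute__((format(printf, 1, 2)))
int no_printf(const char *fmt, ...)
{
	(void)fmt;
	return 0;
}

#ifdef DEBUG
# define ea_debug(fmt, ...) fprintf(stderr, fmt "\n", ##__VA_ARGS__)
#else
# define ea_debug(fmt, ...) no_printf(fmt, ##__VA_ARGS__)
#endif
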
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index d203cc935ff8..1ea8fc9ff048 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -24,6 +24,7 @@ struct ext4_system_zone {
struct rb_node node;
ext4_fsblk_t start_blk;
unsigned int count;
+ u32 ino;
};
static struct kmem_cache *ext4_system_zone_cachep;
@@ -45,7 +46,8 @@ void ext4_exit_system_zone(void)
static inline int can_merge(struct ext4_system_zone *entry1,
struct ext4_system_zone *entry2)
{
- if ((entry1->start_blk + entry1->count) == entry2->start_blk)
+ if ((entry1->start_blk + entry1->count) == entry2->start_blk &&
+ entry1->ino == entry2->ino)
return 1;
return 0;
}
@@ -66,9 +68,9 @@ static void release_system_zone(struct ext4_system_blocks *system_blks)
*/
static int add_system_zone(struct ext4_system_blocks *system_blks,
ext4_fsblk_t start_blk,
- unsigned int count)
+ unsigned int count, u32 ino)
{
- struct ext4_system_zone *new_entry = NULL, *entry;
+ struct ext4_system_zone *new_entry, *entry;
struct rb_node **n = &system_blks->root.rb_node, *node;
struct rb_node *parent = NULL, *new_node = NULL;
@@ -79,30 +81,21 @@ static int add_system_zone(struct ext4_system_blocks *system_blks,
n = &(*n)->rb_left;
else if (start_blk >= (entry->start_blk + entry->count))
n = &(*n)->rb_right;
- else {
- if (start_blk + count > (entry->start_blk +
- entry->count))
- entry->count = (start_blk + count -
- entry->start_blk);
- new_node = *n;
- new_entry = rb_entry(new_node, struct ext4_system_zone,
- node);
- break;
- }
+ else /* Unexpected overlap of system zones. */
+ return -EFSCORRUPTED;
}
- if (!new_entry) {
- new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
- GFP_KERNEL);
- if (!new_entry)
- return -ENOMEM;
- new_entry->start_blk = start_blk;
- new_entry->count = count;
- new_node = &new_entry->node;
-
- rb_link_node(new_node, parent, n);
- rb_insert_color(new_node, &system_blks->root);
- }
+ new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
+ GFP_KERNEL);
+ if (!new_entry)
+ return -ENOMEM;
+ new_entry->start_blk = start_blk;
+ new_entry->count = count;
+ new_entry->ino = ino;
+ new_node = &new_entry->node;
+
+ rb_link_node(new_node, parent, n);
+ rb_insert_color(new_node, &system_blks->root);
/* Can we merge to the left? */
node = rb_prev(new_node);
@@ -155,7 +148,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
struct ext4_system_blocks *system_blks,
ext4_fsblk_t start_blk,
- unsigned int count)
+ unsigned int count, ino_t ino)
{
struct ext4_system_zone *entry;
struct rb_node *n;
@@ -179,7 +172,7 @@ static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
n = n->rb_right;
else {
sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
- return 0;
+ return entry->ino == ino;
}
}
return 1;
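
With each system zone tagged by its owning inode, the lookup above can allow a block that falls inside a reserved zone only when the querying inode is the owner (e.g. the journal inode validating its own blocks). A simplified sketch over a sorted array; the kernel keeps these zones in an rbtree and the names below are illustrative:

struct zone {
	unsigned long long start;	/* first block in the zone */
	unsigned int count;		/* number of blocks */
	unsigned int ino;		/* owning inode, 0 for fs metadata */
};

/* Zones are assumed sorted by start and non-overlapping. */
static int range_valid(const struct zone *z, int nzones,
		       unsigned long long start, unsigned int count,
		       unsigned int ino)
{
	for (int i = 0; i < nzones; i++) {
		if (start >= z[i].start + z[i].count)
			continue;		/* zone entirely before range */
		if (start + count <= z[i].start)
			break;			/* sorted: nothing later can hit */
		return z[i].ino == ino;		/* overlap: only the owner may use it */
	}
	return 1;				/* no reserved zone was hit */
}
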
@@ -214,17 +207,16 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
if (n == 0) {
i++;
} else {
- if (!ext4_data_block_valid_rcu(sbi, system_blks,
- map.m_pblk, n)) {
- ext4_error(sb, "blocks %llu-%llu from inode %u "
+ err = add_system_zone(system_blks, map.m_pblk, n, ino);
+ if (err < 0) {
+ if (err == -EFSCORRUPTED) {
+ ext4_error(sb,
+ "blocks %llu-%llu from inode %u "
"overlap system zone", map.m_pblk,
map.m_pblk + map.m_len - 1, ino);
- err = -EFSCORRUPTED;
+ }
break;
}
- err = add_system_zone(system_blks, map.m_pblk, n);
- if (err < 0)
- break;
i += n;
}
}
@@ -260,14 +252,6 @@ int ext4_setup_system_zone(struct super_block *sb)
int flex_size = ext4_flex_bg_size(sbi);
int ret;
- if (!test_opt(sb, BLOCK_VALIDITY)) {
- if (sbi->system_blks)
- ext4_release_system_zone(sb);
- return 0;
- }
- if (sbi->system_blks)
- return 0;
-
system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
if (!system_blks)
return -ENOMEM;
@@ -277,19 +261,19 @@ int ext4_setup_system_zone(struct super_block *sb)
((i < 5) || ((i % flex_size) == 0)))
add_system_zone(system_blks,
ext4_group_first_block_no(sb, i),
- ext4_bg_num_gdb(sb, i) + 1);
+ ext4_bg_num_gdb(sb, i) + 1, 0);
gdp = ext4_get_group_desc(sb, i, NULL);
ret = add_system_zone(system_blks,
- ext4_block_bitmap(sb, gdp), 1);
+ ext4_block_bitmap(sb, gdp), 1, 0);
if (ret)
goto err;
ret = add_system_zone(system_blks,
- ext4_inode_bitmap(sb, gdp), 1);
+ ext4_inode_bitmap(sb, gdp), 1, 0);
if (ret)
goto err;
ret = add_system_zone(system_blks,
ext4_inode_table(sb, gdp),
- sbi->s_itb_per_group);
+ sbi->s_itb_per_group, 0);
if (ret)
goto err;
}
@@ -338,7 +322,7 @@ void ext4_release_system_zone(struct super_block *sb)
call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
}
-int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
+int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
unsigned int count)
{
struct ext4_system_blocks *system_blks;
@@ -350,9 +334,9 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
* mount option.
*/
rcu_read_lock();
- system_blks = rcu_dereference(sbi->system_blks);
- ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
- count);
+ system_blks = rcu_dereference(EXT4_SB(inode->i_sb)->system_blks);
+ ret = ext4_data_block_valid_rcu(EXT4_SB(inode->i_sb), system_blks,
+ start_blk, count, inode->i_ino);
rcu_read_unlock();
return ret;
}
@@ -372,8 +356,7 @@ int ext4_check_blockref(const char *function, unsigned int line,
while (bref < p+max) {
blk = le32_to_cpu(*bref++);
if (blk &&
- unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
- blk, 1))) {
+ unlikely(!ext4_inode_block_valid(inode, blk, 1))) {
es->s_last_error_block = cpu_to_le64(blk);
ext4_error_inode(inode, function, line, blk,
"invalid block");
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0a4461ac4225..6938dff9f04b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2326,23 +2326,47 @@ static inline bool ext4_encrypted_inode(struct inode *inode)
}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
+static inline void ext4_fname_from_fscrypt_name(struct ext4_filename *dst,
+ const struct fscrypt_name *src)
+{
+ memset(dst, 0, sizeof(*dst));
+
+ dst->usr_fname = src->usr_fname;
+ dst->disk_name = src->disk_name;
+ dst->hinfo.hash = src->hash;
+ dst->hinfo.minor_hash = src->minor_hash;
+ dst->crypto_buf = src->crypto_buf;
+}
+
static inline int ext4_fname_setup_filename(struct inode *dir,
- const struct qstr *iname,
- int lookup, struct ext4_filename *fname)
+ const struct qstr *iname,
+ int lookup,
+ struct ext4_filename *fname)
{
struct fscrypt_name name;
int err;
- memset(fname, 0, sizeof(struct ext4_filename));
-
err = fscrypt_setup_filename(dir, iname, lookup, &name);
+ if (err)
+ return err;
- fname->usr_fname = name.usr_fname;
- fname->disk_name = name.disk_name;
- fname->hinfo.hash = name.hash;
- fname->hinfo.minor_hash = name.minor_hash;
- fname->crypto_buf = name.crypto_buf;
- return err;
+ ext4_fname_from_fscrypt_name(fname, &name);
+ return 0;
+}
+
+static inline int ext4_fname_prepare_lookup(struct inode *dir,
+ struct dentry *dentry,
+ struct ext4_filename *fname)
+{
+ struct fscrypt_name name;
+ int err;
+
+ err = fscrypt_prepare_lookup(dir, dentry, &name);
+ if (err)
+ return err;
+
+ ext4_fname_from_fscrypt_name(fname, &name);
+ return 0;
}
static inline void ext4_fname_free_filename(struct ext4_filename *fname)
@@ -2356,19 +2380,27 @@ static inline void ext4_fname_free_filename(struct ext4_filename *fname)
fname->usr_fname = NULL;
fname->disk_name.name = NULL;
}
-#else
+#else /* !CONFIG_EXT4_FS_ENCRYPTION */
static inline int ext4_fname_setup_filename(struct inode *dir,
- const struct qstr *iname,
- int lookup, struct ext4_filename *fname)
+ const struct qstr *iname,
+ int lookup,
+ struct ext4_filename *fname)
{
fname->usr_fname = iname;
fname->disk_name.name = (unsigned char *) iname->name;
fname->disk_name.len = iname->len;
return 0;
}
-static inline void ext4_fname_free_filename(struct ext4_filename *fname) { }
-#endif
+static inline int ext4_fname_prepare_lookup(struct inode *dir,
+ struct dentry *dentry,
+ struct ext4_filename *fname)
+{
+ return ext4_fname_setup_filename(dir, &dentry->d_name, 1, fname);
+}
+
+static inline void ext4_fname_free_filename(struct ext4_filename *fname) { }
+#endif /* !CONFIG_EXT4_FS_ENCRYPTION */
/* dir.c */
extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
@@ -2395,7 +2427,8 @@ void ext4_insert_dentry(struct inode *inode,
struct ext4_filename *fname);
static inline void ext4_update_dx_flag(struct inode *inode)
{
- if (!ext4_has_feature_dir_index(inode->i_sb)) {
+ if (!ext4_has_feature_dir_index(inode->i_sb) &&
+ ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
/* ext4_iget() should have caught this... */
WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
@@ -3147,9 +3180,9 @@ extern void ext4_release_system_zone(struct super_block *sb);
extern int ext4_setup_system_zone(struct super_block *sb);
extern int __init ext4_init_system_zone(void);
extern void ext4_exit_system_zone(void);
-extern int ext4_data_block_valid(struct ext4_sb_info *sbi,
- ext4_fsblk_t start_blk,
- unsigned int count);
+extern int ext4_inode_block_valid(struct inode *inode,
+ ext4_fsblk_t start_blk,
+ unsigned int count);
extern int ext4_check_blockref(const char *, unsigned int,
struct inode *, __le32 *, unsigned int);
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index adf6668b596f..c5794ff64a01 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -157,10 +157,13 @@ struct ext4_ext_path {
(EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
#define EXT_LAST_INDEX(__hdr__) \
(EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
-#define EXT_MAX_EXTENT(__hdr__) \
- (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
+#define EXT_MAX_EXTENT(__hdr__) \
+ ((le16_to_cpu((__hdr__)->eh_max)) ? \
+ ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \
+ : 0)
#define EXT_MAX_INDEX(__hdr__) \
- (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
+ ((le16_to_cpu((__hdr__)->eh_max)) ? \
+ ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) : 0)
static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode)
{
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index f81eb1785af2..36708d9d71cb 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -377,7 +377,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
*/
if (lblock + len <= lblock)
return 0;
- return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
+ return ext4_inode_block_valid(inode, block, len);
}
static int ext4_valid_extent_idx(struct inode *inode,
@@ -385,7 +385,7 @@ static int ext4_valid_extent_idx(struct inode *inode,
{
ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
- return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
+ return ext4_inode_block_valid(inode, block, 1);
}
static int ext4_valid_extent_entries(struct inode *inode,
@@ -498,6 +498,30 @@ int ext4_ext_check_inode(struct inode *inode)
return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}
+static void ext4_cache_extents(struct inode *inode,
+ struct ext4_extent_header *eh)
+{
+ struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
+ ext4_lblk_t prev = 0;
+ int i;
+
+ for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
+ unsigned int status = EXTENT_STATUS_WRITTEN;
+ ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
+ int len = ext4_ext_get_actual_len(ex);
+
+ if (prev && (prev != lblk))
+ ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
+ EXTENT_STATUS_HOLE);
+
+ if (ext4_ext_is_unwritten(ex))
+ status = EXTENT_STATUS_UNWRITTEN;
+ ext4_es_cache_extent(inode, lblk, len,
+ ext4_ext_pblock(ex), status);
+ prev = lblk + len;
+ }
+}
+
static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
struct inode *inode, ext4_fsblk_t pblk, int depth,
@@ -518,40 +542,17 @@ __read_extent_tree_block(const char *function, unsigned int line,
}
if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
return bh;
- if (!ext4_has_feature_journal(inode->i_sb) ||
- (inode->i_ino !=
- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
- err = __ext4_ext_check(function, line, inode,
- ext_block_hdr(bh), depth, pblk);
- if (err)
- goto errout;
- }
+ err = __ext4_ext_check(function, line, inode,
+ ext_block_hdr(bh), depth, pblk);
+ if (err)
+ goto errout;
set_buffer_verified(bh);
/*
* If this is a leaf block, cache all of its entries
*/
if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
struct ext4_extent_header *eh = ext_block_hdr(bh);
- struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
- ext4_lblk_t prev = 0;
- int i;
-
- for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
- unsigned int status = EXTENT_STATUS_WRITTEN;
- ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
- int len = ext4_ext_get_actual_len(ex);
-
- if (prev && (prev != lblk))
- ext4_es_cache_extent(inode, prev,
- lblk - prev, ~0,
- EXTENT_STATUS_HOLE);
-
- if (ext4_ext_is_unwritten(ex))
- status = EXTENT_STATUS_UNWRITTEN;
- ext4_es_cache_extent(inode, lblk, len,
- ext4_ext_pblock(ex), status);
- prev = lblk + len;
- }
+ ext4_cache_extents(inode, eh);
}
return bh;
errout:
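
ext4_cache_extents(), factored out above, walks a leaf's sorted extents and also records the holes between them. The hole detection reduces to a simple scan; a hedged standalone sketch with made-up sample data:

#include <stdio.h>

struct extent {
	unsigned int lblk;	/* first logical block covered */
	unsigned int len;	/* number of blocks */
};

/* For extents sorted by lblk and non-overlapping, print the gaps
 * ("holes") between consecutive extents, mirroring the prev/lblk
 * comparison in ext4_cache_extents(). */
static void report_holes(const struct extent *ex, int n)
{
	unsigned int prev = 0;

	for (int i = 0; i < n; i++) {
		if (prev && prev != ex[i].lblk)
			printf("hole: blocks [%u, %u)\n", prev, ex[i].lblk);
		prev = ex[i].lblk + ex[i].len;
	}
}

int main(void)
{
	const struct extent ex[] = { { 0, 4 }, { 8, 2 }, { 10, 6 }, { 32, 1 } };

	report_holes(ex, 4);	/* prints holes [4, 8) and [16, 32) */
	return 0;
}
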
@@ -899,6 +900,8 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
path[0].p_bh = NULL;
i = depth;
+ if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
+ ext4_cache_extents(inode, eh);
/* walk through the tree */
while (i) {
ext_debug("depth %d: num %d, max %d\n",
@@ -2898,7 +2901,7 @@ again:
* in use to avoid freeing it when removing blocks.
*/
if (sbi->s_cluster_ratio > 1) {
- pblk = ext4_ext_pblock(ex) + end - ee_block + 2;
+ pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
partial_cluster =
-(long long) EXT4_B2C(sbi, pblk);
}
@@ -3438,8 +3441,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
(unsigned long long)map->m_lblk, map_len);
sbi = EXT4_SB(inode->i_sb);
- eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
- inode->i_sb->s_blocksize_bits;
+ eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
+ >> inode->i_sb->s_blocksize_bits;
if (eof_block < map->m_lblk + map_len)
eof_block = map->m_lblk + map_len;
@@ -3694,8 +3697,8 @@ static int ext4_split_convert_extents(handle_t *handle,
__func__, inode->i_ino,
(unsigned long long)map->m_lblk, map->m_len);
- eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
- inode->i_sb->s_blocksize_bits;
+ eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
+ >> inode->i_sb->s_blocksize_bits;
if (eof_block < map->m_lblk + map->m_len)
eof_block = map->m_lblk + map->m_len;
/*
diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
index 4b99e2db95b8..6f3f245f3a80 100644
--- a/fs/ext4/fsmap.c
+++ b/fs/ext4/fsmap.c
@@ -108,6 +108,9 @@ static int ext4_getfsmap_helper(struct super_block *sb,
/* Are we just counting mappings? */
if (info->gfi_head->fmh_count == 0) {
+ if (info->gfi_head->fmh_entries == UINT_MAX)
+ return EXT4_QUERY_RANGE_ABORT;
+
if (rec_fsblk > info->gfi_next_fsblk)
info->gfi_head->fmh_entries++;
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 5508baa11bb6..8a28d47bd502 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -44,30 +44,28 @@
*/
static int ext4_sync_parent(struct inode *inode)
{
- struct dentry *dentry = NULL;
- struct inode *next;
+ struct dentry *dentry, *next;
int ret = 0;
if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
return 0;
- inode = igrab(inode);
+ dentry = d_find_any_alias(inode);
+ if (!dentry)
+ return 0;
while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
- dentry = d_find_any_alias(inode);
- if (!dentry)
- break;
- next = igrab(d_inode(dentry->d_parent));
+
+ next = dget_parent(dentry);
dput(dentry);
- if (!next)
- break;
- iput(inode);
- inode = next;
+ dentry = next;
+ inode = dentry->d_inode;
+
/*
* The directory inode may have gone through rmdir by now. But
* the inode itself and its blocks are still allocated (we hold
- * a reference to the inode so it didn't go through
- * ext4_evict_inode()) and so we are safe to flush metadata
- * blocks and the inode.
+ * a reference to the inode via its dentry), so it didn't go
+ * through ext4_evict_inode(), and so we are safe to flush
+ * metadata blocks and the inode.
*/
ret = sync_mapping_buffers(inode->i_mapping);
if (ret)
@@ -76,7 +74,7 @@ static int ext4_sync_parent(struct inode *inode)
if (ret)
break;
}
- iput(inode);
+ dput(dentry);
return ret;
}
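
ext4_sync_parent() above is the in-kernel side of making a new directory entry durable. The userspace counterpart, shown here as a hedged sketch, is to fsync() the containing directory after creating a file so the entry itself, not just the data, is persisted:

#include <fcntl.h>
#include <libgen.h>
#include <stdio.h>
#include <unistd.h>

static int fsync_parent_dir(const char *path)
{
	char buf[4096];
	int dirfd, ret;

	snprintf(buf, sizeof(buf), "%s", path);	/* dirname() may modify its arg */
	dirfd = open(dirname(buf), O_RDONLY | O_DIRECTORY);
	if (dirfd < 0)
		return -1;
	ret = fsync(dirfd);
	close(dirfd);
	return ret;
}
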
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index dafa7e4aaecb..16abe23b1417 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -665,7 +665,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
* block has been written back to disk. (Yes, these values are
* somewhat arbitrary...)
*/
-#define RECENTCY_MIN 5
+#define RECENTCY_MIN 60
#define RECENTCY_DIRTY 300
static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
@@ -1358,6 +1358,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
handle_t *handle;
ext4_fsblk_t blk;
int num, ret = 0, used_blks = 0;
+ unsigned long used_inos = 0;
/* This should not happen, but just to be sure check this */
if (sb_rdonly(sb)) {
@@ -1388,22 +1389,37 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
* used inodes so we need to skip blocks with used inodes in
* inode table.
*/
- if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
- used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
- ext4_itable_unused_count(sb, gdp)),
- sbi->s_inodes_per_block);
-
- if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
- ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
- ext4_itable_unused_count(sb, gdp)) <
- EXT4_FIRST_INO(sb)))) {
- ext4_error(sb, "Something is wrong with group %u: "
- "used itable blocks: %d; "
- "itable unused count: %u",
- group, used_blks,
- ext4_itable_unused_count(sb, gdp));
- ret = 1;
- goto err_out;
+ if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
+ used_inos = EXT4_INODES_PER_GROUP(sb) -
+ ext4_itable_unused_count(sb, gdp);
+ used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);
+
+ /* Bogus inode unused count? */
+ if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
+ ext4_error(sb, "Something is wrong with group %u: "
+ "used itable blocks: %d; "
+ "itable unused count: %u",
+ group, used_blks,
+ ext4_itable_unused_count(sb, gdp));
+ ret = 1;
+ goto err_out;
+ }
+
+ used_inos += group * EXT4_INODES_PER_GROUP(sb);
+ /*
+ * Are there some uninitialized inodes in the inode table
+ * before the first normal inode?
+ */
+ if ((used_blks != sbi->s_itb_per_group) &&
+ (used_inos < EXT4_FIRST_INO(sb))) {
+ ext4_error(sb, "Something is wrong with group %u: "
+ "itable unused count: %u; "
+ "itables initialized count: %ld",
+ group, ext4_itable_unused_count(sb, gdp),
+ used_inos);
+ ret = 1;
+ goto err_out;
+ }
}
blk = ext4_inode_table(sb, gdp) + used_blks;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index e1801b288847..a5442528a60d 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -842,8 +842,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
else if (ext4_should_journal_data(inode))
flags |= EXT4_FREE_BLOCKS_FORGET;
- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
- count)) {
+ if (!ext4_inode_block_valid(inode, block_to_free, count)) {
EXT4_ERROR_INODE(inode, "attempt to clear invalid "
"blocks %llu len %lu",
(unsigned long long) block_to_free, count);
@@ -1005,8 +1004,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
if (!nr)
continue; /* A hole */
- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
- nr, 1)) {
+ if (!ext4_inode_block_valid(inode, nr, 1)) {
EXT4_ERROR_INODE(inode,
"invalid indirect mapped "
"block %lu (level %d)",
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 4572cb057951..c95246187659 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1921,6 +1921,7 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
ext4_write_lock_xattr(inode, &no_expand);
if (!ext4_has_inline_data(inode)) {
+ ext4_write_unlock_xattr(inode, &no_expand);
*has_inline = 0;
ext4_journal_stop(handle);
return 0;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 23b4b1745a39..7959aae4857e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -203,6 +203,7 @@ void ext4_evict_inode(struct inode *inode)
*/
int extra_credits = 6;
struct ext4_xattr_inode_array *ea_inode_array = NULL;
+ bool freeze_protected = false;
trace_ext4_evict_inode(inode);
@@ -250,9 +251,14 @@ void ext4_evict_inode(struct inode *inode)
/*
* Protect us against freezing - iput() caller didn't have to have any
- * protection against it
+ * protection against it. When we are in a running transaction though,
+ * we are already protected against freezing and we cannot grab further
+ * protection due to lock ordering constraints.
*/
- sb_start_intwrite(inode->i_sb);
+ if (!ext4_journal_current_handle()) {
+ sb_start_intwrite(inode->i_sb);
+ freeze_protected = true;
+ }
if (!IS_NOQUOTA(inode))
extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
@@ -271,7 +277,8 @@ void ext4_evict_inode(struct inode *inode)
* cleaned up.
*/
ext4_orphan_del(NULL, inode);
- sb_end_intwrite(inode->i_sb);
+ if (freeze_protected)
+ sb_end_intwrite(inode->i_sb);
goto no_delete;
}
@@ -312,7 +319,8 @@ void ext4_evict_inode(struct inode *inode)
stop_handle:
ext4_journal_stop(handle);
ext4_orphan_del(NULL, inode);
- sb_end_intwrite(inode->i_sb);
+ if (freeze_protected)
+ sb_end_intwrite(inode->i_sb);
ext4_xattr_inode_array_free(ea_inode_array);
goto no_delete;
}
@@ -341,7 +349,8 @@ stop_handle:
else
ext4_free_inode(handle, inode);
ext4_journal_stop(handle);
- sb_end_intwrite(inode->i_sb);
+ if (freeze_protected)
+ sb_end_intwrite(inode->i_sb);
ext4_xattr_inode_array_free(ea_inode_array);
return;
no_delete:
@@ -412,8 +421,7 @@ static int __check_block_validity(struct inode *inode, const char *func,
(inode->i_ino ==
le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
return 0;
- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
- map->m_len)) {
+ if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
ext4_error_inode(inode, func, line, map->m_pblk,
"lblock %lu mapped to illegal pblock %llu "
"(length %d)", (unsigned long) map->m_lblk,
@@ -2064,13 +2072,13 @@ static int __ext4_journalled_writepage(struct page *page,
if (!ret)
ret = err;
- if (!ext4_has_inline_data(inode))
- ext4_walk_page_buffers(NULL, page_bufs, 0, len,
- NULL, bput_one);
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
unlock_page(page);
out_no_pagelock:
+ if (!inline_data && page_bufs)
+ ext4_walk_page_buffers(NULL, page_bufs, 0, len,
+ NULL, bput_one);
brelse(inode_bh);
return ret;
}
@@ -2128,7 +2136,7 @@ static int ext4_writepage(struct page *page,
bool keep_towrite = false;
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
- ext4_invalidatepage(page, 0, PAGE_SIZE);
+ inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
unlock_page(page);
return -EIO;
}
@@ -3848,6 +3856,11 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
ssize_t ret;
+ loff_t offset = iocb->ki_pos;
+ loff_t size = i_size_read(inode);
+
+ if (offset >= size)
+ return 0;
/*
* Shared inode_lock is enough for us - it protects against concurrent
@@ -4690,7 +4703,7 @@ make_io:
if (end > table)
end = table;
while (b <= end)
- sb_breadahead(sb, b++);
+ sb_breadahead_unmovable(sb, b++);
}
/*
@@ -5058,7 +5071,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ret = 0;
if (ei->i_file_acl &&
- !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
+ !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
ext4_error_inode(inode, function, line, 0,
"iget: bad extended attribute block %llu",
ei->i_file_acl);
@@ -5140,7 +5153,7 @@ static int ext4_inode_blocks_set(handle_t *handle,
struct ext4_inode_info *ei)
{
struct inode *inode = &(ei->vfs_inode);
- u64 i_blocks = inode->i_blocks;
+ u64 i_blocks = READ_ONCE(inode->i_blocks);
struct super_block *sb = inode->i_sb;
if (i_blocks <= ~0U) {
@@ -5195,7 +5208,7 @@ static int other_inode_match(struct inode * inode, unsigned long ino,
(inode->i_state & I_DIRTY_TIME)) {
struct ext4_inode_info *ei = EXT4_I(inode);
- inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
+ inode->i_state &= ~I_DIRTY_TIME;
spin_unlock(&inode->i_lock);
spin_lock(&ei->i_raw_lock);
@@ -5253,7 +5266,7 @@ static int ext4_do_update_inode(handle_t *handle,
struct ext4_inode_info *ei = EXT4_I(inode);
struct buffer_head *bh = iloc->bh;
struct super_block *sb = inode->i_sb;
- int err = 0, rc, block;
+ int err = 0, block;
int need_datasync = 0, set_large_file = 0;
uid_t i_uid;
gid_t i_gid;
@@ -5266,6 +5279,12 @@ static int ext4_do_update_inode(handle_t *handle,
if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
+ err = ext4_inode_blocks_set(handle, raw_inode, ei);
+ if (err) {
+ spin_unlock(&ei->i_raw_lock);
+ goto out_brelse;
+ }
+
raw_inode->i_mode = cpu_to_le16(inode->i_mode);
i_uid = i_uid_read(inode);
i_gid = i_gid_read(inode);
@@ -5299,18 +5318,13 @@ static int ext4_do_update_inode(handle_t *handle,
EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
- err = ext4_inode_blocks_set(handle, raw_inode, ei);
- if (err) {
- spin_unlock(&ei->i_raw_lock);
- goto out_brelse;
- }
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
raw_inode->i_file_acl_high =
cpu_to_le16(ei->i_file_acl >> 32);
raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
- if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
+ if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) {
ext4_isize_set(raw_inode, ei->i_disksize);
need_datasync = 1;
}
@@ -5364,9 +5378,9 @@ static int ext4_do_update_inode(handle_t *handle,
bh->b_data);
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- rc = ext4_handle_dirty_metadata(handle, NULL, bh);
- if (!err)
- err = rc;
+ err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ if (err)
+ goto out_brelse;
ext4_clear_inode_state(inode, EXT4_STATE_NEW);
if (set_large_file) {
BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 783c54bb2ce7..21c9ebfe8347 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -1092,7 +1092,10 @@ resizefs_out:
err = ext4_journal_get_write_access(handle, sbi->s_sbh);
if (err)
goto pwsalt_err_journal;
+ lock_buffer(sbi->s_sbh);
generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
+ ext4_superblock_csum_set(sb);
+ unlock_buffer(sbi->s_sbh);
err = ext4_handle_dirty_metadata(handle, NULL,
sbi->s_sbh);
pwsalt_err_journal:
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 71121fcf9e8c..db47a06cfb74 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1901,8 +1901,15 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
BUG_ON(buddy == NULL);
k = mb_find_next_zero_bit(buddy, max, 0);
- BUG_ON(k >= max);
-
+ if (k >= max) {
+ ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
+ "%d free clusters of order %d. But found 0",
+ grp->bb_counters[i], i);
+ ext4_mark_group_bitmap_corrupted(ac->ac_sb,
+ e4b->bd_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+ break;
+ }
ac->ac_found++;
ac->ac_b_ex.fe_len = 1 << i;
@@ -1936,7 +1943,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
int free;
free = e4b->bd_info->bb_free;
- BUG_ON(free <= 0);
+ if (WARN_ON(free <= 0))
+ return;
i = e4b->bd_info->bb_first_free;
@@ -1959,7 +1967,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
}
mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
- BUG_ON(ex.fe_len <= 0);
+ if (WARN_ON(ex.fe_len <= 0))
+ break;
if (free < ex.fe_len) {
ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
"%d free clusters as per "
@@ -2981,7 +2990,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
- if (!ext4_data_block_valid(sbi, block, len)) {
+ if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
"fs metadata", block, block+len);
/* File system mounted not to panic on error
@@ -4681,6 +4690,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
ext4_group_first_block_no(sb, group) +
EXT4_C2B(sbi, cluster),
"Block already on to-be-freed list");
+ kmem_cache_free(ext4_free_data_cachep, new_entry);
return 0;
}
}
@@ -4745,7 +4755,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
sbi = EXT4_SB(sb);
if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
- !ext4_data_block_valid(sbi, block, count)) {
+ !ext4_inode_block_valid(inode, block, count)) {
ext4_error(sb, "Freeing blocks not in datazone - "
"block = %llu, count = %lu", block, count);
goto error_return;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index a8f2e3549bb9..358f6378882f 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1309,8 +1309,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
ext4_match(fname, de)) {
/* found a match - just to be sure, do
* a full check */
- if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
- bh->b_size, offset))
+ if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
+ buf_size, offset))
return -1;
*res_dir = de;
return 1;
@@ -1343,7 +1343,7 @@ static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
}
/*
- * ext4_find_entry()
+ * __ext4_find_entry()
*
* finds an entry in the specified directory with the wanted name. It
* returns the cache buffer in which the entry was found, and the entry
@@ -1353,39 +1353,32 @@ static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
* The returned buffer_head has ->b_count elevated. The caller is expected
* to brelse() it when appropriate.
*/
-static struct buffer_head * ext4_find_entry (struct inode *dir,
- const struct qstr *d_name,
- struct ext4_dir_entry_2 **res_dir,
- int *inlined)
+static struct buffer_head *__ext4_find_entry(struct inode *dir,
+ struct ext4_filename *fname,
+ struct ext4_dir_entry_2 **res_dir,
+ int *inlined)
{
struct super_block *sb;
struct buffer_head *bh_use[NAMEI_RA_SIZE];
struct buffer_head *bh, *ret = NULL;
ext4_lblk_t start, block;
- const u8 *name = d_name->name;
+ const u8 *name = fname->usr_fname->name;
size_t ra_max = 0; /* Number of bh's in the readahead
buffer, bh_use[] */
size_t ra_ptr = 0; /* Current index into readahead
buffer */
ext4_lblk_t nblocks;
int i, namelen, retval;
- struct ext4_filename fname;
*res_dir = NULL;
sb = dir->i_sb;
- namelen = d_name->len;
+ namelen = fname->usr_fname->len;
if (namelen > EXT4_NAME_LEN)
return NULL;
- retval = ext4_fname_setup_filename(dir, d_name, 1, &fname);
- if (retval == -ENOENT)
- return NULL;
- if (retval)
- return ERR_PTR(retval);
-
if (ext4_has_inline_data(dir)) {
int has_inline_data = 1;
- ret = ext4_find_inline_entry(dir, &fname, res_dir,
+ ret = ext4_find_inline_entry(dir, fname, res_dir,
&has_inline_data);
if (has_inline_data) {
if (inlined)
@@ -1405,7 +1398,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
goto restart;
}
if (is_dx(dir)) {
- ret = ext4_dx_find_entry(dir, &fname, res_dir);
+ ret = ext4_dx_find_entry(dir, fname, res_dir);
/*
* On success, or if the error was file not found,
* return. Otherwise, fall back to doing a search the
@@ -1470,7 +1463,7 @@ restart:
goto cleanup_and_exit;
}
set_buffer_verified(bh);
- i = search_dirblock(bh, dir, &fname,
+ i = search_dirblock(bh, dir, fname,
block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
if (i == 1) {
EXT4_I(dir)->i_dir_start_lookup = block;
@@ -1501,10 +1494,50 @@ cleanup_and_exit:
/* Clean up the read-ahead blocks */
for (; ra_ptr < ra_max; ra_ptr++)
brelse(bh_use[ra_ptr]);
- ext4_fname_free_filename(&fname);
return ret;
}
+static struct buffer_head *ext4_find_entry(struct inode *dir,
+ const struct qstr *d_name,
+ struct ext4_dir_entry_2 **res_dir,
+ int *inlined)
+{
+ int err;
+ struct ext4_filename fname;
+ struct buffer_head *bh;
+
+ err = ext4_fname_setup_filename(dir, d_name, 1, &fname);
+ if (err == -ENOENT)
+ return NULL;
+ if (err)
+ return ERR_PTR(err);
+
+ bh = __ext4_find_entry(dir, &fname, res_dir, inlined);
+
+ ext4_fname_free_filename(&fname);
+ return bh;
+}
+
+static struct buffer_head *ext4_lookup_entry(struct inode *dir,
+ struct dentry *dentry,
+ struct ext4_dir_entry_2 **res_dir)
+{
+ int err;
+ struct ext4_filename fname;
+ struct buffer_head *bh;
+
+ err = ext4_fname_prepare_lookup(dir, dentry, &fname);
+ if (err == -ENOENT)
+ return NULL;
+ if (err)
+ return ERR_PTR(err);
+
+ bh = __ext4_find_entry(dir, &fname, res_dir, NULL);
+
+ ext4_fname_free_filename(&fname);
+ return bh;
+}
+
static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
struct ext4_filename *fname,
struct ext4_dir_entry_2 **res_dir)
@@ -1563,16 +1596,11 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
struct inode *inode;
struct ext4_dir_entry_2 *de;
struct buffer_head *bh;
- int err;
-
- err = fscrypt_prepare_lookup(dir, dentry, flags);
- if (err)
- return ERR_PTR(err);
if (dentry->d_name.len > EXT4_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
- bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
+ bh = ext4_lookup_entry(dir, dentry, &de);
if (IS_ERR(bh))
return (struct dentry *) bh;
inode = NULL;
@@ -1732,7 +1760,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
blocksize, hinfo, map);
map -= count;
dx_sort_map(map, count);
- /* Split the existing block in the middle, size-wise */
+ /* Ensure that neither split block is over half full */
size = 0;
move = 0;
for (i = count-1; i >= 0; i--) {
@@ -1742,8 +1770,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
size += map[i].size;
move++;
}
- /* map index at which we will split */
- split = count - move;
+ /*
+ * map index at which we will split
+ *
+ * If the sum of active entries didn't exceed half the block size, just
+ * split it in half by count; each resulting block will have at least
+ * half the space free.
+ */
+ if (i > 0)
+ split = count - move;
+ else
+ split = count/2;
+
hash2 = map[split].hash;
continued = hash2 == map[split - 1].hash;
dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
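
The split-point choice above can be read as a small standalone function. A hedged sketch follows; the break condition inside the loop is assumed from the surrounding upstream code, since this hunk only shows the loop's tail, and the struct name is illustrative:

struct map_entry {
	unsigned int hash;
	unsigned int size;	/* on-disk size of the directory entry */
};

/* Walk entries from the tail, accumulating sizes, and stop once moving one
 * more entry would put over half the block in the second half; if the live
 * entries never reach half a block, fall back to splitting by count so
 * neither resulting block is over half full. */
static int choose_split(const struct map_entry *map, int count,
			unsigned int blocksize)
{
	unsigned int size = 0;
	int move = 0, i;

	for (i = count - 1; i >= 0; i--) {
		if (size + map[i].size / 2 > blocksize / 2)
			break;
		size += map[i].size;
		move++;
	}
	return (i > 0) ? count - move : count / 2;
}
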
@@ -2068,6 +2106,9 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
if (!dentry->d_name.len)
return -EINVAL;
+ if (fscrypt_is_nokey_name(dentry))
+ return -ENOKEY;
+
retval = ext4_fname_setup_filename(dir, &dentry->d_name, 0, &fname);
if (retval)
return retval;
@@ -2274,11 +2315,10 @@ again:
(frame - 1)->bh);
if (err)
goto journal_error;
- if (restart) {
- err = ext4_handle_dirty_dx_node(handle, dir,
- frame->bh);
+ err = ext4_handle_dirty_dx_node(handle, dir,
+ frame->bh);
+ if (err)
goto journal_error;
- }
} else {
struct dx_root *dxroot;
memcpy((char *) entries2, (char *) entries,
@@ -2344,7 +2384,7 @@ int ext4_generic_delete_entry(handle_t *handle,
de = (struct ext4_dir_entry_2 *)entry_buf;
while (i < buf_size - csum_size) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
- bh->b_data, bh->b_size, i))
+ entry_buf, buf_size, i))
return -EFSCORRUPTED;
if (de == de_del) {
if (pde)
@@ -3396,12 +3436,35 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
return retval;
}
}
- brelse(ent->bh);
- ent->bh = NULL;
return 0;
}
+static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,
+ unsigned ino, unsigned file_type)
+{
+ struct ext4_renament old = *ent;
+ int retval = 0;
+
+ /*
+ * old->de could have moved out from under us while the directory was
+ * being made indexed, so it may no longer be valid and we need to
+ * find it again before resetting the old inode info.
+ */
+ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+ if (IS_ERR(old.bh))
+ retval = PTR_ERR(old.bh);
+ if (!old.bh)
+ retval = -ENOENT;
+ if (retval) {
+ ext4_std_error(old.dir->i_sb, retval);
+ return;
+ }
+
+ ext4_setent(handle, &old, ino, file_type);
+ brelse(old.bh);
+}
+
static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
const struct qstr *d_name)
{
@@ -3561,14 +3624,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
retval = -ENOENT;
if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
- goto end_rename;
+ goto release_bh;
new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
&new.de, &new.inlined);
if (IS_ERR(new.bh)) {
retval = PTR_ERR(new.bh);
new.bh = NULL;
- goto end_rename;
+ goto release_bh;
}
if (new.bh) {
if (!new.inode) {
@@ -3585,18 +3648,17 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
- handle = NULL;
- goto end_rename;
+ goto release_bh;
}
} else {
whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
if (IS_ERR(whiteout)) {
retval = PTR_ERR(whiteout);
- whiteout = NULL;
- goto end_rename;
+ goto release_bh;
}
}
+ old_file_type = old.de->file_type;
if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
ext4_handle_sync(handle);
@@ -3624,7 +3686,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
force_reread = (new.dir->i_ino == old.dir->i_ino &&
ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA));
- old_file_type = old.de->file_type;
if (whiteout) {
/*
* Do this before adding a new entry, so the old entry is sure
@@ -3696,17 +3757,23 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
retval = 0;
end_rename:
- brelse(old.dir_bh);
- brelse(old.bh);
- brelse(new.bh);
if (whiteout) {
- if (retval)
+ if (retval) {
+ ext4_resetent(handle, &old,
+ old.inode->i_ino, old_file_type);
drop_nlink(whiteout);
+ ext4_orphan_add(handle, whiteout);
+ }
unlock_new_inode(whiteout);
+ ext4_journal_stop(handle);
iput(whiteout);
- }
- if (handle)
+ } else {
ext4_journal_stop(handle);
+ }
+release_bh:
+ brelse(old.dir_bh);
+ brelse(old.bh);
+ brelse(new.bh);
return retval;
}
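The do_split() hunk above changes how the split index is picked so it can never go negative when a single oversized entry dominates the block. Below is a minimal userspace sketch of that selection logic; pick_split(), the simplified dx_map_entry layout and the 4096-byte block size are illustrative assumptions, not the kernel structures.

#include <stdio.h>

struct dx_map_entry { unsigned int hash; unsigned int size; };

/* Mirror of the loop in do_split(): walk entries from the tail until
 * moving one more would put over half the block into the second half.
 * If the tail entries never reach that point (i never stays above 0),
 * fall back to splitting by count so the split index cannot underflow. */
static int pick_split(const struct dx_map_entry *map, int count, int blocksize)
{
	int size = 0, move = 0, i;

	for (i = count - 1; i >= 0; i--) {
		if (size + map[i].size / 2 > blocksize / 2)
			break;
		size += map[i].size;
		move++;
	}
	return (i > 0) ? count - move : count / 2;
}

int main(void)
{
	/* One huge entry followed by small ones: the old code could pick
	 * split == 0 and then read map[split - 1]; the fallback avoids that. */
	struct dx_map_entry map[] = {
		{ 0x10, 3000 }, { 0x20, 40 }, { 0x30, 40 }, { 0x40, 40 },
	};

	printf("split index: %d\n", pick_split(map, 4, 4096));
	return 0;
}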
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index ef552d93708e..8098255c2801 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -861,8 +861,10 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
BUFFER_TRACE(dind, "get_write_access");
err = ext4_journal_get_write_access(handle, dind);
- if (unlikely(err))
+ if (unlikely(err)) {
ext4_std_error(sb, err);
+ goto errout;
+ }
/* ext4_reserve_inode_write() gets a reference on the iloc */
err = ext4_reserve_inode_write(handle, inode, &iloc);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d44fc3f579e1..535ab6713732 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -65,10 +65,10 @@ static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
-static void ext4_mark_recovery_complete(struct super_block *sb,
+static int ext4_mark_recovery_complete(struct super_block *sb,
struct ext4_super_block *es);
-static void ext4_clear_journal_err(struct super_block *sb,
- struct ext4_super_block *es);
+static int ext4_clear_journal_err(struct super_block *sb,
+ struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
@@ -388,7 +388,8 @@ static void save_error_info(struct super_block *sb, const char *func,
unsigned int line)
{
__save_error_info(sb, func, line);
- ext4_commit_super(sb, 1);
+ if (!bdev_read_only(sb->s_bdev))
+ ext4_commit_super(sb, 1);
}
/*
@@ -453,19 +454,17 @@ static bool system_going_down(void)
static void ext4_handle_error(struct super_block *sb)
{
+ journal_t *journal = EXT4_SB(sb)->s_journal;
+
if (test_opt(sb, WARN_ON_ERROR))
WARN_ON_ONCE(1);
- if (sb_rdonly(sb))
+ if (sb_rdonly(sb) || test_opt(sb, ERRORS_CONT))
return;
- if (!test_opt(sb, ERRORS_CONT)) {
- journal_t *journal = EXT4_SB(sb)->s_journal;
-
- EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
- if (journal)
- jbd2_journal_abort(journal, -EIO);
- }
+ EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
+ if (journal)
+ jbd2_journal_abort(journal, -EIO);
/*
* We force ERRORS_RO behavior when system is rebooting. Otherwise we
* could panic during 'reboot -f' as the underlying device got already
@@ -1747,8 +1746,8 @@ static const struct mount_opts {
{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
MOPT_CLEAR | MOPT_Q},
- {Opt_usrjquota, 0, MOPT_Q},
- {Opt_grpjquota, 0, MOPT_Q},
+ {Opt_usrjquota, 0, MOPT_Q | MOPT_STRING},
+ {Opt_grpjquota, 0, MOPT_Q | MOPT_STRING},
{Opt_offusrjquota, 0, MOPT_Q},
{Opt_offgrpjquota, 0, MOPT_Q},
{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
@@ -1988,6 +1987,16 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
#endif
} else if (token == Opt_dax) {
#ifdef CONFIG_FS_DAX
+ if (is_remount && test_opt(sb, DAX)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and dax");
+ return -1;
+ }
+ if (is_remount && !(sbi->s_mount_opt & EXT4_MOUNT_DAX)) {
+ ext4_msg(sb, KERN_ERR, "can't change "
+ "dax mount option while remounting");
+ return -1;
+ }
ext4_msg(sb, KERN_WARNING,
"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
sbi->s_mount_opt |= m->mount_opt;
@@ -2248,6 +2257,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
ext4_msg(sb, KERN_ERR, "revision level too high, "
"forcing read-only mode");
err = -EROFS;
+ goto done;
}
if (read_only)
goto done;
@@ -2619,9 +2629,6 @@ static void ext4_orphan_cleanup(struct super_block *sb,
sb->s_flags &= ~SB_RDONLY;
}
#ifdef CONFIG_QUOTA
- /* Needed for iput() to work correctly and not trash data */
- sb->s_flags |= SB_ACTIVE;
-
/*
* Turn on quotas which were not enabled for read-only mounts if
* filesystem has quota feature, so that they are updated correctly.
@@ -3523,7 +3530,8 @@ int ext4_calculate_overhead(struct super_block *sb)
*/
if (sbi->s_journal && !sbi->journal_bdev)
overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
- else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
+ else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
+ /* j_inum for internal journal is non-zero */
j_inode = ext4_get_journal_inode(sb, j_inum);
if (j_inode) {
j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
@@ -4031,7 +4039,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
sbi->s_inodes_per_group > blocksize * 8) {
ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
- sbi->s_blocks_per_group);
+ sbi->s_inodes_per_group);
goto failed_mount;
}
sbi->s_itb_per_group = sbi->s_inodes_per_group /
@@ -4162,9 +4170,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
EXT4_BLOCKS_PER_GROUP(sb) - 1);
do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
- ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
+ ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
"(block count %llu, first data block %u, "
- "blocks per group %lu)", sbi->s_groups_count,
+ "blocks per group %lu)", blocks_count,
ext4_blocks_count(es),
le32_to_cpu(es->s_first_data_block),
EXT4_BLOCKS_PER_GROUP(sb));
@@ -4207,7 +4215,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
/* Pre-read the descriptors into the buffer cache */
for (i = 0; i < db_count; i++) {
block = descriptor_loc(sb, logical_sb_block, i);
- sb_breadahead(sb, block);
+ sb_breadahead_unmovable(sb, block);
}
for (i = 0; i < db_count; i++) {
@@ -4460,11 +4468,13 @@ no_journal:
ext4_set_resv_clusters(sb);
- err = ext4_setup_system_zone(sb);
- if (err) {
- ext4_msg(sb, KERN_ERR, "failed to initialize system "
- "zone (%d)", err);
- goto failed_mount4a;
+ if (test_opt(sb, BLOCK_VALIDITY)) {
+ err = ext4_setup_system_zone(sb);
+ if (err) {
+ ext4_msg(sb, KERN_ERR, "failed to initialize system "
+ "zone (%d)", err);
+ goto failed_mount4a;
+ }
}
ext4_ext_init(sb);
@@ -4532,7 +4542,9 @@ no_journal:
EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
if (needs_recovery) {
ext4_msg(sb, KERN_INFO, "recovery complete");
- ext4_mark_recovery_complete(sb, es);
+ err = ext4_mark_recovery_complete(sb, es);
+ if (err)
+ goto failed_mount8;
}
if (EXT4_SB(sb)->s_journal) {
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
@@ -4575,10 +4587,9 @@ cantfind_ext4:
ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
goto failed_mount;
-#ifdef CONFIG_QUOTA
failed_mount8:
ext4_unregister_sysfs(sb);
-#endif
+ kobject_put(&sbi->s_kobj);
failed_mount7:
ext4_unregister_li_request(sb);
failed_mount6:
@@ -4714,7 +4725,8 @@ static journal_t *ext4_get_journal(struct super_block *sb,
struct inode *journal_inode;
journal_t *journal;
- BUG_ON(!ext4_has_feature_journal(sb));
+ if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
+ return NULL;
journal_inode = ext4_get_journal_inode(sb, journal_inum);
if (!journal_inode)
@@ -4744,7 +4756,8 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
struct ext4_super_block *es;
struct block_device *bdev;
- BUG_ON(!ext4_has_feature_journal(sb));
+ if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
+ return NULL;
bdev = ext4_blkdev_get(j_dev, sb);
if (bdev == NULL)
@@ -4835,8 +4848,10 @@ static int ext4_load_journal(struct super_block *sb,
dev_t journal_dev;
int err = 0;
int really_read_only;
+ int journal_dev_ro;
- BUG_ON(!ext4_has_feature_journal(sb));
+ if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
+ return -EFSCORRUPTED;
if (journal_devnum &&
journal_devnum != le32_to_cpu(es->s_journal_dev)) {
@@ -4846,7 +4861,31 @@ static int ext4_load_journal(struct super_block *sb,
} else
journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
- really_read_only = bdev_read_only(sb->s_bdev);
+ if (journal_inum && journal_dev) {
+ ext4_msg(sb, KERN_ERR,
+ "filesystem has both journal inode and journal device!");
+ return -EINVAL;
+ }
+
+ if (journal_inum) {
+ journal = ext4_get_journal(sb, journal_inum);
+ if (!journal)
+ return -EINVAL;
+ } else {
+ journal = ext4_get_dev_journal(sb, journal_dev);
+ if (!journal)
+ return -EINVAL;
+ }
+
+ journal_dev_ro = bdev_read_only(journal->j_dev);
+ really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;
+
+ if (journal_dev_ro && !sb_rdonly(sb)) {
+ ext4_msg(sb, KERN_ERR,
+ "journal device read-only, try mounting with '-o ro'");
+ err = -EROFS;
+ goto err_out;
+ }
/*
* Are we loading a blank journal or performing recovery after a
@@ -4861,27 +4900,14 @@ static int ext4_load_journal(struct super_block *sb,
ext4_msg(sb, KERN_ERR, "write access "
"unavailable, cannot proceed "
"(try mounting with noload)");
- return -EROFS;
+ err = -EROFS;
+ goto err_out;
}
ext4_msg(sb, KERN_INFO, "write access will "
"be enabled during recovery");
}
}
- if (journal_inum && journal_dev) {
- ext4_msg(sb, KERN_ERR, "filesystem has both journal "
- "and inode journals!");
- return -EINVAL;
- }
-
- if (journal_inum) {
- if (!(journal = ext4_get_journal(sb, journal_inum)))
- return -EINVAL;
- } else {
- if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
- return -EINVAL;
- }
-
if (!(journal->j_flags & JBD2_BARRIER))
ext4_msg(sb, KERN_INFO, "barriers disabled");
@@ -4901,12 +4927,16 @@ static int ext4_load_journal(struct super_block *sb,
if (err) {
ext4_msg(sb, KERN_ERR, "error loading journal");
- jbd2_journal_destroy(journal);
- return err;
+ goto err_out;
}
EXT4_SB(sb)->s_journal = journal;
- ext4_clear_journal_err(sb, es);
+ err = ext4_clear_journal_err(sb, es);
+ if (err) {
+ EXT4_SB(sb)->s_journal = NULL;
+ jbd2_journal_destroy(journal);
+ return err;
+ }
if (!really_read_only && journal_devnum &&
journal_devnum != le32_to_cpu(es->s_journal_dev)) {
@@ -4917,6 +4947,10 @@ static int ext4_load_journal(struct super_block *sb,
}
return 0;
+
+err_out:
+ jbd2_journal_destroy(journal);
+ return err;
}
static int ext4_commit_super(struct super_block *sb, int sync)
@@ -4925,15 +4959,10 @@ static int ext4_commit_super(struct super_block *sb, int sync)
struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
int error = 0;
- if (!sbh || block_device_ejected(sb))
- return error;
-
- /*
- * The superblock bh should be mapped, but it might not be if the
- * device was hot-removed. Not much we can do but fail the I/O.
- */
- if (!buffer_mapped(sbh))
- return error;
+ if (!sbh)
+ return -EINVAL;
+ if (block_device_ejected(sb))
+ return -ENODEV;
/*
* If the file system is mounted read-only, don't update the
@@ -5002,26 +5031,32 @@ static int ext4_commit_super(struct super_block *sb, int sync)
* remounting) the filesystem readonly, then we will end up with a
* consistent fs on disk. Record that fact.
*/
-static void ext4_mark_recovery_complete(struct super_block *sb,
- struct ext4_super_block *es)
+static int ext4_mark_recovery_complete(struct super_block *sb,
+ struct ext4_super_block *es)
{
+ int err;
journal_t *journal = EXT4_SB(sb)->s_journal;
if (!ext4_has_feature_journal(sb)) {
- BUG_ON(journal != NULL);
- return;
+ if (journal != NULL) {
+ ext4_error(sb, "Journal got removed while the fs was "
+ "mounted!");
+ return -EFSCORRUPTED;
+ }
+ return 0;
}
jbd2_journal_lock_updates(journal);
- if (jbd2_journal_flush(journal) < 0)
+ err = jbd2_journal_flush(journal);
+ if (err < 0)
goto out;
if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
ext4_clear_feature_journal_needs_recovery(sb);
ext4_commit_super(sb, 1);
}
-
out:
jbd2_journal_unlock_updates(journal);
+ return err;
}
/*
@@ -5029,14 +5064,17 @@ out:
* has recorded an error from a previous lifetime, move that error to the
* main filesystem now.
*/
-static void ext4_clear_journal_err(struct super_block *sb,
+static int ext4_clear_journal_err(struct super_block *sb,
struct ext4_super_block *es)
{
journal_t *journal;
int j_errno;
const char *errstr;
- BUG_ON(!ext4_has_feature_journal(sb));
+ if (!ext4_has_feature_journal(sb)) {
+ ext4_error(sb, "Journal got removed while the fs was mounted!");
+ return -EFSCORRUPTED;
+ }
journal = EXT4_SB(sb)->s_journal;
@@ -5061,6 +5099,7 @@ static void ext4_clear_journal_err(struct super_block *sb,
jbd2_journal_clear_err(journal);
jbd2_journal_update_sb_errno(journal);
}
+ return 0;
}
/*
@@ -5203,7 +5242,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
struct ext4_super_block *es;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- unsigned long old_sb_flags;
+ unsigned long old_sb_flags, vfs_flags;
struct ext4_mount_options old_opts;
int enable_quota = 0;
ext4_group_t g;
@@ -5246,6 +5285,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if (sbi->s_journal && sbi->s_journal->j_task->io_context)
journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
+ /*
+ * Some options can be enabled by ext4 and/or by VFS mount flags;
+ * either way we need to make sure they match in both *flags and
+ * s_flags. Copy those selected flags from *flags to s_flags.
+ */
+ vfs_flags = SB_LAZYTIME | SB_I_VERSION;
+ sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags);
+
if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
err = -EINVAL;
goto restore_opts;
@@ -5271,12 +5318,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
err = -EINVAL;
goto restore_opts;
}
- if (test_opt(sb, DAX)) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "both data=journal and dax");
- err = -EINVAL;
- goto restore_opts;
- }
} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
@@ -5292,12 +5333,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
- if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
- ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
- "dax flag with busy inodes while remounting");
- sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
- }
-
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
ext4_abort(sb, "Abort forced by user");
@@ -5311,9 +5346,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
}
- if (*flags & SB_LAZYTIME)
- sb->s_flags |= SB_LAZYTIME;
-
if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
err = -EROFS;
@@ -5343,8 +5375,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
(sbi->s_mount_state & EXT4_VALID_FS))
es->s_state = cpu_to_le16(sbi->s_mount_state);
- if (sbi->s_journal)
+ if (sbi->s_journal) {
+ /*
+ * We let remount-ro finish even if marking fs
+ * as clean failed...
+ */
ext4_mark_recovery_complete(sb, es);
+ }
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
} else {
@@ -5392,8 +5429,11 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
* been changed by e2fsck since we originally mounted
* the partition.)
*/
- if (sbi->s_journal)
- ext4_clear_journal_err(sb, es);
+ if (sbi->s_journal) {
+ err = ext4_clear_journal_err(sb, es);
+ if (err)
+ goto restore_opts;
+ }
sbi->s_mount_state = le16_to_cpu(es->s_state);
err = ext4_setup_super(sb, es, 0);
@@ -5423,7 +5463,17 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
ext4_register_li_request(sb, first_not_zeroed);
}
- ext4_setup_system_zone(sb);
+ /*
+ * Handle creation of system zone data early because it can fail.
+ * Releasing of existing data is done when we are sure remount will
+ * succeed.
+ */
+ if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) {
+ err = ext4_setup_system_zone(sb);
+ if (err)
+ goto restore_opts;
+ }
+
if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
err = ext4_commit_super(sb, 1);
if (err)
@@ -5444,8 +5494,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
}
#endif
+ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
+ ext4_release_system_zone(sb);
+
+ /*
+ * Some options can be enabled by ext4 and/or by VFS mount flags;
+ * either way we need to make sure they match in both *flags and
+ * s_flags. Copy those selected flags from s_flags to *flags.
+ */
+ *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags);
- *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
kfree(orig_data);
return 0;
@@ -5459,6 +5517,8 @@ restore_opts:
sbi->s_commit_interval = old_opts.s_commit_interval;
sbi->s_min_batch_time = old_opts.s_min_batch_time;
sbi->s_max_batch_time = old_opts.s_max_batch_time;
+ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
+ ext4_release_system_zone(sb);
#ifdef CONFIG_QUOTA
sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
for (i = 0; i < EXT4_MAXQUOTAS; i++) {
@@ -5690,6 +5750,11 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
/* Quotafile not on the same filesystem? */
if (path->dentry->d_sb != sb)
return -EXDEV;
+
+ /* Quota already enabled for this file? */
+ if (IS_NOQUOTA(d_inode(path->dentry)))
+ return -EBUSY;
+
/* Journaling quota? */
if (EXT4_SB(sb)->s_qf_names[type]) {
/* Quotafile not in fs root? */
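The remount hunks above keep a small set of VFS-controlled flags (SB_LAZYTIME, SB_I_VERSION) consistent between *flags and sb->s_flags by masking, in both directions. The stand-alone sketch below shows that mask-copy pattern; sync_flags() and the FLAG_* constants are illustrative stand-ins, not the kernel's SB_* values.

#include <stdio.h>

#define FLAG_LAZYTIME  0x1UL   /* stand-in for SB_LAZYTIME */
#define FLAG_I_VERSION 0x2UL   /* stand-in for SB_I_VERSION */
#define FLAG_RDONLY    0x4UL   /* some unrelated flag that must not be touched */

/* Copy only the bits selected by mask from src into dst, leaving every
 * other bit of dst alone; the same expression ext4_remount() now uses
 * before parsing options and again after the remount succeeds. */
static unsigned long sync_flags(unsigned long dst, unsigned long src, unsigned long mask)
{
	return (dst & ~mask) | (src & mask);
}

int main(void)
{
	unsigned long vfs_mask = FLAG_LAZYTIME | FLAG_I_VERSION;
	unsigned long s_flags = FLAG_RDONLY;        /* current superblock state */
	unsigned long new_flags = FLAG_LAZYTIME;    /* what the remount asked for */

	/* before parsing options: s_flags picks up the VFS-selected bits */
	s_flags = sync_flags(s_flags, new_flags, vfs_mask);
	/* after a successful remount: *flags reflects what the sb ended up with */
	new_flags = sync_flags(new_flags, s_flags, vfs_mask);

	printf("s_flags=%#lx new_flags=%#lx\n", s_flags, new_flags);
	return 0;
}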
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index f73fc90e5daa..0cd9b84bdd9d 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1480,6 +1480,9 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
if (!ce)
return NULL;
+ WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) &&
+ !(current->flags & PF_MEMALLOC_NOFS));
+
ea_data = ext4_kvmalloc(value_len, GFP_NOFS);
if (!ea_data) {
mb_cache_entry_put(ea_inode_cache, ce);
@@ -1824,8 +1827,11 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
if (EXT4_I(inode)->i_file_acl) {
/* The inode already has an extended attribute block. */
bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
- if (IS_ERR(bs->bh))
- return PTR_ERR(bs->bh);
+ if (IS_ERR(bs->bh)) {
+ error = PTR_ERR(bs->bh);
+ bs->bh = NULL;
+ return error;
+ }
ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
atomic_read(&(bs->bh->b_count)),
le32_to_cpu(BHDR(bs->bh)->h_refcount));
@@ -2343,6 +2349,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
error = -ENOSPC;
goto cleanup;
}
+ WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
}
error = ext4_reserve_inode_write(handle, inode, &is.iloc);
@@ -2416,7 +2423,7 @@ retry_inode:
* external inode if possible.
*/
if (ext4_has_feature_ea_inode(inode->i_sb) &&
- !i.in_inode) {
+ i.value_len && !i.in_inode) {
i.in_inode = 1;
goto retry_inode;
}
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 388500eec729..a563de5ccd21 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -218,6 +218,8 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
blkno * NAT_ENTRY_PER_BLOCK);
break;
case META_SIT:
+ if (unlikely(blkno >= TOTAL_SEGS(sbi)))
+ goto out;
/* get sit block addr */
fio.new_blkaddr = current_sit_addr(sbi,
blkno * SIT_ENTRY_PER_BLOCK);
@@ -1003,8 +1005,12 @@ int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
get_pages(sbi, is_dir ?
F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
+ get_pages(sbi, is_dir ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
return -EIO;
+ }
spin_lock(&sbi->inode_lock[type]);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index c81a1f3f0a10..c63f5e32630e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2490,6 +2490,9 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
unsigned long align = offset | iov_iter_alignment(iter);
struct block_device *bdev = inode->i_sb->s_bdev;
+ if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
+ return 1;
+
if (align & blocksize_mask) {
if (bdev)
blkbits = blksize_bits(bdev_logical_block_size(bdev));
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index ebe19894884b..2cd85ce3e450 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -208,16 +208,15 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
unsigned int max_depth;
unsigned int level;
+ *res_page = NULL;
+
if (f2fs_has_inline_dentry(dir)) {
- *res_page = NULL;
de = f2fs_find_in_inline_dir(dir, fname, res_page);
goto out;
}
- if (npages == 0) {
- *res_page = NULL;
+ if (npages == 0)
goto out;
- }
max_depth = F2FS_I(dir)->i_current_depth;
if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) {
@@ -229,7 +228,6 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
}
for (level = 0; level < max_depth; level++) {
- *res_page = NULL;
de = find_in_level(dir, level, fname, res_page);
if (de || IS_ERR(*res_page))
break;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 6b5b685af599..aacd8e11758c 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2857,6 +2857,8 @@ bool f2fs_empty_dir(struct inode *dir);
static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
+ if (fscrypt_is_nokey_name(dentry))
+ return -ENOKEY;
return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
inode, inode->i_ino, inode->i_mode);
}
@@ -2921,7 +2923,7 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
-void f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
+int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
@@ -3314,7 +3316,7 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
-bool f2fs_recover_inline_data(struct inode *inode, struct page *npage);
+int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
struct fscrypt_name *fname, struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 59b5c0b032bb..95330dfdbb1a 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -675,6 +675,10 @@ int f2fs_truncate(struct inode *inode)
return -EIO;
}
+ err = dquot_initialize(inode);
+ if (err)
+ return err;
+
/* we should check inline_data size */
if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
@@ -756,7 +760,8 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+ if (!in_group_p(inode->i_gid) &&
+ !capable_wrt_inode_uidgid(inode, CAP_FSETID))
mode &= ~S_ISGID;
set_acl_inode(inode, mode);
}
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index c1ba29d10789..6bf78cf63ea2 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -193,6 +193,10 @@ int f2fs_convert_inline_inode(struct inode *inode)
if (!f2fs_has_inline_data(inode))
return 0;
+ err = dquot_initialize(inode);
+ if (err)
+ return err;
+
page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
if (!page)
return -ENOMEM;
@@ -216,7 +220,8 @@ out:
f2fs_put_page(page, 1);
- f2fs_balance_fs(sbi, dn.node_changed);
+ if (!err)
+ f2fs_balance_fs(sbi, dn.node_changed);
return err;
}
@@ -256,7 +261,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
return 0;
}
-bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
+int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode *ri = NULL;
@@ -278,7 +283,8 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
ipage = f2fs_get_node_page(sbi, inode->i_ino);
- f2fs_bug_on(sbi, IS_ERR(ipage));
+ if (IS_ERR(ipage))
+ return PTR_ERR(ipage);
f2fs_wait_on_page_writeback(ipage, NODE, true);
@@ -291,21 +297,25 @@ process_inline:
set_page_dirty(ipage);
f2fs_put_page(ipage, 1);
- return true;
+ return 1;
}
if (f2fs_has_inline_data(inode)) {
ipage = f2fs_get_node_page(sbi, inode->i_ino);
- f2fs_bug_on(sbi, IS_ERR(ipage));
+ if (IS_ERR(ipage))
+ return PTR_ERR(ipage);
f2fs_truncate_inline_inode(inode, ipage, 0);
clear_inode_flag(inode, FI_INLINE_DATA);
f2fs_put_page(ipage, 1);
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
- if (f2fs_truncate_blocks(inode, 0, false))
- return false;
+ int ret;
+
+ ret = f2fs_truncate_blocks(inode, 0, false);
+ if (ret)
+ return ret;
goto process_inline;
}
- return false;
+ return 0;
}
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
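f2fs_recover_inline_data() now returns a tri-state int instead of a bool: a negative errno, 0 to continue with normal block recovery, or 1 when the inline data was recovered and the caller should stop. The sketch below shows how such a return value is consumed, matching the do_recover_data() hunk further down; recover_inline() and recover_one() are hypothetical names for illustration, not f2fs functions.

#include <stdio.h>
#include <errno.h>

/* Illustrative stand-in: <0 error, 1 handled (stop), 0 not handled (continue). */
static int recover_inline(int has_inline)
{
	if (has_inline < 0)
		return -EIO;          /* propagate a real failure */
	return has_inline ? 1 : 0;
}

static int recover_one(int has_inline)
{
	int err = recover_inline(has_inline);

	if (err) {
		if (err == 1)         /* inline data recovered: success, nothing more to do */
			err = 0;
		return err;           /* a negative errno falls through unchanged */
	}
	/* err == 0: continue with the remaining recovery steps */
	printf("recovering data indices\n");
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", recover_one(1), recover_one(0), recover_one(-1));
	return 0;
}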
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 4f0cc0c79d1e..e20a0f9e6845 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -432,19 +432,23 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
nid_t ino = -1;
int err = 0;
unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));
+ struct fscrypt_name fname;
trace_f2fs_lookup_start(dir, dentry, flags);
- err = fscrypt_prepare_lookup(dir, dentry, flags);
- if (err)
- goto out;
-
if (dentry->d_name.len > F2FS_NAME_LEN) {
err = -ENAMETOOLONG;
goto out;
}
- de = f2fs_find_entry(dir, &dentry->d_name, &page);
+ err = fscrypt_prepare_lookup(dir, dentry, &fname);
+ if (err == -ENOENT)
+ goto out_splice;
+ if (err)
+ goto out;
+ de = __f2fs_find_entry(dir, &fname, &page);
+ fscrypt_free_filename(&fname);
+
if (!de) {
if (IS_ERR(page)) {
err = PTR_ERR(page);
@@ -484,8 +488,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
}
out_splice:
new = d_splice_alias(inode, dentry);
- if (IS_ERR(new))
- err = PTR_ERR(new);
+ err = PTR_ERR_OR_ZERO(new);
trace_f2fs_lookup_end(dir, dentry, ino, err);
return new;
out_iput:
@@ -769,7 +772,11 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
if (whiteout) {
f2fs_i_links_write(inode, false);
+
+ spin_lock(&inode->i_lock);
inode->i_state |= I_LINKABLE;
+ spin_unlock(&inode->i_lock);
+
*whiteout = inode;
} else {
d_tmpfile(dentry, inode);
@@ -963,7 +970,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
err = f2fs_add_link(old_dentry, whiteout);
if (err)
goto put_out_dir;
+
+ spin_lock(&whiteout->i_lock);
whiteout->i_state &= ~I_LINKABLE;
+ spin_unlock(&whiteout->i_lock);
+
iput(whiteout);
}
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index e5d474681471..ff3f97ba1a55 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1559,15 +1559,16 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
if (atomic && !test_opt(sbi, NOBARRIER))
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
- set_page_writeback(page);
- ClearPageError(page);
-
+ /* should add to global list before clearing PAGECACHE status */
if (f2fs_in_warm_node_list(sbi, page)) {
seq = f2fs_add_fsync_node_entry(sbi, page);
if (seq_id)
*seq_id = seq;
}
+ set_page_writeback(page);
+ ClearPageError(page);
+
fio.old_blkaddr = ni.blk_addr;
f2fs_do_write_node_page(nid, &fio);
set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
@@ -2256,6 +2257,9 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
if (unlikely(nid >= nm_i->max_nid))
nid = 0;
+ if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
+ nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
+
/* Enough entries */
if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
return 0;
@@ -2450,7 +2454,7 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
return nr - nr_shrink;
}
-void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
+int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
{
void *src_addr, *dst_addr;
size_t inline_size;
@@ -2458,7 +2462,8 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
struct f2fs_inode *ri;
ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
- f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
+ if (IS_ERR(ipage))
+ return PTR_ERR(ipage);
ri = F2FS_INODE(page);
if (ri->i_inline & F2FS_INLINE_XATTR) {
@@ -2477,6 +2482,7 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
update_inode:
f2fs_update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
+ return 0;
}
int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
@@ -2648,6 +2654,9 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
struct f2fs_nat_entry raw_ne;
nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
+ if (f2fs_check_nid_range(sbi, nid))
+ continue;
+
raw_ne = nat_in_journal(journal, i);
ne = __lookup_nat_cache(nm_i, nid);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 733f005b85d6..ad0486beee2c 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -471,7 +471,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
/* step 1: recover xattr */
if (IS_INODE(page)) {
- f2fs_recover_inline_xattr(inode, page);
+ err = f2fs_recover_inline_xattr(inode, page);
+ if (err)
+ goto out;
} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
err = f2fs_recover_xattr_data(inode, page);
if (!err)
@@ -480,8 +482,12 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
}
/* step 2: recover inline data */
- if (f2fs_recover_inline_data(inode, page))
+ err = f2fs_recover_inline_data(inode, page);
+ if (err) {
+ if (err == 1)
+ err = 0;
goto out;
+ }
/* step 3: recover data indices */
start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 9c2a55ad61bc..1f5db4cbc499 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -91,11 +91,11 @@
#define BLKS_PER_SEC(sbi) \
((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
#define GET_SEC_FROM_SEG(sbi, segno) \
- ((segno) / (sbi)->segs_per_sec)
+ (((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno) \
((secno) * (sbi)->segs_per_sec)
#define GET_ZONE_FROM_SEC(sbi, secno) \
- ((secno) / (sbi)->secs_per_zone)
+ (((secno) == -1) ? -1: (secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno) \
GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index da348cf4ff56..161ce0eb8891 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1004,6 +1004,9 @@ static void f2fs_put_super(struct super_block *sb)
int i;
bool dropped;
+ /* unregister procfs/sysfs entries in advance to avoid a race */
+ f2fs_unregister_sysfs(sbi);
+
f2fs_quota_off_umount(sb);
/* prevent remaining shrinker jobs */
@@ -1067,8 +1070,6 @@ static void f2fs_put_super(struct super_block *sb)
kfree(sbi->ckpt);
- f2fs_unregister_sysfs(sbi);
-
sb->s_fs_info = NULL;
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
@@ -1154,7 +1155,8 @@ static int f2fs_statfs_project(struct super_block *sb,
limit >>= sb->s_blocksize_bits;
if (limit && buf->f_blocks > limit) {
- curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
+ curblock = (dquot->dq_dqb.dqb_curspace +
+ dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
buf->f_blocks = limit;
buf->f_bfree = buf->f_bavail =
(buf->f_blocks > curblock) ?
@@ -1648,6 +1650,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
int offset = off & (sb->s_blocksize - 1);
size_t towrite = len;
struct page *page;
+ void *fsdata = NULL;
char *kaddr;
int err = 0;
int tocopy;
@@ -1657,7 +1660,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
towrite);
retry:
err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
- &page, NULL);
+ &page, &fsdata);
if (unlikely(err)) {
if (err == -ENOMEM) {
congestion_wait(BLK_RW_ASYNC, HZ/50);
@@ -1672,7 +1675,7 @@ retry:
flush_dcache_page(page);
a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
- page, NULL);
+ page, fsdata);
offset = 0;
towrite -= tocopy;
off += tocopy;
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 9a59f49ba405..89b6c33ba6a4 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -717,4 +717,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
}
kobject_del(&sbi->s_kobj);
kobject_put(&sbi->s_kobj);
+ wait_for_completion(&sbi->s_kobj_unregister);
}
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 1dae74f7ccca..201e9da1692a 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -538,8 +538,9 @@ out:
ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
struct inode *inode = d_inode(dentry);
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
struct f2fs_xattr_entry *entry;
- void *base_addr;
+ void *base_addr, *last_base_addr;
int error = 0;
size_t rest = buffer_size;
@@ -549,6 +550,8 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
if (error)
return error;
+ last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+
list_for_each_xattr(entry, base_addr) {
const struct xattr_handler *handler =
f2fs_xattr_handler(entry->e_name_index);
@@ -556,6 +559,16 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
size_t prefix_len;
size_t size;
+ if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+ (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
+ f2fs_msg(dentry->d_sb, KERN_ERR,
+ "inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ error = -EFSCORRUPTED;
+ goto cleanup;
+ }
+
if (!handler || (handler->list && !handler->list(dentry)))
continue;
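The f2fs_listxattr() hunk above bounds the entry walk with last_base_addr so a corrupted on-disk list cannot run past the xattr area. The generic sketch below shows that bounded walk over variable-length records; struct rec, NEXT_REC() and walk() are made-up stand-ins, not struct f2fs_xattr_entry or its accessors.

#include <stdio.h>
#include <string.h>

struct rec { unsigned int len; unsigned char data[]; };

#define NEXT_REC(r) ((struct rec *)((char *)(r) + sizeof(unsigned int) + (r)->len))

/* Walk the records in buf without ever reading or stepping past end. */
static int walk(void *buf, unsigned int size)
{
	char *end = (char *)buf + size;
	struct rec *r = buf;

	for (;;) {
		/* the length field itself must lie inside the buffer */
		if ((char *)r + sizeof(unsigned int) > end)
			return -1;            /* corrupted: ran off the end */
		if (r->len == 0)              /* a zero length terminates the list */
			return 0;
		/* ... and so must the whole record we are about to skip over */
		if ((char *)NEXT_REC(r) > end)
			return -1;
		printf("record of %u bytes\n", r->len);
		r = NEXT_REC(r);
	}
}

int main(void)
{
	unsigned int buf[16] = { 0 };         /* aligned, zeroed backing store */
	struct rec *r = (struct rec *)buf;

	r->len = 8;                           /* one record, then a terminator */
	printf("walk: %d\n", walk(buf, sizeof(buf)));
	return 0;
}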
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 70d37a5fd72c..607e1d124062 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1519,6 +1519,12 @@ static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b,
goto out;
}
+ if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) {
+ if (!silent)
+ fat_msg(sb, KERN_ERR, "bogus number of FAT sectors");
+ goto out;
+ }
+
error = 0;
out:
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 4137d96534a6..e039af1872ab 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -779,9 +779,10 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
{
struct task_struct *p;
enum pid_type type;
+ unsigned long flags;
struct pid *pid;
- read_lock(&fown->lock);
+ read_lock_irqsave(&fown->lock, flags);
type = fown->pid_type;
pid = fown->pid;
@@ -802,7 +803,7 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
read_unlock(&tasklist_lock);
}
out_unlock_fown:
- read_unlock(&fown->lock);
+ read_unlock_irqrestore(&fown->lock, flags);
}
static void send_sigurg_to_task(struct task_struct *p,
@@ -817,9 +818,10 @@ int send_sigurg(struct fown_struct *fown)
struct task_struct *p;
enum pid_type type;
struct pid *pid;
+ unsigned long flags;
int ret = 0;
- read_lock(&fown->lock);
+ read_lock_irqsave(&fown->lock, flags);
type = fown->pid_type;
pid = fown->pid;
@@ -842,7 +844,7 @@ int send_sigurg(struct fown_struct *fown)
read_unlock(&tasklist_lock);
}
out_unlock_fown:
- read_unlock(&fown->lock);
+ read_unlock_irqrestore(&fown->lock, flags);
return ret;
}
diff --git a/fs/file.c b/fs/file.c
index 780d29e58847..3762a3f136fd 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -70,7 +70,7 @@ static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
*/
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
- unsigned int cpy, set;
+ size_t cpy, set;
BUG_ON(nfdt->max_fds < ofdt->max_fds);
diff --git a/fs/filesystems.c b/fs/filesystems.c
index b03f57b1105b..181200daeeba 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -267,7 +267,9 @@ struct file_system_type *get_fs_type(const char *name)
fs = __get_fs_type(name, len);
if (!fs && (request_module("fs-%.*s", len, name) == 0)) {
fs = __get_fs_type(name, len);
- WARN_ONCE(!fs, "request_module fs-%.*s succeeded, but still no fs?\n", len, name);
+ if (!fs)
+ pr_warn_once("request_module fs-%.*s succeeded, but still no fs?\n",
+ len, name);
}
if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index a89e27367e34..a247cb4b00e2 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -45,7 +45,6 @@ struct wb_completion {
struct wb_writeback_work {
long nr_pages;
struct super_block *sb;
- unsigned long *older_than_this;
enum writeback_sync_modes sync_mode;
unsigned int tagged_writepages:1;
unsigned int for_kupdate:1;
@@ -160,7 +159,9 @@ static void inode_io_list_del_locked(struct inode *inode,
struct bdi_writeback *wb)
{
assert_spin_locked(&wb->list_lock);
+ assert_spin_locked(&inode->i_lock);
+ inode->i_state &= ~I_SYNC_QUEUED;
list_del_init(&inode->i_io_list);
wb_io_lists_depopulated(wb);
}
@@ -269,6 +270,7 @@ void __inode_attach_wb(struct inode *inode, struct page *page)
if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
wb_put(wb);
}
+EXPORT_SYMBOL_GPL(__inode_attach_wb);
/**
* locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
@@ -1041,7 +1043,9 @@ void inode_io_list_del(struct inode *inode)
struct bdi_writeback *wb;
wb = inode_to_wb_and_lock_list(inode);
+ spin_lock(&inode->i_lock);
inode_io_list_del_locked(inode, wb);
+ spin_unlock(&inode->i_lock);
spin_unlock(&wb->list_lock);
}
@@ -1090,8 +1094,10 @@ void sb_clear_inode_writeback(struct inode *inode)
* the case then the inode must have been redirtied while it was being written
* out and we don't reset its dirtied_when.
*/
-static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
+static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
{
+ assert_spin_locked(&inode->i_lock);
+
if (!list_empty(&wb->b_dirty)) {
struct inode *tail;
@@ -1100,6 +1106,14 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
inode->dirtied_when = jiffies;
}
inode_io_list_move_locked(inode, wb, &wb->b_dirty);
+ inode->i_state &= ~I_SYNC_QUEUED;
+}
+
+static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
+{
+ spin_lock(&inode->i_lock);
+ redirty_tail_locked(inode, wb);
+ spin_unlock(&inode->i_lock);
}
/*
@@ -1138,16 +1152,13 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
#define EXPIRE_DIRTY_ATIME 0x0001
/*
- * Move expired (dirtied before work->older_than_this) dirty inodes from
+ * Move expired (dirtied before dirtied_before) dirty inodes from
* @delaying_queue to @dispatch_queue.
*/
static int move_expired_inodes(struct list_head *delaying_queue,
struct list_head *dispatch_queue,
- int flags,
- struct wb_writeback_work *work)
+ unsigned long dirtied_before)
{
- unsigned long *older_than_this = NULL;
- unsigned long expire_time;
LIST_HEAD(tmp);
struct list_head *pos, *node;
struct super_block *sb = NULL;
@@ -1155,21 +1166,15 @@ static int move_expired_inodes(struct list_head *delaying_queue,
int do_sb_sort = 0;
int moved = 0;
- if ((flags & EXPIRE_DIRTY_ATIME) == 0)
- older_than_this = work->older_than_this;
- else if (!work->for_sync) {
- expire_time = jiffies - (dirtytime_expire_interval * HZ);
- older_than_this = &expire_time;
- }
while (!list_empty(delaying_queue)) {
inode = wb_inode(delaying_queue->prev);
- if (older_than_this &&
- inode_dirtied_after(inode, *older_than_this))
+ if (inode_dirtied_after(inode, dirtied_before))
break;
list_move(&inode->i_io_list, &tmp);
moved++;
- if (flags & EXPIRE_DIRTY_ATIME)
- set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
+ spin_lock(&inode->i_lock);
+ inode->i_state |= I_SYNC_QUEUED;
+ spin_unlock(&inode->i_lock);
if (sb_is_blkdev_sb(inode->i_sb))
continue;
if (sb && sb != inode->i_sb)
@@ -1207,18 +1212,22 @@ out:
* |
* +--> dequeue for IO
*/
-static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
+static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
+ unsigned long dirtied_before)
{
int moved;
+ unsigned long time_expire_jif = dirtied_before;
assert_spin_locked(&wb->list_lock);
list_splice_init(&wb->b_more_io, &wb->b_io);
- moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
+ moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
+ if (!work->for_sync)
+ time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
- EXPIRE_DIRTY_ATIME, work);
+ time_expire_jif);
if (moved)
wb_io_lists_populated(wb);
- trace_writeback_queue_io(wb, work, moved);
+ trace_writeback_queue_io(wb, work, dirtied_before, moved);
}
static int write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -1312,7 +1321,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
* writeback is not making progress due to locked
* buffers. Skip this inode for now.
*/
- redirty_tail(inode, wb);
+ redirty_tail_locked(inode, wb);
return;
}
@@ -1332,7 +1341,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
* retrying writeback of the dirty page/inode
* that cannot be performed immediately.
*/
- redirty_tail(inode, wb);
+ redirty_tail_locked(inode, wb);
}
} else if (inode->i_state & I_DIRTY) {
/*
@@ -1340,10 +1349,11 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
* such as delayed allocation during submission or metadata
* updates after data IO completion.
*/
- redirty_tail(inode, wb);
+ redirty_tail_locked(inode, wb);
} else if (inode->i_state & I_DIRTY_TIME) {
inode->dirtied_when = jiffies;
inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
+ inode->i_state &= ~I_SYNC_QUEUED;
} else {
/* The inode is clean. Remove from writeback lists. */
inode_io_list_del_locked(inode, wb);
@@ -1383,25 +1393,25 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
}
/*
+ * If the inode has dirty timestamps and we need to write them, call
+ * mark_inode_dirty_sync() to notify the filesystem about it and to
+ * change I_DIRTY_TIME into I_DIRTY_SYNC.
+ */
+ if ((inode->i_state & I_DIRTY_TIME) &&
+ (wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
+ time_after(jiffies, inode->dirtied_time_when +
+ dirtytime_expire_interval * HZ))) {
+ trace_writeback_lazytime(inode);
+ mark_inode_dirty_sync(inode);
+ }
+
+ /*
* Some filesystems may redirty the inode during the writeback
* due to delalloc, clear dirty metadata flags right before
* write_inode()
*/
spin_lock(&inode->i_lock);
-
dirty = inode->i_state & I_DIRTY;
- if (inode->i_state & I_DIRTY_TIME) {
- if ((dirty & I_DIRTY_INODE) ||
- wbc->sync_mode == WB_SYNC_ALL ||
- unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
- unlikely(time_after(jiffies,
- (inode->dirtied_time_when +
- dirtytime_expire_interval * HZ)))) {
- dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
- trace_writeback_lazytime(inode);
- }
- } else
- inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
inode->i_state &= ~dirty;
/*
@@ -1422,8 +1432,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
spin_unlock(&inode->i_lock);
- if (dirty & I_DIRTY_TIME)
- mark_inode_dirty_sync(inode);
/* Don't write the inode if only I_DIRTY_PAGES was set */
if (dirty & ~I_DIRTY_PAGES) {
int err = write_inode(inode, wbc);
@@ -1587,8 +1595,8 @@ static long writeback_sb_inodes(struct super_block *sb,
*/
spin_lock(&inode->i_lock);
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ redirty_tail_locked(inode, wb);
spin_unlock(&inode->i_lock);
- redirty_tail(inode, wb);
continue;
}
if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
@@ -1729,7 +1737,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
blk_start_plug(&plug);
spin_lock(&wb->list_lock);
if (list_empty(&wb->b_io))
- queue_io(wb, &work);
+ queue_io(wb, &work, jiffies);
__writeback_inodes_wb(wb, &work);
spin_unlock(&wb->list_lock);
blk_finish_plug(&plug);
@@ -1749,7 +1757,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
* takes longer than a dirty_writeback_interval interval, then leave a
* one-second gap.
*
- * older_than_this takes precedence over nr_to_write. So we'll only write back
+ * dirtied_before takes precedence over nr_to_write. So we'll only write back
* all dirty pages if they are all attached to "old" mappings.
*/
static long wb_writeback(struct bdi_writeback *wb,
@@ -1757,14 +1765,11 @@ static long wb_writeback(struct bdi_writeback *wb,
{
unsigned long wb_start = jiffies;
long nr_pages = work->nr_pages;
- unsigned long oldest_jif;
+ unsigned long dirtied_before = jiffies;
struct inode *inode;
long progress;
struct blk_plug plug;
- oldest_jif = jiffies;
- work->older_than_this = &oldest_jif;
-
blk_start_plug(&plug);
spin_lock(&wb->list_lock);
for (;;) {
@@ -1798,14 +1803,14 @@ static long wb_writeback(struct bdi_writeback *wb,
* safe.
*/
if (work->for_kupdate) {
- oldest_jif = jiffies -
+ dirtied_before = jiffies -
msecs_to_jiffies(dirty_expire_interval * 10);
} else if (work->for_background)
- oldest_jif = jiffies;
+ dirtied_before = jiffies;
trace_writeback_start(wb, work);
if (list_empty(&wb->b_io))
- queue_io(wb, work);
+ queue_io(wb, work, dirtied_before);
if (work->sb)
progress = writeback_sb_inodes(work->sb, wb, work);
else
@@ -1981,7 +1986,7 @@ void wb_workfn(struct work_struct *work)
struct bdi_writeback, dwork);
long pages_written;
- set_worker_desc("flush-%s", dev_name(wb->bdi->dev));
+ set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
current->flags |= PF_SWAPWRITE;
if (likely(!current_is_workqueue_rescuer() ||
@@ -2207,11 +2212,12 @@ void __mark_inode_dirty(struct inode *inode, int flags)
inode->i_state |= flags;
/*
- * If the inode is being synced, just update its dirty state.
- * The unlocker will place the inode on the appropriate
- * superblock list, based upon its state.
+ * If the inode is queued for writeback by flush worker, just
+ * update its dirty state. Once the flush worker is done with
+ * the inode it will place it on the appropriate superblock
+ * list, based upon its state.
*/
- if (inode->i_state & I_SYNC)
+ if (inode->i_state & I_SYNC_QUEUED)
goto out_unlock_inode;
/*
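The fs-writeback changes above replace the older_than_this pointer with an explicit dirtied_before cutoff passed down to queue_io()/move_expired_inodes(), and mark queued inodes with I_SYNC_QUEUED. The toy sketch below shows the cutoff-based expiry walk; the array of toy_inode structures and the plain signed comparison standing in for time_after() are simplifying assumptions.

#include <stdio.h>

struct toy_inode { int id; long dirtied_when; int sync_queued; };

/* Return 1 if the inode was dirtied strictly after the cutoff
 * (a simplified stand-in for inode_dirtied_after()/time_after()). */
static int dirtied_after(const struct toy_inode *in, long dirtied_before)
{
	return in->dirtied_when > dirtied_before;
}

/* Flag every inode dirtied at or before the cutoff and stop at the
 * first newer one; the array is kept oldest-first, the way wb->b_dirty
 * is walked from its tail in move_expired_inodes(). */
static int move_expired(struct toy_inode *list, int n, long dirtied_before)
{
	int i, moved = 0;

	for (i = 0; i < n; i++) {
		if (dirtied_after(&list[i], dirtied_before))
			break;
		list[i].sync_queued = 1;   /* I_SYNC_QUEUED in the real code */
		moved++;
	}
	return moved;
}

int main(void)
{
	struct toy_inode list[] = { { 1, 100, 0 }, { 2, 200, 0 }, { 3, 300, 0 } };
	long cutoff = 250;

	printf("moved %d of 3 inodes (cutoff %ld)\n",
	       move_expired(list, 3, cutoff), cutoff);
	return 0;
}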
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index f057c213c453..e10e2b62ccf4 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -621,6 +621,8 @@ static int __init cuse_init(void)
cuse_channel_fops.owner = THIS_MODULE;
cuse_channel_fops.open = cuse_channel_open;
cuse_channel_fops.release = cuse_channel_release;
+ /* CUSE is not prepared for FUSE_DEV_IOC_CLONE */
+ cuse_channel_fops.unlocked_ioctl = NULL;
cuse_class = class_create(THIS_MODULE, "cuse");
if (IS_ERR(cuse_class))
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 6d39143cfa09..1ff5a6b21db0 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -831,14 +831,14 @@ static int fuse_check_page(struct page *page)
{
if (page_mapcount(page) ||
page->mapping != NULL ||
- page_count(page) != 1 ||
(page->flags & PAGE_FLAGS_CHECK_AT_PREP &
~(1 << PG_locked |
1 << PG_referenced |
1 << PG_uptodate |
1 << PG_lru |
1 << PG_active |
- 1 << PG_reclaim))) {
+ 1 << PG_reclaim |
+ 1 << PG_waiters))) {
printk(KERN_WARNING "fuse: trying to steal weird page\n");
printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
return 1;
@@ -853,15 +853,16 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
struct page *newpage;
struct pipe_buffer *buf = cs->pipebufs;
+ get_page(oldpage);
err = unlock_request(cs->req);
if (err)
- return err;
+ goto out_put_old;
fuse_copy_finish(cs);
err = pipe_buf_confirm(cs->pipe, buf);
if (err)
- return err;
+ goto out_put_old;
BUG_ON(!cs->nr_segs);
cs->currbuf = buf;
@@ -901,7 +902,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
if (err) {
unlock_page(newpage);
- return err;
+ goto out_put_old;
}
get_page(newpage);
@@ -920,14 +921,19 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
if (err) {
unlock_page(newpage);
put_page(newpage);
- return err;
+ goto out_put_old;
}
unlock_page(oldpage);
+ /* Drop ref for ap->pages[] array */
put_page(oldpage);
cs->len = 0;
- return 0;
+ err = 0;
+out_put_old:
+ /* Drop ref obtained in this function */
+ put_page(oldpage);
+ return err;
out_fallback_unlock:
unlock_page(newpage);
@@ -936,10 +942,10 @@ out_fallback:
cs->offset = buf->offset;
err = lock_request(cs->req);
- if (err)
- return err;
+ if (!err)
+ err = 1;
- return 1;
+ goto out_put_old;
}
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
@@ -951,14 +957,16 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
if (cs->nr_segs == cs->pipe->buffers)
return -EIO;
+ get_page(page);
err = unlock_request(cs->req);
- if (err)
+ if (err) {
+ put_page(page);
return err;
+ }
fuse_copy_finish(cs);
buf = cs->pipebufs;
- get_page(page);
buf->page = page;
buf->offset = offset;
buf->len = count;
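The fuse_try_move_page() fix above takes an extra reference on oldpage before the first call that can fail and releases it at a single out_put_old label, so every early exit balances the refcount. A minimal sketch of that single-exit pattern follows; struct page, get_page()/put_page() and try_move() here are toy stand-ins, not the kernel implementations.

#include <stdio.h>

struct page { int refcount; };

static void get_page(struct page *p) { p->refcount++; }
static void put_page(struct page *p) { p->refcount--; }

/* Pin the object before the first operation that can fail, then funnel
 * every exit through one label that drops that pin, so no early return
 * leaks or double-drops the reference. */
static int try_move(struct page *oldpage, int fail_step)
{
	int err = 0;

	get_page(oldpage);                 /* ref held for the whole function */

	if (fail_step == 1) { err = -1; goto out_put_old; }
	if (fail_step == 2) { err = -2; goto out_put_old; }

	/* success path would do the actual work here */
out_put_old:
	put_page(oldpage);                 /* the single place the pin is released */
	return err;
}

int main(void)
{
	struct page p = { .refcount = 1 };

	try_move(&p, 0);
	try_move(&p, 2);
	printf("refcount after both calls: %d\n", p.refcount);   /* still 1 */
	return 0;
}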
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 3964325432ae..6a3d89672ff7 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -18,6 +18,7 @@
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>
+#include <linux/fs.h>
static const struct file_operations fuse_direct_io_file_operations;
@@ -2535,7 +2536,16 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
struct iovec *iov = iov_page;
iov->iov_base = (void __user *)arg;
- iov->iov_len = _IOC_SIZE(cmd);
+
+ switch (cmd) {
+ case FS_IOC_GETFLAGS:
+ case FS_IOC_SETFLAGS:
+ iov->iov_len = sizeof(int);
+ break;
+ default:
+ iov->iov_len = _IOC_SIZE(cmd);
+ break;
+ }
if (_IOC_DIR(cmd) & _IOC_WRITE) {
in_iov = iov;
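The fuse_do_ioctl() change above special-cases FS_IOC_GETFLAGS and FS_IOC_SETFLAGS because their ioctl numbers encode a long while the kernel actually transfers an int. The Linux-only userspace check below makes the size mismatch visible; it is purely illustrative and needs a 64-bit build to show differing values.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	/* FS_IOC_GETFLAGS is declared with a long, so _IOC_SIZE() reports
	 * sizeof(long) (8 on 64-bit), but the kernel copies an int. The
	 * fuse hunk pins iov_len to sizeof(int) for exactly these two
	 * ioctls so restricted servers see the real transfer size. */
	printf("_IOC_SIZE(FS_IOC_GETFLAGS) = %u, kernel copies %zu bytes\n",
	       (unsigned int)_IOC_SIZE(FS_IOC_GETFLAGS), sizeof(int));
	return 0;
}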
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 096b47972139..43f53020553b 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -530,10 +530,12 @@ lower_metapath:
/* Advance in metadata tree. */
(mp->mp_list[hgt])++;
- if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
- if (!hgt)
+ if (hgt) {
+ if (mp->mp_list[hgt] >= sdp->sd_inptrs)
+ goto lower_metapath;
+ } else {
+ if (mp->mp_list[hgt] >= sdp->sd_diptrs)
break;
- goto lower_metapath;
}
fill_up_metapath:
@@ -879,10 +881,9 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
ret = -ENOENT;
goto unlock;
} else {
- /* report a hole */
iomap->offset = pos;
iomap->length = length;
- goto do_alloc;
+ goto hole_found;
}
}
iomap->length = size;
@@ -936,8 +937,6 @@ unlock:
return ret;
do_alloc:
- iomap->addr = IOMAP_NULL_ADDR;
- iomap->type = IOMAP_HOLE;
if (flags & IOMAP_REPORT) {
if (pos >= size)
ret = -ENOENT;
@@ -959,6 +958,9 @@ do_alloc:
if (pos < size && height == ip->i_height)
ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
}
+hole_found:
+ iomap->addr = IOMAP_NULL_ADDR;
+ iomap->type = IOMAP_HOLE;
goto out;
}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index ccdd8c821abd..c20d71d86812 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -870,7 +870,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
out_free:
kfree(gl->gl_lksb.sb_lvbptr);
kmem_cache_free(cachep, gl);
- atomic_dec(&sdp->sd_glock_disposal);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_glock_wait);
out:
return ret;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index c63bee9adb6a..20f08f4391c9 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -89,6 +89,8 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
memset(&tr, 0, sizeof(tr));
INIT_LIST_HEAD(&tr.tr_buf);
INIT_LIST_HEAD(&tr.tr_databuf);
+ INIT_LIST_HEAD(&tr.tr_ail1_list);
+ INIT_LIST_HEAD(&tr.tr_ail2_list);
tr.tr_revokes = atomic_read(&gl->gl_ail_count);
if (!tr.tr_revokes)
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index d968b5c5df21..a52b8b0dceeb 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -715,7 +715,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = gfs2_trans_begin(sdp, blocks, 0);
if (error)
- goto fail_gunlock2;
+ goto fail_free_inode;
if (blocks > 1) {
ip->i_eattr = ip->i_no_addr + 1;
@@ -726,7 +726,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
if (error)
- goto fail_gunlock2;
+ goto fail_free_inode;
BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
@@ -735,7 +735,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
goto fail_gunlock2;
glock_set_object(ip->i_iopen_gh.gh_gl, ip);
- gfs2_glock_put(io_gl);
gfs2_set_iop(inode);
insert_inode_hash(inode);
@@ -768,6 +767,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
mark_inode_dirty(inode);
d_instantiate(dentry, inode);
+ /* After d_instantiate(), errors should result in evict, which will
+ * destroy both the inode and iopen glocks properly. */
if (file) {
file->f_mode |= FMODE_CREATED;
error = finish_open(file, dentry, gfs2_open_common);
@@ -775,15 +776,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
gfs2_glock_dq_uninit(ghs);
gfs2_glock_dq_uninit(ghs + 1);
clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
+ gfs2_glock_put(io_gl);
return error;
fail_gunlock3:
glock_clear_object(io_gl, ip);
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
- gfs2_glock_put(io_gl);
fail_gunlock2:
- if (io_gl)
- clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
+ clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
+ gfs2_glock_put(io_gl);
fail_free_inode:
if (ip->i_gl) {
glock_clear_object(ip->i_gl, ip);
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 62edf8f5615f..56dddc1f8ddd 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -283,7 +283,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- int lvb_needs_unlock = 0;
int error;
if (gl->gl_lksb.sb_lkid == 0) {
@@ -296,13 +295,10 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_update_request_times(gl);
- /* don't want to skip dlm_unlock writing the lvb when lock is ex */
-
- if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
- lvb_needs_unlock = 1;
+ /* don't want to skip dlm_unlock writing the lvb when lock has one */
if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
- !lvb_needs_unlock) {
+ !gl->gl_lksb.sb_lvbptr) {
gfs2_glock_free(gl);
return;
}
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index d3f0612e3347..74c1fe9c4a04 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -806,8 +806,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
tr = sdp->sd_log_tr;
if (tr) {
sdp->sd_log_tr = NULL;
- INIT_LIST_HEAD(&tr->tr_ail1_list);
- INIT_LIST_HEAD(&tr->tr_ail2_list);
tr->tr_first = sdp->sd_log_flush_head;
if (unlikely (state == SFS_FROZEN))
gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
@@ -877,8 +875,10 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
* @new: New transaction to be merged
*/
-static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
+static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
{
+ struct gfs2_trans *old = sdp->sd_log_tr;
+
WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));
old->tr_num_buf_new += new->tr_num_buf_new;
@@ -890,6 +890,11 @@ static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
list_splice_tail_init(&new->tr_buf, &old->tr_buf);
+
+ spin_lock(&sdp->sd_ail_lock);
+ list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list);
+ list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list);
+ spin_unlock(&sdp->sd_ail_lock);
}
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
@@ -901,7 +906,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
gfs2_log_lock(sdp);
if (sdp->sd_log_tr) {
- gfs2_merge_trans(sdp->sd_log_tr, tr);
+ gfs2_merge_trans(sdp, tr);
} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
sdp->sd_log_tr = tr;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index b041cb8ae383..17001f4e9f84 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -161,15 +161,19 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
return -EINVAL;
}
- /* If format numbers match exactly, we're done. */
-
- if (sb->sb_fs_format == GFS2_FORMAT_FS &&
- sb->sb_multihost_format == GFS2_FORMAT_MULTI)
- return 0;
+ if (sb->sb_fs_format != GFS2_FORMAT_FS ||
+ sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
+ fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
+ return -EINVAL;
+ }
- fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
+ if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE ||
+ (sb->sb_bsize & (sb->sb_bsize - 1))) {
+ pr_warn("Invalid superblock size\n");
+ return -EINVAL;
+ }
- return -EINVAL;
+ return 0;
}
static void end_bio_io_page(struct bio *bio)
@@ -903,7 +907,7 @@ fail:
}
static const match_table_t nolock_tokens = {
- { Opt_jid, "jid=%d\n", },
+ { Opt_jid, "jid=%d", },
{ Opt_err, NULL },
};
@@ -1160,7 +1164,17 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
goto fail_per_node;
}
- if (!sb_rdonly(sb)) {
+ if (sb_rdonly(sb)) {
+ struct gfs2_holder freeze_gh;
+
+ error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+ GL_EXACT, &freeze_gh);
+ if (error) {
+ fs_err(sdp, "can't make FS RO: %d\n", error);
+ goto fail_per_node;
+ }
+ gfs2_glock_dq_uninit(&freeze_gh);
+ } else {
error = gfs2_make_fs_rw(sdp);
if (error) {
fs_err(sdp, "can't make FS RW: %d\n", error);
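
The gfs2_check_sb() hunk above additionally rejects superblocks whose reported block size is smaller than 512, larger than the page size, or not a power of two; the test sb_bsize & (sb_bsize - 1) is zero only for powers of two. A small standalone sketch of that validation follows. MIN_BSIZE, MAX_BSIZE and check_block_size() are made-up names, and 4096 merely stands in for PAGE_SIZE.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_BSIZE 512u
#define MAX_BSIZE 4096u         /* stands in for PAGE_SIZE on most systems */

/* A value x > 0 is a power of two exactly when (x & (x - 1)) == 0. */
static bool check_block_size(uint32_t bsize)
{
        if (bsize < MIN_BSIZE || bsize > MAX_BSIZE)
                return false;
        return (bsize & (bsize - 1)) == 0;
}

int main(void)
{
        uint32_t samples[] = { 0, 100, 512, 1000, 1024, 4096, 8192 };

        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("%5u -> %s\n", samples[i],
                       check_block_size(samples[i]) ? "valid" : "invalid");
        return 0;
}
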
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 0efae7a0ee80..dd0f9bc13164 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1043,8 +1043,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
u32 x;
int error = 0;
- if (capable(CAP_SYS_RESOURCE) ||
- sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
+ if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
return 0;
error = gfs2_quota_hold(ip, uid, gid);
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 836f29480be6..e3a6e2404d11 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -47,7 +47,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
int ret;
ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
- if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+ if (capable(CAP_SYS_RESOURCE) ||
+ sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return 0;
ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (ret)
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index c94c4ac1ae78..054fdfd4fb8b 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -739,9 +739,9 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
}
gfs2_free_clones(rgd);
+ return_all_reservations(rgd);
kfree(rgd->rd_bits);
rgd->rd_bits = NULL;
- return_all_reservations(rgd);
kmem_cache_free(gfs2_rgrpd_cachep, rgd);
}
}
@@ -1009,6 +1009,10 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
if (error < 0)
return error;
+ if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) {
+ fs_err(sdp, "no resource groups found in the file system.\n");
+ return -ENOENT;
+ }
set_rgrp_preferences(sdp);
sdp->sd_rindex_uptodate = 1;
@@ -1387,6 +1391,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+ return -EROFS;
+
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index a971862b186e..3cc2237e5896 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -934,6 +934,7 @@ restart:
gfs2_jindex_free(sdp);
/* Take apart glock structures and buffer lists */
gfs2_gl_hash_clear(sdp);
+ truncate_inode_pages_final(&sdp->sd_aspace);
gfs2_delete_debugfs_file(sdp);
/* Unmount the locking protocol */
gfs2_lm_unmount(sdp);
@@ -999,11 +1000,13 @@ void gfs2_freeze_func(struct work_struct *work)
static int gfs2_freeze(struct super_block *sb)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
- int error = 0;
+ int error;
mutex_lock(&sdp->sd_freeze_mutex);
- if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
+ if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
+ error = -EBUSY;
goto out;
+ }
if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
error = -EINVAL;
@@ -1045,10 +1048,10 @@ static int gfs2_unfreeze(struct super_block *sb)
struct gfs2_sbd *sdp = sb->s_fs_info;
mutex_lock(&sdp->sd_freeze_mutex);
- if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
+ if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
!gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
mutex_unlock(&sdp->sd_freeze_mutex);
- return 0;
+ return -EINVAL;
}
gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 812b5d5978b2..9313f7904e34 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -56,6 +56,8 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
sizeof(u64));
INIT_LIST_HEAD(&tr->tr_databuf);
INIT_LIST_HEAD(&tr->tr_buf);
+ INIT_LIST_HEAD(&tr->tr_ail1_list);
+ INIT_LIST_HEAD(&tr->tr_ail2_list);
sb_start_intwrite(sdp->sd_vfs);
diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
index e6d554476db4..eeebe80c6be4 100644
--- a/fs/hfsplus/attributes.c
+++ b/fs/hfsplus/attributes.c
@@ -292,6 +292,10 @@ static int __hfsplus_delete_attr(struct inode *inode, u32 cnid,
return -ENOENT;
}
+ /* Avoid btree corruption */
+ hfs_bnode_read(fd->bnode, fd->search_key,
+ fd->keyoffset, fd->keylength);
+
err = hfs_brec_remove(fd);
if (err)
return err;
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index a930ddd15681..7054a542689f 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -598,13 +598,15 @@ void hfsplus_file_truncate(struct inode *inode)
res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
if (res)
break;
- hfs_brec_remove(&fd);
- mutex_unlock(&fd.tree->tree_lock);
start = hip->cached_start;
+ if (blk_cnt <= start)
+ hfs_brec_remove(&fd);
+ mutex_unlock(&fd.tree->tree_lock);
hfsplus_free_extents(sb, hip->cached_extents,
alloc_cnt - start, alloc_cnt - blk_cnt);
hfsplus_dump_extent(hip->cached_extents);
+ mutex_lock(&fd.tree->tree_lock);
if (blk_cnt > start) {
hip->extent_state |= HFSPLUS_EXT_DIRTY;
break;
@@ -612,7 +614,6 @@ void hfsplus_file_truncate(struct inode *inode)
alloc_cnt = start;
hip->cached_start = hip->cached_blocks = 0;
hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
- mutex_lock(&fd.tree->tree_lock);
}
hfs_find_exit(&fd);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 7a24f91af29e..b313627b1801 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -426,7 +426,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
u32 hash;
index = page->index;
- hash = hugetlb_fault_mutex_hash(h, mapping, index, 0);
+ hash = hugetlb_fault_mutex_hash(h, mapping, index);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
/*
@@ -623,7 +623,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
addr = index * hpage_size;
/* mutex taken here, fault path and hole punch */
- hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
+ hash = hugetlb_fault_mutex_hash(h, mapping, index);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
/* See if already present in mapping to avoid alloc/free */
@@ -654,9 +654,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ set_page_huge_active(page);
/*
* unlock_page because locked by add_to_page_cache()
- * page_put due to reference from alloc_huge_page()
+ * put_page() due to reference from alloc_huge_page()
*/
unlock_page(page);
put_page(page);
diff --git a/fs/iomap.c b/fs/iomap.c
index 03edf62633dc..ac7b2152c3ad 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -116,6 +116,7 @@ iomap_page_create(struct inode *inode, struct page *page)
iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
atomic_set(&iop->read_count, 0);
atomic_set(&iop->write_count, 0);
+ spin_lock_init(&iop->uptodate_lock);
bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
/*
@@ -204,25 +205,38 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
}
static void
-iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
struct iomap_page *iop = to_iomap_page(page);
struct inode *inode = page->mapping->host;
unsigned first = off >> inode->i_blkbits;
unsigned last = (off + len - 1) >> inode->i_blkbits;
- unsigned int i;
bool uptodate = true;
+ unsigned long flags;
+ unsigned int i;
- if (iop) {
- for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
- if (i >= first && i <= last)
- set_bit(i, iop->uptodate);
- else if (!test_bit(i, iop->uptodate))
- uptodate = false;
- }
+ spin_lock_irqsave(&iop->uptodate_lock, flags);
+ for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
+ if (i >= first && i <= last)
+ set_bit(i, iop->uptodate);
+ else if (!test_bit(i, iop->uptodate))
+ uptodate = false;
}
- if (uptodate && !PageError(page))
+ if (uptodate)
+ SetPageUptodate(page);
+ spin_unlock_irqrestore(&iop->uptodate_lock, flags);
+}
+
+static void
+iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+{
+ if (PageError(page))
+ return;
+
+ if (page_has_private(page))
+ iomap_iop_set_range_uptodate(page, off, len);
+ else
SetPageUptodate(page);
}
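
The iomap change above moves the per-page uptodate bitmap update under a new uptodate_lock taken with interrupts disabled, and only promotes the whole page to uptodate once every block bit is set inside that same critical section. The sketch below shows the idea in userspace, with a pthread mutex standing in for the irq-safe spinlock; the structure, field names and block count are invented for illustration.

/* build: cc -pthread demo_uptodate_bitmap.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define BLOCKS_PER_PAGE 8       /* e.g. a 4096-byte page of 512-byte blocks */

struct demo_page {
        pthread_mutex_t uptodate_lock;  /* stands in for the irq-safe spinlock */
        bool uptodate[BLOCKS_PER_PAGE]; /* one flag per block in the page */
        bool page_uptodate;             /* set only when every block flag is set */
};

/* Mark blocks [first, last] uptodate; promote the whole page when the
 * bitmap is full.  The scan and the promotion happen under one lock, so
 * two concurrent callers cannot both miss the "all blocks done" state. */
static void set_range_uptodate(struct demo_page *p, int first, int last)
{
        bool all = true;

        pthread_mutex_lock(&p->uptodate_lock);
        for (int i = 0; i < BLOCKS_PER_PAGE; i++) {
                if (i >= first && i <= last)
                        p->uptodate[i] = true;
                else if (!p->uptodate[i])
                        all = false;
        }
        if (all)
                p->page_uptodate = true;
        pthread_mutex_unlock(&p->uptodate_lock);
}

int main(void)
{
        struct demo_page page = { .uptodate_lock = PTHREAD_MUTEX_INITIALIZER };

        set_range_uptodate(&page, 0, 3);
        printf("after first half : page uptodate = %d\n", page.page_uptodate);
        set_range_uptodate(&page, 4, 7);
        printf("after second half: page uptodate = %d\n", page.page_uptodate);
        return 0;
}
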
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index 947ce22f5b3c..55df4d80793b 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -152,6 +152,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
printk(KERN_NOTICE "iso9660: Corrupted directory entry"
" in block %lu of inode %lu\n", block,
inode->i_ino);
+ brelse(bh);
return -EIO;
}
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index cac468f04820..558e7c51ce0d 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -102,6 +102,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
printk(KERN_NOTICE "iso9660: Corrupted directory entry"
" in block %lu of inode %lu\n", block,
dir->i_ino);
+ brelse(bh);
return 0;
}
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 4200a6fe9599..97760cb9bcd7 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -992,9 +992,10 @@ restart_loop:
* journalled data) we need to unmap buffer and clear
* more bits. We also need to be careful about the check
* because the data page mapping can get cleared under
- * out hands, which alse need not to clear more bits
- * because the page and buffers will be freed and can
- * never be reused once we are done with them.
+ * our hands. Note that if mapping == NULL, we don't
+ * need to make buffer unmapped because the page is
+ * already detached from the mapping and buffers cannot
+ * get reused.
*/
mapping = READ_ONCE(bh->b_page->mapping);
if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index a15a22d20909..8a50722bca29 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1370,8 +1370,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
int ret;
/* Buffer got discarded which means block device got invalidated */
- if (!buffer_mapped(bh))
+ if (!buffer_mapped(bh)) {
+ unlock_buffer(bh);
return -EIO;
+ }
trace_jbd2_write_superblock(journal, write_flags);
if (!(journal->j_flags & JBD2_BARRIER))
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 43693b679710..8c305593fb51 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1915,6 +1915,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
*/
static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
+ J_ASSERT_JH(jh, jh->b_transaction != NULL);
+ J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+
__jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = NULL;
jbd2_journal_put_journal_head(jh);
@@ -2006,6 +2009,7 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
{
struct buffer_head *head;
struct buffer_head *bh;
+ bool has_write_io_error = false;
int ret = 0;
J_ASSERT(PageLocked(page));
@@ -2030,11 +2034,26 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
jbd_unlock_bh_state(bh);
if (buffer_jbd(bh))
goto busy;
+
+ /*
+ * If we free a metadata buffer which has been failed to
+ * write out, the jbd2 checkpoint procedure will not detect
+ * this failure and may lead to filesystem inconsistency
+ * after cleanup journal tail.
+ */
+ if (buffer_write_io_error(bh)) {
+ pr_err("JBD2: Error while async write back metadata bh %llu.",
+ (unsigned long long)bh->b_blocknr);
+ has_write_io_error = true;
+ }
} while ((bh = bh->b_this_page) != head);
ret = try_to_free_buffers(page);
busy:
+ if (has_write_io_error)
+ jbd2_journal_abort(journal, -EIO);
+
return ret;
}
@@ -2462,6 +2481,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
was_dirty = test_clear_buffer_jbddirty(bh);
__jbd2_journal_temp_unlink_buffer(jh);
+
+ /*
+ * b_transaction must be set, otherwise the new b_transaction won't
+ * be holding jh reference
+ */
+ J_ASSERT_JH(jh, jh->b_transaction != NULL);
+
/*
* We set b_transaction here because b_next_transaction will inherit
* our jh reference and thus __jbd2_journal_file_buffer() must not
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
index 406d9cc84ba8..79e771ab624f 100644
--- a/fs/jffs2/compr_rtime.c
+++ b/fs/jffs2/compr_rtime.c
@@ -37,6 +37,9 @@ static int jffs2_rtime_compress(unsigned char *data_in,
int outpos = 0;
int pos=0;
+ if (*dstlen <= 3)
+ return -1;
+
memset(positions,0,sizeof(positions));
while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index f20cff1194bb..776493713153 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
int ret;
uint32_t now = JFFS2_NOW();
+ mutex_lock(&f->sem);
for (fd = f->dents ; fd; fd = fd->next) {
- if (fd->ino)
+ if (fd->ino) {
+ mutex_unlock(&f->sem);
return -ENOTEMPTY;
+ }
}
+ mutex_unlock(&f->sem);
ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
dentry->d_name.len, f, now);
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index bccfc40b3a74..d19483fa1fe8 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -672,6 +672,22 @@ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_r
jffs2_free_full_dirent(fd);
return -EIO;
}
+
+#ifdef CONFIG_JFFS2_SUMMARY
+ /*
+ * we use CONFIG_JFFS2_SUMMARY because without it, we
+ * have checked it while mounting
+ */
+ crc = crc32(0, fd->name, rd->nsize);
+ if (unlikely(crc != je32_to_cpu(rd->name_crc))) {
+ JFFS2_NOTICE("name CRC failed on dirent node at"
+ "%#08x: read %#08x,calculated %#08x\n",
+ ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
+ jffs2_mark_node_obsolete(c, ref);
+ jffs2_free_full_dirent(fd);
+ return 0;
+ }
+#endif
}
fd->nhash = full_name_hash(NULL, fd->name, rd->nsize);
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 90431dd613b8..08813789fcf0 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -1075,7 +1075,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
memcpy(&fd->name, rd->name, checkedlen);
fd->name[checkedlen] = 0;
- crc = crc32(0, fd->name, rd->nsize);
+ crc = crc32(0, fd->name, checkedlen);
if (crc != je32_to_cpu(rd->name_crc)) {
pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
__func__, ofs, je32_to_cpu(rd->name_crc), crc);
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index be7c8a6a5748..4fe64519870f 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -783,6 +783,8 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n",
je16_to_cpu(temp->u.nodetype));
jffs2_sum_disable_collecting(c->summary);
+ /* The above call removes the list, nothing more to do */
+ goto bail_rwcompat;
} else {
BUG(); /* unknown node in summary information */
}
@@ -794,6 +796,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
c->summary->sum_num--;
}
+ bail_rwcompat:
jffs2_sum_reset_collected(c->summary);
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 49263e220dbc..687b07b9b4f6 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -1669,7 +1669,7 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
} else if (rc == -ENOSPC) {
/* search for next smaller log2 block */
l2nb = BLKSTOL2(nblocks) - 1;
- nblocks = 1 << l2nb;
+ nblocks = 1LL << l2nb;
} else {
/* Trim any already allocated blocks */
jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n");
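
The dbDiscardAG() fix above widens 1 << l2nb to 1LL << l2nb: once l2nb reaches 31 or more, shifting a 32-bit int overflows before the result ever reaches the 64-bit nblocks. A tiny demonstration of doing the shift in 64 bits, with an assumed shift count in that range:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int l2nb = 32;          /* plausible exponent for a very large extent */

        /* Writing 1 << l2nb would shift a 32-bit int and overflow for
         * l2nb >= 31; spelling it 1LL keeps all 64 bits of the result. */
        int64_t nblocks = (int64_t)1 << l2nb;
        uint32_t truncated = (uint32_t)nblocks; /* what a 32-bit result would keep */

        printf("1LL << %d   = %lld\n", l2nb, (long long)nblocks);
        printf("low 32 bits = %u (the block count would be lost)\n", truncated);
        return 0;
}
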
diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h
index 562b9a7e4311..f502a15c6c98 100644
--- a/fs/jfs/jfs_dmap.h
+++ b/fs/jfs/jfs_dmap.h
@@ -196,7 +196,7 @@ typedef union dmtree {
#define dmt_leafidx t1.leafidx
#define dmt_height t1.height
#define dmt_budmin t1.budmin
-#define dmt_stree t1.stree
+#define dmt_stree t2.stree
/*
* on-disk aggregate disk allocation map descriptor.
diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h
index b67d64671bb4..415bfa90607a 100644
--- a/fs/jfs/jfs_filsys.h
+++ b/fs/jfs/jfs_filsys.h
@@ -281,5 +281,6 @@
* fsck() must be run to repair
*/
#define FM_EXTENDFS 0x00000008 /* file system extendfs() in progress */
+#define FM_STATE_MAX 0x0000000f /* max value of s_state */
#endif /* _H_JFS_FILSYS */
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
index d8658607bf46..b5214c9ac47a 100644
--- a/fs/jfs/jfs_mount.c
+++ b/fs/jfs/jfs_mount.c
@@ -49,6 +49,7 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
+#include <linux/log2.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
@@ -378,6 +379,15 @@ static int chkSuper(struct super_block *sb)
sbi->bsize = bsize;
sbi->l2bsize = le16_to_cpu(j_sb->s_l2bsize);
+ /* check some fields for possible corruption */
+ if (sbi->l2bsize != ilog2((u32)bsize) ||
+ j_sb->pad != 0 ||
+ le32_to_cpu(j_sb->s_state) > FM_STATE_MAX) {
+ rc = -EINVAL;
+ jfs_err("jfs_mount: Mount Failure: superblock is corrupt!");
+ goto out;
+ }
+
/*
* For now, ignore s_pbsize, l2bfactor. All I/O going through buffer
* cache.
diff --git a/fs/libfs.c b/fs/libfs.c
index 02158618f4c9..be57e64834e5 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -868,7 +868,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct simple_attr *attr;
- u64 val;
+ unsigned long long val;
size_t size;
ssize_t ret;
@@ -886,7 +886,9 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
goto out;
attr->set_buf[size] = '\0';
- val = simple_strtoll(attr->set_buf, NULL, 0);
+ ret = kstrtoull(attr->set_buf, 0, &val);
+ if (ret)
+ goto out;
ret = attr->set(attr->data, val);
if (ret == 0)
ret = len; /* on success, claim we got the whole input */
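
simple_attr_write() above replaces simple_strtoll(), which silently stops at trailing junk and cannot report overflow, with kstrtoull(), so malformed input is rejected and the error is returned to the writer. The userspace analogue below uses strtoull() with explicit errno and end-pointer checks; parse_u64() and the sample inputs are invented, and the behaviour only roughly mirrors kstrtoull() (which, for instance, also tolerates a single trailing newline).

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Strict unsigned parse: reject empty strings, trailing garbage and
 * overflow -- roughly the guarantees kstrtoull() adds over simple_strtoll(). */
static int parse_u64(const char *s, unsigned long long *out)
{
        char *end;
        unsigned long long val;

        errno = 0;
        val = strtoull(s, &end, 0);
        if (errno == ERANGE)
                return -ERANGE;         /* value does not fit in 64 bits */
        if (end == s || *end != '\0')
                return -EINVAL;         /* nothing parsed, or trailing garbage */
        *out = val;
        return 0;
}

int main(void)
{
        const char *inputs[] = { "42", "0x10", "123abc", "", "99999999999999999999999" };

        for (size_t i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
                unsigned long long v;
                int err = parse_u64(inputs[i], &v);

                if (err)
                        printf("%-26s -> error %d\n", inputs[i], err);
                else
                        printf("%-26s -> %llu\n", inputs[i], v);
        }
        return 0;
}
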
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index f0b5c987d6ae..3f6ba0cd2bd9 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -432,12 +432,7 @@ nlm_bind_host(struct nlm_host *host)
* RPC rebind is required
*/
if ((clnt = host->h_rpcclnt) != NULL) {
- if (time_after_eq(jiffies, host->h_nextrebind)) {
- rpc_force_rebind(clnt);
- host->h_nextrebind = jiffies + NLM_HOST_REBIND;
- dprintk("lockd: next rebind in %lu jiffies\n",
- host->h_nextrebind - jiffies);
- }
+ nlm_rebind_host(host);
} else {
unsigned long increment = nlmsvc_timeout;
struct rpc_timeout timeparms = {
@@ -485,13 +480,20 @@ nlm_bind_host(struct nlm_host *host)
return clnt;
}
-/*
- * Force a portmap lookup of the remote lockd port
+/**
+ * nlm_rebind_host - If needed, force a portmap lookup of the peer's lockd port
+ * @host: NLM host handle for peer
+ *
+ * This is not needed when using a connection-oriented protocol, such as TCP.
+ * The existing autobind mechanism is sufficient to force a rebind when
+ * required, e.g. on connection state transitions.
*/
void
nlm_rebind_host(struct nlm_host *host)
{
- dprintk("lockd: rebind host %s\n", host->h_name);
+ if (host->h_proto != IPPROTO_UDP)
+ return;
+
if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
rpc_force_rebind(host->h_rpcclnt);
host->h_nextrebind = jiffies + NLM_HOST_REBIND;
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 72e308c3e66b..03fe8bac36cf 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -155,6 +155,25 @@ static int minix_remount (struct super_block * sb, int * flags, char * data)
return 0;
}
+static bool minix_check_superblock(struct super_block *sb)
+{
+ struct minix_sb_info *sbi = minix_sb(sb);
+
+ if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
+ return false;
+
+ /*
+ * s_max_size must not exceed the block mapping limitation. This check
+ * is only needed for V1 filesystems, since V2/V3 support an extra level
+ * of indirect blocks which places the limit well above U32_MAX.
+ */
+ if (sbi->s_version == MINIX_V1 &&
+ sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE)
+ return false;
+
+ return true;
+}
+
static int minix_fill_super(struct super_block *s, void *data, int silent)
{
struct buffer_head *bh;
@@ -190,7 +209,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
sbi->s_zmap_blocks = ms->s_zmap_blocks;
sbi->s_firstdatazone = ms->s_firstdatazone;
sbi->s_log_zone_size = ms->s_log_zone_size;
- sbi->s_max_size = ms->s_max_size;
+ s->s_maxbytes = ms->s_max_size;
s->s_magic = ms->s_magic;
if (s->s_magic == MINIX_SUPER_MAGIC) {
sbi->s_version = MINIX_V1;
@@ -221,7 +240,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
sbi->s_zmap_blocks = m3s->s_zmap_blocks;
sbi->s_firstdatazone = m3s->s_firstdatazone;
sbi->s_log_zone_size = m3s->s_log_zone_size;
- sbi->s_max_size = m3s->s_max_size;
+ s->s_maxbytes = m3s->s_max_size;
sbi->s_ninodes = m3s->s_ninodes;
sbi->s_nzones = m3s->s_zones;
sbi->s_dirsize = 64;
@@ -233,11 +252,12 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
} else
goto out_no_fs;
+ if (!minix_check_superblock(s))
+ goto out_illegal_sb;
+
/*
* Allocate the buffer map to keep the superblock small.
*/
- if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
- goto out_illegal_sb;
i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
map = kzalloc(i, GFP_KERNEL);
if (!map)
@@ -471,6 +491,13 @@ static struct inode *V1_minix_iget(struct inode *inode)
iget_failed(inode);
return ERR_PTR(-EIO);
}
+ if (raw_inode->i_nlinks == 0) {
+ printk("MINIX-fs: deleted inode referenced: %lu\n",
+ inode->i_ino);
+ brelse(bh);
+ iget_failed(inode);
+ return ERR_PTR(-ESTALE);
+ }
inode->i_mode = raw_inode->i_mode;
i_uid_write(inode, raw_inode->i_uid);
i_gid_write(inode, raw_inode->i_gid);
@@ -504,6 +531,13 @@ static struct inode *V2_minix_iget(struct inode *inode)
iget_failed(inode);
return ERR_PTR(-EIO);
}
+ if (raw_inode->i_nlinks == 0) {
+ printk("MINIX-fs: deleted inode referenced: %lu\n",
+ inode->i_ino);
+ brelse(bh);
+ iget_failed(inode);
+ return ERR_PTR(-ESTALE);
+ }
inode->i_mode = raw_inode->i_mode;
i_uid_write(inode, raw_inode->i_uid);
i_gid_write(inode, raw_inode->i_gid);
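
minix_check_superblock() above caps s_maxbytes for V1 filesystems at (7 + 512 + 512*512) * BLOCK_SIZE, the largest offset the V1 block map (7 direct zones, one indirect block, one double-indirect block) can address. A quick calculation of that limit, assuming the classic 1 KiB block with 16-bit zone numbers:

#include <stdio.h>

int main(void)
{
        const unsigned long block_size = 1024;  /* classic Minix BLOCK_SIZE */
        const unsigned long direct = 7;         /* direct zone pointers in the inode */
        const unsigned long per_indirect = 512; /* 1024-byte block / 2-byte zone nr */

        unsigned long max_blocks = direct + per_indirect + per_indirect * per_indirect;
        unsigned long long max_bytes = (unsigned long long)max_blocks * block_size;

        printf("V1 addressable blocks: %lu\n", max_blocks);
        printf("V1 maximum file size : %llu bytes (~%llu MiB)\n",
               max_bytes, max_bytes >> 20);
        return 0;
}
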
diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c
index 043c3fdbc8e7..446148792f41 100644
--- a/fs/minix/itree_common.c
+++ b/fs/minix/itree_common.c
@@ -75,6 +75,7 @@ static int alloc_branch(struct inode *inode,
int n = 0;
int i;
int parent = minix_new_block(inode);
+ int err = -ENOSPC;
branch[0].key = cpu_to_block(parent);
if (parent) for (n = 1; n < num; n++) {
@@ -85,6 +86,11 @@ static int alloc_branch(struct inode *inode,
break;
branch[n].key = cpu_to_block(nr);
bh = sb_getblk(inode->i_sb, parent);
+ if (!bh) {
+ minix_free_block(inode, nr);
+ err = -ENOMEM;
+ break;
+ }
lock_buffer(bh);
memset(bh->b_data, 0, bh->b_size);
branch[n].bh = bh;
@@ -103,7 +109,7 @@ static int alloc_branch(struct inode *inode,
bforget(branch[i].bh);
for (i = 0; i < n; i++)
minix_free_block(inode, block_to_cpu(branch[i].key));
- return -ENOSPC;
+ return err;
}
static inline int splice_branch(struct inode *inode,
diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c
index 046cc96ee7ad..1fed906042aa 100644
--- a/fs/minix/itree_v1.c
+++ b/fs/minix/itree_v1.c
@@ -29,12 +29,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
if (block < 0) {
printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n",
block, inode->i_sb->s_bdev);
- } else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
- if (printk_ratelimit())
- printk("MINIX-fs: block_to_path: "
- "block %ld too big on dev %pg\n",
- block, inode->i_sb->s_bdev);
- } else if (block < 7) {
+ return 0;
+ }
+ if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes)
+ return 0;
+
+ if (block < 7) {
offsets[n++] = block;
} else if ((block -= 7) < 512) {
offsets[n++] = 7;
diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c
index f7fc7ecccccc..9d00f31a2d9d 100644
--- a/fs/minix/itree_v2.c
+++ b/fs/minix/itree_v2.c
@@ -32,13 +32,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
if (block < 0) {
printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n",
block, sb->s_bdev);
- } else if ((u64)block * (u64)sb->s_blocksize >=
- minix_sb(sb)->s_max_size) {
- if (printk_ratelimit())
- printk("MINIX-fs: block_to_path: "
- "block %ld too big on dev %pg\n",
- block, sb->s_bdev);
- } else if (block < DIRCOUNT) {
+ return 0;
+ }
+ if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes)
+ return 0;
+
+ if (block < DIRCOUNT) {
offsets[n++] = block;
} else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) {
offsets[n++] = DIRCOUNT;
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index df081e8afcc3..168d45d3de73 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -32,7 +32,6 @@ struct minix_sb_info {
unsigned long s_zmap_blocks;
unsigned long s_firstdatazone;
unsigned long s_log_zone_size;
- unsigned long s_max_size;
int s_dirsize;
int s_namelen;
struct buffer_head ** s_imap;
diff --git a/fs/namespace.c b/fs/namespace.c
index 1fce41ba3535..741f40cd955e 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3142,8 +3142,8 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
/* make certain new is below the root */
if (!is_path_reachable(new_mnt, new.dentry, &root))
goto out4;
- root_mp->m_count++; /* pin it so it won't go away */
lock_mount_hash();
+ root_mp->m_count++; /* pin it so it won't go away */
detach_mnt(new_mnt, &parent_path);
detach_mnt(root_mnt, &root_parent);
if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index ac3e06367cb6..e55f86713948 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -127,7 +127,7 @@ config PNFS_BLOCK
config PNFS_FLEXFILE_LAYOUT
tristate
depends on NFS_V4_1 && NFS_V3
- default m
+ default NFS_V4
config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
string "NFSv4.1 Implementation ID Domain"
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 315967354954..bcc51f131a49 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -130,6 +130,8 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
list_for_each_entry(lo, &server->layouts, plh_layouts) {
+ if (!pnfs_layout_is_valid(lo))
+ continue;
if (stateid != NULL &&
!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
continue;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 4ae726e70d87..733fd9e4f0a1 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -553,6 +553,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
do {
+ if (entry->label)
+ entry->label->len = NFS4_MAXLABELLEN;
+
status = xdr_decode(desc, entry, &stream);
if (status != 0) {
if (status == -EAGAIN)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index c61bd3fc723e..e5da9d7fb69e 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -600,6 +600,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
l_ctx = nfs_get_lock_context(dreq->ctx);
if (IS_ERR(l_ctx)) {
result = PTR_ERR(l_ctx);
+ nfs_direct_req_release(dreq);
goto out_release;
}
dreq->l_ctx = l_ctx;
@@ -1023,6 +1024,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
l_ctx = nfs_get_lock_context(dreq->ctx);
if (IS_ERR(l_ctx)) {
result = PTR_ERR(l_ctx);
+ nfs_direct_req_release(dreq);
goto out_release;
}
dreq->l_ctx = l_ctx;
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 2478a69da0f0..e8e825497cbd 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -717,7 +717,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
if (unlikely(!p))
goto out_err;
fl->fh_array[i]->size = be32_to_cpup(p++);
- if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
+ if (fl->fh_array[i]->size > NFS_MAXFHSIZE) {
printk(KERN_ERR "NFS: Too big fh %d received %d\n",
i, fl->fh_array[i]->size);
goto out_err;
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index e0fe9a0f1bf1..fee421da2197 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -101,7 +101,7 @@ static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
if (unlikely(!p))
return -ENOBUFS;
fh->size = be32_to_cpup(p++);
- if (fh->size > sizeof(struct nfs_fh)) {
+ if (fh->size > NFS_MAXFHSIZE) {
printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
fh->size);
return -EOVERFLOW;
@@ -915,9 +915,8 @@ retry:
goto out_mds;
/* Use a direct mapping of ds_idx to pgio mirror_idx */
- if (WARN_ON_ONCE(pgio->pg_mirror_count !=
- FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
- goto out_mds;
+ if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
+ goto out_eagain;
for (i = 0; i < pgio->pg_mirror_count; i++) {
ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
@@ -936,11 +935,15 @@ retry:
}
return;
-
+out_eagain:
+ pnfs_generic_pg_cleanup(pgio);
+ pgio->pg_error = -EAGAIN;
+ return;
out_mds:
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
nfs_pageio_reset_write_mds(pgio);
+ pgio->pg_error = -EAGAIN;
}
static unsigned int
diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c
index 666415d13d52..b7ca0b85b1fe 100644
--- a/fs/nfs/fscache-index.c
+++ b/fs/nfs/fscache-index.c
@@ -88,8 +88,10 @@ enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
return FSCACHE_CHECKAUX_OBSOLETE;
memset(&auxdata, 0, sizeof(auxdata));
- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
+ auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
+ auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+ auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
+ auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 6f45b1a95739..7dfa45a38088 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -192,7 +192,8 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
/* create a cache index for looking up filehandles */
nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
&nfs_fscache_super_index_def,
- key, sizeof(*key) + ulen,
+ &key->key,
+ sizeof(key->key) + ulen,
NULL, 0,
nfss, 0, true);
dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
@@ -230,6 +231,19 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
}
}
+static void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
+ struct nfs_inode *nfsi)
+{
+ memset(auxdata, 0, sizeof(*auxdata));
+ auxdata->mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
+ auxdata->mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+ auxdata->ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
+ auxdata->ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
+
+ if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
+ auxdata->change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
+}
+
/*
* Initialise the per-inode cache cookie pointer for an NFS inode.
*/
@@ -243,12 +257,7 @@ void nfs_fscache_init_inode(struct inode *inode)
if (!(nfss->fscache && S_ISREG(inode->i_mode)))
return;
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
-
- if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
- auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
+ nfs_fscache_update_auxdata(&auxdata, nfsi);
nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
&nfs_fscache_inode_object_def,
@@ -268,9 +277,7 @@ void nfs_fscache_clear_inode(struct inode *inode)
dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
+ nfs_fscache_update_auxdata(&auxdata, nfsi);
fscache_relinquish_cookie(cookie, &auxdata, false);
nfsi->fscache = NULL;
}
@@ -310,9 +317,7 @@ void nfs_fscache_open_file(struct inode *inode, struct file *filp)
if (!fscache_cookie_valid(cookie))
return;
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
+ nfs_fscache_update_auxdata(&auxdata, nfsi);
if (inode_is_open_for_write(inode)) {
dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 6363ea956858..89d2f956668f 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -66,9 +66,11 @@ struct nfs_fscache_key {
* cache object.
*/
struct nfs_fscache_inode_auxdata {
- struct timespec mtime;
- struct timespec ctime;
- u64 change_attr;
+ s64 mtime_sec;
+ s64 mtime_nsec;
+ s64 ctime_sec;
+ s64 ctime_nsec;
+ u64 change_attr;
};
/*
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e4cd3a2fe698..dc55ecc3bec4 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1607,10 +1607,10 @@ EXPORT_SYMBOL_GPL(_nfs_display_fhandle);
*/
static int nfs_inode_attrs_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
{
- const struct nfs_inode *nfsi = NFS_I(inode);
+ unsigned long attr_gencount = NFS_I(inode)->attr_gencount;
- return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
- ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
+ return (long)(fattr->gencount - attr_gencount) > 0 ||
+ (long)(attr_gencount - nfs_read_attr_generation_counter()) > 0;
}
static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
@@ -2034,7 +2034,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfsi->attrtimeo_timestamp = now;
}
/* Set the barrier to be more recent than this fattr */
- if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
+ if ((long)(fattr->gencount - nfsi->attr_gencount) > 0)
nfsi->attr_gencount = fattr->gencount;
}
@@ -2142,7 +2142,7 @@ static int nfsiod_start(void)
{
struct workqueue_struct *wq;
dprintk("RPC: creating workqueue nfsiod\n");
- wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0);
+ wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (wq == NULL)
return -ENOMEM;
nfsiod_workqueue = wq;
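
nfs_inode_attrs_need_update() above switches from casting each generation counter to long before subtracting to the serial-number idiom (long)(a - b) > 0, which stays correct when the unsigned counter wraps around. A 32-bit illustration of why subtract-then-cast is the robust form (the counter values are arbitrary):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* "a is after b" for a free-running unsigned counter: subtract first,
 * then reinterpret the difference as signed, so the comparison survives
 * wraparound of the counter. */
static bool gen_after(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) > 0;
}

int main(void)
{
        uint32_t old_gen = 0xfffffff0u;         /* counter just about to wrap */
        uint32_t new_gen = 0x00000010u;         /* counter shortly after wrapping */

        printf("serial-number form: new after old = %d\n",
               gen_after(new_gen, old_gen));
        /* Casting each operand separately compares raw magnitudes and gets
         * the wrapped case backwards: */
        printf("per-operand casts : new after old = %d\n",
               (int64_t)new_gen - (int64_t)old_gen > 0);
        return 0;
}
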
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 8357ff69962f..cc07189a501f 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -575,12 +575,14 @@ extern int nfs4_test_session_trunk(struct rpc_clnt *,
static inline struct inode *nfs_igrab_and_active(struct inode *inode)
{
- inode = igrab(inode);
- if (inode != NULL && !nfs_sb_active(inode->i_sb)) {
- iput(inode);
- inode = NULL;
+ struct super_block *sb = inode->i_sb;
+
+ if (sb && nfs_sb_active(sb)) {
+ if (igrab(inode))
+ return inode;
+ nfs_sb_deactive(sb);
}
- return inode;
+ return NULL;
}
static inline void nfs_iput_and_deactive(struct inode *inode)
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index e5686be67be8..d57d453aecc2 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -30,9 +30,9 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ;
/*
* nfs_path - reconstruct the path given an arbitrary dentry
* @base - used to return pointer to the end of devname part of path
- * @dentry - pointer to dentry
+ * @dentry_in - pointer to dentry
* @buffer - result buffer
- * @buflen - length of buffer
+ * @buflen_in - length of buffer
* @flags - options (see below)
*
* Helper function for constructing the server pathname
@@ -47,15 +47,19 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ;
* the original device (export) name
* (if unset, the original name is returned verbatim)
*/
-char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen,
- unsigned flags)
+char *nfs_path(char **p, struct dentry *dentry_in, char *buffer,
+ ssize_t buflen_in, unsigned flags)
{
char *end;
int namelen;
unsigned seq;
const char *base;
+ struct dentry *dentry;
+ ssize_t buflen;
rename_retry:
+ buflen = buflen_in;
+ dentry = dentry_in;
end = buffer+buflen;
*--end = '\0';
buflen--;
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 9fce18548f7e..24d39cce8ba4 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -255,37 +255,45 @@ int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
- struct posix_acl *alloc = NULL, *dfacl = NULL;
+ struct posix_acl *orig = acl, *dfacl = NULL, *alloc;
int status;
if (S_ISDIR(inode->i_mode)) {
switch(type) {
case ACL_TYPE_ACCESS:
- alloc = dfacl = get_acl(inode, ACL_TYPE_DEFAULT);
+ alloc = get_acl(inode, ACL_TYPE_DEFAULT);
if (IS_ERR(alloc))
goto fail;
+ dfacl = alloc;
break;
case ACL_TYPE_DEFAULT:
- dfacl = acl;
- alloc = acl = get_acl(inode, ACL_TYPE_ACCESS);
+ alloc = get_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(alloc))
goto fail;
+ dfacl = acl;
+ acl = alloc;
break;
}
}
if (acl == NULL) {
- alloc = acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
+ alloc = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
if (IS_ERR(alloc))
goto fail;
+ acl = alloc;
}
status = __nfs3_proc_setacls(inode, acl, dfacl);
- posix_acl_release(alloc);
+out:
+ if (acl != orig)
+ posix_acl_release(acl);
+ if (dfacl != orig)
+ posix_acl_release(dfacl);
return status;
fail:
- return PTR_ERR(alloc);
+ status = PTR_ERR(alloc);
+ goto out;
}
const struct xattr_handler *nfs3_xattr_handlers[] = {
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 9956453aa6ff..0ed419bb02b0 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -34,6 +34,7 @@
*/
#define NFS3_fhandle_sz (1+16)
#define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */
+#define NFS3_post_op_fh_sz (1+NFS3_fh_sz)
#define NFS3_sattr_sz (15)
#define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2))
#define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2))
@@ -71,7 +72,7 @@
#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1)
#define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3)
#define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4)
-#define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
+#define NFS3_createres_sz (1+NFS3_post_op_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz))
#define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2)
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 526441de89c1..be252795a6f7 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -59,7 +59,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
loff_t offset, loff_t len)
{
- struct nfs_server *server = NFS_SERVER(file_inode(filep));
+ struct inode *inode = file_inode(filep);
+ struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_exception exception = { };
struct nfs_lock_context *lock;
int err;
@@ -68,9 +69,13 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
if (IS_ERR(lock))
return PTR_ERR(lock);
- exception.inode = file_inode(filep);
+ exception.inode = inode;
exception.state = lock->open_context->state;
+ err = nfs_sync_inode(inode);
+ if (err)
+ goto out;
+
do {
err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
if (err == -ENOTSUPP) {
@@ -79,7 +84,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
-
+out:
nfs_put_lock_context(lock);
return err;
}
@@ -117,16 +122,13 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
return -EOPNOTSUPP;
inode_lock(inode);
- err = nfs_sync_inode(inode);
- if (err)
- goto out_unlock;
err = nfs42_proc_fallocate(&msg, filep, offset, len);
if (err == 0)
truncate_pagecache_range(inode, offset, (offset + len) -1);
if (err == -EOPNOTSUPP)
NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
-out_unlock:
+
inode_unlock(inode);
return err;
}
@@ -498,7 +500,10 @@ static loff_t _nfs42_proc_llseek(struct file *filep,
if (status)
return status;
- return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
+ if (whence == SEEK_DATA && res.sr_eof)
+ return -NFS4ERR_NXIO;
+ else
+ return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}
loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 75d3cf86f172..e053a883d08d 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -148,7 +148,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
case SEEK_HOLE:
case SEEK_DATA:
ret = nfs42_proc_llseek(filep, offset, whence);
- if (ret != -ENOTSUPP)
+ if (ret != -EOPNOTSUPP)
return ret;
/* Fall through */
default:
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 668b648064b7..bcad052db065 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3129,8 +3129,10 @@ static int _nfs4_do_setattr(struct inode *inode,
/* Servers should only apply open mode checks for file size changes */
truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
- if (!truncate)
+ if (!truncate) {
+ nfs4_inode_make_writeable(inode);
goto zero_stateid;
+ }
if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
/* Use that stateid */
@@ -4685,12 +4687,12 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page **pages, unsigned int count, bool plus)
{
struct inode *dir = d_inode(dentry);
+ struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_readdir_arg args = {
.fh = NFS_FH(dir),
.pages = pages,
.pgbase = 0,
.count = count,
- .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
.plus = plus,
};
struct nfs4_readdir_res res;
@@ -4705,9 +4707,15 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
dentry,
(unsigned long long)cookie);
+ if (!(server->caps & NFS_CAP_SECURITY_LABEL))
+ args.bitmask = server->attr_bitmask_nl;
+ else
+ args.bitmask = server->attr_bitmask;
+
nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
res.pgbase = args.pgbase;
- status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
+ &res.seq_res, 0);
if (status >= 0) {
memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
status += args.pgbase;
@@ -5527,6 +5535,9 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
int ret, i;
+ /* You can't remove system.nfs4_acl: */
+ if (buflen == 0)
+ return -EINVAL;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
if (npages > ARRAY_SIZE(pages))
@@ -5603,9 +5614,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
return ret;
if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
return -ENOENT;
- if (buflen < label.len)
- return -ERANGE;
- return 0;
+ return label.len;
}
static int nfs4_get_security_label(struct inode *inode, void *buf,
@@ -6715,9 +6724,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
data->arg.new_lock_owner, ret);
} else
data->cancelled = true;
+ trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
rpc_put_task(task);
dprintk("%s: done, ret = %d!\n", __func__, ret);
- trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
return ret;
}
@@ -7008,7 +7017,12 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
err = nfs4_set_lock_state(state, fl);
if (err != 0)
return err;
- err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
+ do {
+ err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
+ if (err != -NFS4ERR_DELAY)
+ break;
+ ssleep(1);
+ } while (err == -NFS4ERR_DELAY);
return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
}
@@ -7595,9 +7609,11 @@ int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
* both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
* DS flags set.
*/
-static int nfs4_check_cl_exchange_flags(u32 flags)
+static int nfs4_check_cl_exchange_flags(u32 flags, u32 version)
{
- if (flags & ~EXCHGID4_FLAG_MASK_R)
+ if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R))
+ goto out_inval;
+ else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R))
goto out_inval;
if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
(flags & EXCHGID4_FLAG_USE_NON_PNFS))
@@ -7624,7 +7640,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
}
static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
- .rpc_call_done = &nfs4_bind_one_conn_to_session_done,
+ .rpc_call_done = nfs4_bind_one_conn_to_session_done,
};
/*
@@ -7992,7 +8008,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
if (status != 0)
goto out;
- status = nfs4_check_cl_exchange_flags(resp->flags);
+ status = nfs4_check_cl_exchange_flags(resp->flags,
+ clp->cl_mvops->minor_version);
if (status != 0)
goto out;
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 6fb7cb6b3f4b..e7a10f5f5405 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -95,7 +95,7 @@ static void nfs4_evict_inode(struct inode *inode)
nfs_inode_return_delegation_noreclaim(inode);
/* Note that above delegreturn would trigger pnfs return-on-close */
pnfs_return_layout(inode);
- pnfs_destroy_layout(NFS_I(inode));
+ pnfs_destroy_layout_final(NFS_I(inode));
/* First call standard NFS clear_inode() code */
nfs_clear_inode(inode);
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c4cf0192d7bb..0a5cae8f8aff 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -4280,7 +4280,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
goto out_overflow;
if (len < NFS4_MAXLABELLEN) {
if (label) {
- memcpy(label->label, p, len);
+ if (label->len) {
+ if (label->len < len)
+ return -ERANGE;
+ memcpy(label->label, p, len);
+ }
label->len = len;
label->pi = pi;
label->lfs = lfs;
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 9cf59e2622f8..a9e1bcdd9394 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -132,47 +132,70 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
/*
- * nfs_page_group_lock - lock the head of the page group
- * @req - request in group that is to be locked
+ * nfs_page_set_headlock - set the request PG_HEADLOCK
+ * @req: request that is to be locked
*
- * this lock must be held when traversing or modifying the page
- * group list
+ * this lock must be held when modifying req->wb_head
*
* return 0 on success, < 0 on error
*/
int
-nfs_page_group_lock(struct nfs_page *req)
+nfs_page_set_headlock(struct nfs_page *req)
{
- struct nfs_page *head = req->wb_head;
-
- WARN_ON_ONCE(head != head->wb_head);
-
- if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
+ if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
return 0;
- set_bit(PG_CONTENDED1, &head->wb_flags);
+ set_bit(PG_CONTENDED1, &req->wb_flags);
smp_mb__after_atomic();
- return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+ return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
TASK_UNINTERRUPTIBLE);
}
/*
- * nfs_page_group_unlock - unlock the head of the page group
- * @req - request in group that is to be unlocked
+ * nfs_page_clear_headlock - clear the request PG_HEADLOCK
+ * @req: request that is to be locked
*/
void
-nfs_page_group_unlock(struct nfs_page *req)
+nfs_page_clear_headlock(struct nfs_page *req)
{
- struct nfs_page *head = req->wb_head;
-
- WARN_ON_ONCE(head != head->wb_head);
-
smp_mb__before_atomic();
- clear_bit(PG_HEADLOCK, &head->wb_flags);
+ clear_bit(PG_HEADLOCK, &req->wb_flags);
smp_mb__after_atomic();
- if (!test_bit(PG_CONTENDED1, &head->wb_flags))
+ if (!test_bit(PG_CONTENDED1, &req->wb_flags))
return;
- wake_up_bit(&head->wb_flags, PG_HEADLOCK);
+ wake_up_bit(&req->wb_flags, PG_HEADLOCK);
+}
+
+/*
+ * nfs_page_group_lock - lock the head of the page group
+ * @req: request in group that is to be locked
+ *
+ * this lock must be held when traversing or modifying the page
+ * group list
+ *
+ * return 0 on success, < 0 on error
+ */
+int
+nfs_page_group_lock(struct nfs_page *req)
+{
+ int ret;
+
+ ret = nfs_page_set_headlock(req);
+ if (ret || req->wb_head == req)
+ return ret;
+ return nfs_page_set_headlock(req->wb_head);
+}
+
+/*
+ * nfs_page_group_unlock - unlock the head of the page group
+ * @req: request in group that is to be unlocked
+ */
+void
+nfs_page_group_unlock(struct nfs_page *req)
+{
+ if (req != req->wb_head)
+ nfs_page_clear_headlock(req->wb_head);
+ nfs_page_clear_headlock(req);
}
/*
@@ -865,15 +888,6 @@ static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
pgio->pg_mirror_count = mirror_count;
}
-/*
- * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
- */
-void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
-{
- pgio->pg_mirror_count = 1;
- pgio->pg_mirror_idx = 0;
-}
-
static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
pgio->pg_mirror_count = 1;
@@ -973,17 +987,16 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
-
if (!list_empty(&mirror->pg_list)) {
int error = desc->pg_ops->pg_doio(desc);
if (error < 0)
desc->pg_error = error;
- else
+ if (list_empty(&mirror->pg_list)) {
mirror->pg_bytes_written += mirror->pg_count;
- }
- if (list_empty(&mirror->pg_list)) {
- mirror->pg_count = 0;
- mirror->pg_base = 0;
+ mirror->pg_count = 0;
+ mirror->pg_base = 0;
+ mirror->pg_recoalesce = 0;
+ }
}
}
@@ -1081,7 +1094,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
do {
list_splice_init(&mirror->pg_list, &head);
- mirror->pg_bytes_written -= mirror->pg_count;
mirror->pg_count = 0;
mirror->pg_base = 0;
mirror->pg_recoalesce = 0;
@@ -1302,6 +1314,14 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
}
}
+/*
+ * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
+ */
+void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
+{
+ nfs_pageio_complete(pgio);
+}
+
int __init nfs_init_nfspagecache(void)
{
nfs_page_cachep = kmem_cache_create("nfs_page",
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 66f699e18755..c900cb2119ba 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -294,6 +294,7 @@ void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
struct inode *inode;
+ unsigned long i_state;
if (!lo)
return;
@@ -304,8 +305,12 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
if (!list_empty(&lo->plh_segs))
WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
pnfs_detach_layout_hdr(lo);
+ i_state = inode->i_state;
spin_unlock(&inode->i_lock);
pnfs_free_layout_hdr(lo);
+ /* Notify pnfs_destroy_layout_final() that we're done */
+ if (i_state & (I_FREEING | I_CLEAR))
+ wake_up_var(lo);
}
}
@@ -713,8 +718,7 @@ pnfs_free_lseg_list(struct list_head *free_me)
}
}
-void
-pnfs_destroy_layout(struct nfs_inode *nfsi)
+static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi)
{
struct pnfs_layout_hdr *lo;
LIST_HEAD(tmp_list);
@@ -732,9 +736,34 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
pnfs_put_layout_hdr(lo);
} else
spin_unlock(&nfsi->vfs_inode.i_lock);
+ return lo;
+}
+
+void pnfs_destroy_layout(struct nfs_inode *nfsi)
+{
+ __pnfs_destroy_layout(nfsi);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
+static bool pnfs_layout_removed(struct nfs_inode *nfsi,
+ struct pnfs_layout_hdr *lo)
+{
+ bool ret;
+
+ spin_lock(&nfsi->vfs_inode.i_lock);
+ ret = nfsi->layout != lo;
+ spin_unlock(&nfsi->vfs_inode.i_lock);
+ return ret;
+}
+
+void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
+{
+ struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi);
+
+ if (lo)
+ wait_var_event(lo, pnfs_layout_removed(nfsi, lo));
+}
+
static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
struct list_head *layout_list)
@@ -1181,31 +1210,27 @@ out:
return status;
}
+static bool
+pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo,
+ enum pnfs_iomode iomode,
+ u32 seq)
+{
+ struct pnfs_layout_range recall_range = {
+ .length = NFS4_MAX_UINT64,
+ .iomode = iomode,
+ };
+ return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
+ &recall_range, seq) != -EBUSY;
+}
+
/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
{
- struct pnfs_layout_segment *s;
- enum pnfs_iomode iomode;
- u32 seq;
-
if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
return false;
-
- seq = lo->plh_return_seq;
- iomode = lo->plh_return_iomode;
-
- /* Defer layoutreturn until all recalled lsegs are done */
- list_for_each_entry(s, &lo->plh_segs, pls_list) {
- if (seq && pnfs_seqid_is_newer(s->pls_seq, seq))
- continue;
- if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode)
- continue;
- if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
- return false;
- }
-
- return true;
+ return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode,
+ lo->plh_return_seq);
}
static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
@@ -1243,6 +1268,11 @@ _pnfs_return_layout(struct inode *ino)
{
struct pnfs_layout_hdr *lo = NULL;
struct nfs_inode *nfsi = NFS_I(ino);
+ struct pnfs_layout_range range = {
+ .iomode = IOMODE_ANY,
+ .offset = 0,
+ .length = NFS4_MAX_UINT64,
+ };
LIST_HEAD(tmp_list);
nfs4_stateid stateid;
int status = 0;
@@ -1269,16 +1299,10 @@ _pnfs_return_layout(struct inode *ino)
}
valid_layout = pnfs_layout_is_valid(lo);
pnfs_clear_layoutcommit(ino, &tmp_list);
- pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
+ pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0);
- if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
- struct pnfs_layout_range range = {
- .iomode = IOMODE_ANY,
- .offset = 0,
- .length = NFS4_MAX_UINT64,
- };
+ if (NFS_SERVER(ino)->pnfs_curr_ld->return_range)
NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
- }
/* Don't send a LAYOUTRETURN if list was initially empty */
if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
@@ -1435,12 +1459,18 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
int ret)
{
struct pnfs_layout_hdr *lo = args->layout;
+ struct inode *inode = args->inode;
const nfs4_stateid *arg_stateid = NULL;
const nfs4_stateid *res_stateid = NULL;
struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
switch (ret) {
case -NFS4ERR_NOMATCHING_LAYOUT:
+ spin_lock(&inode->i_lock);
+ if (pnfs_layout_is_valid(lo) &&
+ nfs4_stateid_match_other(&args->stateid, &lo->plh_stateid))
+ pnfs_set_plh_return_info(lo, args->range.iomode, 0);
+ spin_unlock(&inode->i_lock);
break;
case 0:
if (res->lrs_present)
@@ -2116,6 +2146,7 @@ static void _lgopen_prepare_attached(struct nfs4_opendata *data,
&rng, GFP_KERNEL);
if (!lgp) {
pnfs_clear_first_layoutget(lo);
+ nfs_layoutget_end(lo);
pnfs_put_layout_hdr(lo);
return;
}
@@ -2269,7 +2300,13 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
* We got an entirely new state ID. Mark all segments for the
* inode invalid, and retry the layoutget
*/
- pnfs_mark_layout_stateid_invalid(lo, &free_me);
+ struct pnfs_layout_range range = {
+ .iomode = IOMODE_ANY,
+ .length = NFS4_MAX_UINT64,
+ };
+ pnfs_set_plh_return_info(lo, IOMODE_ANY, 0);
+ pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
+ &range, 0);
goto out_forget;
}
@@ -2288,19 +2325,10 @@ out_forget:
spin_unlock(&ino->i_lock);
lseg->pls_layout = lo;
NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
+ pnfs_free_lseg_list(&free_me);
return ERR_PTR(-EAGAIN);
}
-static int
-mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg,
- struct list_head *tmp_list)
-{
- if (!mark_lseg_invalid(lseg, tmp_list))
- return 0;
- pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg);
- return 1;
-}
-
/**
* pnfs_mark_matching_lsegs_return - Free or return matching layout segments
* @lo: pointer to layout header
@@ -2330,6 +2358,9 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
assert_spin_locked(&lo->plh_inode->i_lock);
+ if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+ tmp_list = &lo->plh_return_segs;
+
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
dprintk("%s: marking lseg %p iomode %d "
@@ -2337,7 +2368,9 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
lseg, lseg->pls_range.iomode,
lseg->pls_range.offset,
lseg->pls_range.length);
- if (mark_lseg_invalid_or_return(lseg, tmp_list))
+ if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
+ tmp_list = &lo->plh_return_segs;
+ if (mark_lseg_invalid(lseg, tmp_list))
continue;
remaining++;
set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 3ba44819a88a..80fafa29e567 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -254,6 +254,7 @@ struct pnfs_layout_segment *pnfs_layout_process(struct nfs4_layoutget *lgp);
void pnfs_layoutget_free(struct nfs4_layoutget *lgp);
void pnfs_free_lseg_list(struct list_head *tmp_list);
void pnfs_destroy_layout(struct nfs_inode *);
+void pnfs_destroy_layout_final(struct nfs_inode *);
void pnfs_destroy_all_layouts(struct nfs_client *);
int pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
struct nfs_fsid *fsid,
@@ -645,6 +646,10 @@ static inline void pnfs_destroy_layout(struct nfs_inode *nfsi)
{
}
+static inline void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
+{
+}
+
static inline struct pnfs_layout_segment *
pnfs_get_lseg(struct pnfs_layout_segment *lseg)
{
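Editor's note: the pnfs.c and pnfs.h hunks above let pnfs_destroy_layout_final() block until the very last pnfs_put_layout_hdr() has run, by pairing wait_var_event() with wake_up_var() keyed on the layout header's address. A minimal sketch of that pattern follows; struct foo/foo_table and the helper names are hypothetical stand-ins, not the NFS code. The important detail is that the waiter's condition never dereferences the object, which may already be freed by the time the wakeup arrives.

struct foo { int data; };

struct foo_table {
	spinlock_t lock;
	struct foo *active;		/* plays the role of nfsi->layout */
};

static void foo_detach_and_free(struct foo_table *t, struct foo *f)
{
	spin_lock(&t->lock);
	t->active = NULL;		/* like pnfs_detach_layout_hdr() */
	spin_unlock(&t->lock);
	kfree(f);
	wake_up_var(f);			/* address used only as a wait-queue key */
}

static bool foo_detached(struct foo_table *t, struct foo *f)
{
	bool gone;

	spin_lock(&t->lock);
	gone = t->active != f;		/* compares pointers, never dereferences f */
	spin_unlock(&t->lock);
	return gone;
}

static void foo_wait_for_detach(struct foo_table *t, struct foo *f)
{
	wait_var_event(f, foo_detached(t, f));
}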
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index ce1da8cbac00..d419d89b91f7 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -416,22 +416,29 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
destroy_list = (subreq->wb_this_page == old_head) ?
NULL : subreq->wb_this_page;
+ /* Note: lock subreq in order to change subreq->wb_head */
+ nfs_page_set_headlock(subreq);
WARN_ON_ONCE(old_head != subreq->wb_head);
/* make sure old group is not used */
subreq->wb_this_page = subreq;
+ subreq->wb_head = subreq;
clear_bit(PG_REMOVE, &subreq->wb_flags);
/* Note: races with nfs_page_group_destroy() */
if (!kref_read(&subreq->wb_kref)) {
/* Check if we raced with nfs_page_group_destroy() */
- if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
+ if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
+ nfs_page_clear_headlock(subreq);
nfs_free_request(subreq);
+ } else
+ nfs_page_clear_headlock(subreq);
continue;
}
+ nfs_page_clear_headlock(subreq);
- subreq->wb_head = subreq;
+ nfs_release_request(old_head);
if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
nfs_release_request(subreq);
diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
index 5be08f02a76b..4f90c444907f 100644
--- a/fs/nfs_common/grace.c
+++ b/fs/nfs_common/grace.c
@@ -68,10 +68,14 @@ __state_in_grace(struct net *net, bool open)
if (!open)
return !list_empty(grace_list);
+ spin_lock(&grace_lock);
list_for_each_entry(lm, grace_list, list) {
- if (lm->block_opens)
+ if (lm->block_opens) {
+ spin_unlock(&grace_lock);
return true;
+ }
}
+ spin_unlock(&grace_lock);
return false;
}
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 83919116d5cb..b90bea1c434e 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -844,9 +844,14 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
if (isdotent(name, namlen)) {
if (namlen == 2) {
dchild = dget_parent(dparent);
- /* filesystem root - cannot return filehandle for ".." */
+ /*
+ * Don't return filehandle for ".." if we're at
+ * the filesystem or export root:
+ */
if (dchild == dparent)
goto out;
+ if (dparent == exp->ex_path.dentry)
+ goto out;
} else
dchild = dget(dparent);
} else
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index ebbb0285addb..7ee417b685e9 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1149,6 +1149,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
err = setup_callback_client(clp, &conn, ses);
if (err) {
nfsd4_mark_cb_down(clp, err);
+ if (c)
+ svc_xprt_put(c->cn_xprt);
return;
}
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index ed73e86194fa..655079ae1dd1 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -252,6 +252,8 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
if (!nbl) {
nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
if (nbl) {
+ INIT_LIST_HEAD(&nbl->nbl_list);
+ INIT_LIST_HEAD(&nbl->nbl_lru);
fh_copy_shallow(&nbl->nbl_fh, fh);
locks_init_lock(&nbl->nbl_lock);
nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
@@ -469,6 +471,8 @@ find_any_file(struct nfs4_file *f)
{
struct file *ret;
+ if (!f)
+ return NULL;
spin_lock(&f->fi_lock);
ret = __nfs4_get_fd(f, O_RDWR);
if (!ret) {
@@ -1205,6 +1209,12 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
nfs4_free_stateowner(sop);
}
+static bool
+nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
+{
+ return list_empty(&stp->st_perfile);
+}
+
static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
struct nfs4_file *fp = stp->st_stid.sc_file;
@@ -1272,9 +1282,11 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
+ if (!unhash_ol_stateid(stp))
+ return false;
list_del_init(&stp->st_locks);
nfs4_unhash_stid(&stp->st_stid);
- return unhash_ol_stateid(stp);
+ return true;
}
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
@@ -1339,13 +1351,12 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
struct list_head *reaplist)
{
- bool unhashed;
-
lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
- unhashed = unhash_ol_stateid(stp);
+ if (!unhash_ol_stateid(stp))
+ return false;
release_open_stateid_locks(stp, reaplist);
- return unhashed;
+ return true;
}
static void release_open_stateid(struct nfs4_ol_stateid *stp)
@@ -5772,21 +5783,21 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
}
static struct nfs4_ol_stateid *
-find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
+find_lock_stateid(const struct nfs4_lockowner *lo,
+ const struct nfs4_ol_stateid *ost)
{
struct nfs4_ol_stateid *lst;
- struct nfs4_client *clp = lo->lo_owner.so_client;
- lockdep_assert_held(&clp->cl_lock);
+ lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
- list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
- if (lst->st_stid.sc_type != NFS4_LOCK_STID)
- continue;
- if (lst->st_stid.sc_file == fp) {
- refcount_inc(&lst->st_stid.sc_count);
- return lst;
+ /* If ost is not hashed, ost->st_locks will not be valid */
+ if (!nfs4_ol_stateid_unhashed(ost))
+ list_for_each_entry(lst, &ost->st_locks, st_locks) {
+ if (lst->st_stateowner == &lo->lo_owner) {
+ refcount_inc(&lst->st_stid.sc_count);
+ return lst;
+ }
}
- }
return NULL;
}
@@ -5802,11 +5813,11 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
retry:
spin_lock(&clp->cl_lock);
- spin_lock(&fp->fi_lock);
- retstp = find_lock_stateid(lo, fp);
+ if (nfs4_ol_stateid_unhashed(open_stp))
+ goto out_close;
+ retstp = find_lock_stateid(lo, open_stp);
if (retstp)
- goto out_unlock;
-
+ goto out_found;
refcount_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_LOCK_STID;
stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
@@ -5815,22 +5826,26 @@ retry:
stp->st_access_bmap = 0;
stp->st_deny_bmap = open_stp->st_deny_bmap;
stp->st_openstp = open_stp;
+ spin_lock(&fp->fi_lock);
list_add(&stp->st_locks, &open_stp->st_locks);
list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
list_add(&stp->st_perfile, &fp->fi_stateids);
-out_unlock:
spin_unlock(&fp->fi_lock);
spin_unlock(&clp->cl_lock);
- if (retstp) {
- if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
- nfs4_put_stid(&retstp->st_stid);
- goto retry;
- }
- /* To keep mutex tracking happy */
- mutex_unlock(&stp->st_mutex);
- stp = retstp;
- }
return stp;
+out_found:
+ spin_unlock(&clp->cl_lock);
+ if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+ nfs4_put_stid(&retstp->st_stid);
+ goto retry;
+ }
+ /* To keep mutex tracking happy */
+ mutex_unlock(&stp->st_mutex);
+ return retstp;
+out_close:
+ spin_unlock(&clp->cl_lock);
+ mutex_unlock(&stp->st_mutex);
+ return NULL;
}
static struct nfs4_ol_stateid *
@@ -5845,7 +5860,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
*new = false;
spin_lock(&clp->cl_lock);
- lst = find_lock_stateid(lo, fi);
+ lst = find_lock_stateid(lo, ost);
spin_unlock(&clp->cl_lock);
if (lst != NULL) {
if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 0d20fd161225..01f7ce1ae127 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -118,6 +118,13 @@ done:
return nfsd_return_attrs(nfserr, resp);
}
+/* Obsolete, replaced by MNTPROC_MNT. */
+static __be32
+nfsd_proc_root(struct svc_rqst *rqstp)
+{
+ return nfs_ok;
+}
+
/*
* Look up a path name component
* Note: the dentry in the resp->fh may be negative if the file
@@ -201,6 +208,13 @@ nfsd_proc_read(struct svc_rqst *rqstp)
return fh_getattr(&resp->fh, &resp->stat);
}
+/* Reserved */
+static __be32
+nfsd_proc_writecache(struct svc_rqst *rqstp)
+{
+ return nfs_ok;
+}
+
/*
* Write data to a file
* N.B. After this call resp->fh needs an fh_put
@@ -615,6 +629,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_xdrressize = ST+AT,
},
[NFSPROC_ROOT] = {
+ .pc_func = nfsd_proc_root,
.pc_decode = nfssvc_decode_void,
.pc_encode = nfssvc_encode_void,
.pc_argsize = sizeof(struct nfsd_void),
@@ -652,6 +667,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4,
},
[NFSPROC_WRITECACHE] = {
+ .pc_func = nfsd_proc_writecache,
.pc_decode = nfssvc_decode_void,
.pc_encode = nfssvc_encode_void,
.pc_argsize = sizeof(struct nfsd_void),
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 89cb484f1cfb..ad38633392a0 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -417,8 +417,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
return;
nfsd_shutdown_net(net);
- printk(KERN_WARNING "nfsd: last server has exited, flushing export "
- "cache\n");
+ pr_info("nfsd: last server has exited, flushing export cache\n");
nfsd_export_flush(net);
}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 80cededcd10d..28e7f86c8c94 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1206,6 +1206,9 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
iap->ia_mode = 0;
iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;
+ if (!IS_POSIXACL(dirp))
+ iap->ia_mode &= ~current_umask();
+
err = 0;
host_err = 0;
switch (type) {
@@ -1439,6 +1442,9 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
}
+ if (!IS_POSIXACL(dirp))
+ iap->ia_mode &= ~current_umask();
+
host_err = vfs_create(dirp, dchild, iap->ia_mode, true);
if (host_err < 0) {
fh_drop_write(fhp);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 445eef41bfaf..91b58c897f92 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2780,6 +2780,8 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
if (!nilfs->ns_writer)
return -ENOMEM;
+ inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
+
err = nilfs_segctor_start_thread(nilfs->ns_writer);
if (err) {
kfree(nilfs->ns_writer);
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index a18b8d7a3075..ca3405f73264 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -114,6 +114,10 @@ static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
if (!fsnotify_iter_should_report_type(iter_info, type))
continue;
mark = iter_info->marks[type];
+
+ /* Apply ignore mask regardless of ISDIR and ON_CHILD flags */
+ marks_ignored_mask |= mark->ignored_mask;
+
/*
* If the event is for a child and this mark doesn't care about
* events on a child, don't send it!
@@ -124,7 +128,6 @@ static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
continue;
marks_mask |= mark->mask;
- marks_ignored_mask |= mark->ignored_mask;
}
if (d_is_dir(path->dentry) &&
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index bd3221cbdd95..a2fb866ff76e 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -654,6 +654,12 @@ static int ntfs_read_locked_inode(struct inode *vi)
}
a = ctx->attr;
/* Get the standard information attribute value. */
+ if ((u8 *)a + le16_to_cpu(a->data.resident.value_offset)
+ + le32_to_cpu(a->data.resident.value_length) >
+ (u8 *)ctx->mrec + vol->mft_record_size) {
+ ntfs_error(vi->i_sb, "Corrupt standard information attribute in inode.");
+ goto unm_err_out;
+ }
si = (STANDARD_INFORMATION*)((u8*)a +
le16_to_cpu(a->data.resident.value_offset));
@@ -1835,6 +1841,12 @@ int ntfs_read_inode_mount(struct inode *vi)
brelse(bh);
}
+ if (le32_to_cpu(m->bytes_allocated) != vol->mft_record_size) {
+ ntfs_error(sb, "Incorrect mft record size %u in superblock, should be %u.",
+ le32_to_cpu(m->bytes_allocated), vol->mft_record_size);
+ goto err_out;
+ }
+
/* Apply the mst fixups. */
if (post_read_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size)) {
/* FIXME: Try to use the $MFTMirr now. */
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index a342f008e42f..ff0e083ce2a1 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7403,6 +7403,10 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_inline_data *idata = &di->id2.i_data;
+ /* No need to punch hole beyond i_size. */
+ if (start >= i_size_read(inode))
+ return 0;
+
if (end > i_size_read(inode))
end = i_size_read(inode);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 543efa3e5655..b6948813eb06 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2311,7 +2311,7 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
struct ocfs2_alloc_context *meta_ac = NULL;
handle_t *handle = NULL;
loff_t end = offset + bytes;
- int ret = 0, credits = 0, locked = 0;
+ int ret = 0, credits = 0;
ocfs2_init_dealloc_ctxt(&dealloc);
@@ -2322,13 +2322,6 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
!dwc->dw_orphaned)
goto out;
- /* ocfs2_file_write_iter will get i_mutex, so we need not lock if we
- * are in that context. */
- if (dwc->dw_writer_pid != task_pid_nr(current)) {
- inode_lock(inode);
- locked = 1;
- }
-
ret = ocfs2_inode_lock(inode, &di_bh, 1);
if (ret < 0) {
mlog_errno(ret);
@@ -2409,8 +2402,6 @@ out:
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
ocfs2_run_deallocs(osb, &dealloc);
- if (locked)
- inode_unlock(inode);
ocfs2_dio_free_write_ctx(inode, dwc);
return ret;
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 9b2ed62dd638..19b0d358a0d6 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -2154,7 +2154,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
o2hb_nego_timeout_handler,
reg, NULL, &reg->hr_handler_list);
if (ret)
- goto free;
+ goto remove_item;
ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key,
sizeof(struct o2hb_nego_msg),
@@ -2173,6 +2173,12 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
unregister_handler:
o2net_unregister_handler_list(&reg->hr_handler_list);
+remove_item:
+ spin_lock(&o2hb_live_lock);
+ list_del(&reg->hr_all_item);
+ if (o2hb_global_heartbeat_active())
+ clear_bit(reg->hr_region_num, o2hb_region_bitmap);
+ spin_unlock(&o2hb_live_lock);
free:
kfree(reg);
return ERR_PTR(ret);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 178cb9e6772a..8149fb6f1f0d 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -682,6 +682,12 @@ static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
&ocfs2_nfs_sync_lops, osb);
}
+static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb)
+{
+ ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+ init_rwsem(&osb->nfs_sync_rwlock);
+}
+
void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
{
struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
@@ -2851,14 +2857,25 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
+ if (ex)
+ down_write(&osb->nfs_sync_rwlock);
+ else
+ down_read(&osb->nfs_sync_rwlock);
+
if (ocfs2_mount_local(osb))
return 0;
status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
0, 0);
- if (status < 0)
+ if (status < 0) {
mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
+ if (ex)
+ up_write(&osb->nfs_sync_rwlock);
+ else
+ up_read(&osb->nfs_sync_rwlock);
+ }
+
return status;
}
@@ -2869,6 +2886,10 @@ void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres,
ex ? LKM_EXMODE : LKM_PRMODE);
+ if (ex)
+ up_write(&osb->nfs_sync_rwlock);
+ else
+ up_read(&osb->nfs_sync_rwlock);
}
int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
@@ -3314,7 +3335,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
local:
ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
- ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+ ocfs2_nfs_sync_lock_init(osb);
ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
osb->cconn = conn;
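Editor's note: the dlmglue.c change above wraps the cluster-wide NFS sync lock in a local rw_semaphore: the semaphore is taken before the cluster lock, dropped again if the cluster lock fails, and dropped in the unlock path. A rough sketch of that lock pairing; take_cluster_lock()/drop_cluster_lock() are hypothetical stand-ins for the ocfs2 cluster primitives.

int take_cluster_lock(int ex);		/* hypothetical */
void drop_cluster_lock(int ex);		/* hypothetical */

static int sync_lock(struct rw_semaphore *rwsem, int ex)
{
	int status;

	if (ex)
		down_write(rwsem);
	else
		down_read(rwsem);

	status = take_cluster_lock(ex);
	if (status < 0) {
		/* keep the local and cluster locks balanced on failure */
		if (ex)
			up_write(rwsem);
		else
			up_read(rwsem);
	}
	return status;
}

static void sync_unlock(struct rw_semaphore *rwsem, int ex)
{
	drop_cluster_lock(ex);
	if (ex)
		up_write(rwsem);
	else
		up_read(rwsem);
}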
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index fbd70111a2f1..5c507569ef70 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1252,22 +1252,24 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
goto bail_unlock;
}
}
+ down_write(&OCFS2_I(inode)->ip_alloc_sem);
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
2 * ocfs2_quota_trans_credits(sb));
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
- goto bail_unlock;
+ goto bail_unlock_alloc;
}
status = __dquot_transfer(inode, transfer_to);
if (status < 0)
goto bail_commit;
} else {
+ down_write(&OCFS2_I(inode)->ip_alloc_sem);
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
- goto bail_unlock;
+ goto bail_unlock_alloc;
}
}
@@ -1280,6 +1282,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
bail_commit:
ocfs2_commit_trans(osb, handle);
+bail_unlock_alloc:
+ up_write(&OCFS2_I(inode)->ip_alloc_sem);
bail_unlock:
if (status && inode_locked) {
ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 4f86ac0027b5..b9f62d29355b 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -338,8 +338,8 @@ struct ocfs2_super
spinlock_t osb_lock;
u32 s_next_generation;
unsigned long osb_flags;
- s16 s_inode_steal_slot;
- s16 s_meta_steal_slot;
+ u16 s_inode_steal_slot;
+ u16 s_meta_steal_slot;
atomic_t s_num_inodes_stolen;
atomic_t s_num_meta_stolen;
@@ -406,6 +406,7 @@ struct ocfs2_super
struct ocfs2_lock_res osb_super_lockres;
struct ocfs2_lock_res osb_rename_lockres;
struct ocfs2_lock_res osb_nfs_sync_lockres;
+ struct rw_semaphore nfs_sync_rwlock;
struct ocfs2_lock_res osb_trim_fs_lockres;
struct ocfs2_dlm_debug *osb_dlm_debug;
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 7071ad0dec90..d50b7f2c7395 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -303,7 +303,7 @@
#define OCFS2_MAX_SLOTS 255
/* Slot map indicator for an empty slot */
-#define OCFS2_INVALID_SLOT -1
+#define OCFS2_INVALID_SLOT ((u16)-1)
#define OCFS2_VOL_UUID_LEN 16
#define OCFS2_MAX_VOL_LABEL_LEN 64
@@ -339,8 +339,8 @@ struct ocfs2_system_inode_info {
enum {
BAD_BLOCK_SYSTEM_INODE = 0,
GLOBAL_INODE_ALLOC_SYSTEM_INODE,
+#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE
SLOT_MAP_SYSTEM_INODE,
-#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE
HEARTBEAT_SYSTEM_INODE,
GLOBAL_BITMAP_SYSTEM_INODE,
USER_QUOTA_SYSTEM_INODE,
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index f7c972fbed6a..0230b4ece0f0 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -893,9 +893,9 @@ static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type)
{
spin_lock(&osb->osb_lock);
if (type == INODE_ALLOC_SYSTEM_INODE)
- osb->s_inode_steal_slot = slot;
+ osb->s_inode_steal_slot = (u16)slot;
else if (type == EXTENT_ALLOC_SYSTEM_INODE)
- osb->s_meta_steal_slot = slot;
+ osb->s_meta_steal_slot = (u16)slot;
spin_unlock(&osb->osb_lock);
}
@@ -2841,9 +2841,12 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
goto bail;
}
- inode_alloc_inode =
- ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
- suballoc_slot);
+ if (suballoc_slot == (u16)OCFS2_INVALID_SLOT)
+ inode_alloc_inode = ocfs2_get_system_file_inode(osb,
+ GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
+ else
+ inode_alloc_inode = ocfs2_get_system_file_inode(osb,
+ INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
if (!inode_alloc_inode) {
/* the error code could be inaccurate, but we are not able to
* get the correct one. */
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 3415e0b09398..09bc2cf5f61c 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -92,7 +92,7 @@ struct mount_options
unsigned long commit_interval;
unsigned long mount_opt;
unsigned int atime_quantum;
- signed short slot;
+ unsigned short slot;
int localalloc_opt;
unsigned int resv_level;
int dir_resv_level;
@@ -1384,7 +1384,7 @@ static int ocfs2_parse_options(struct super_block *sb,
goto bail;
}
if (option)
- mopt->slot = (s16)option;
+ mopt->slot = (u16)option;
break;
case Opt_commit:
if (match_int(&args[0], &option)) {
@@ -1747,6 +1747,7 @@ static void ocfs2_inode_init_once(void *data)
oi->ip_blkno = 0ULL;
oi->ip_clusters = 0;
+ oi->ip_next_orphan = NULL;
ocfs2_resv_init_once(&oi->ip_la_data_resv);
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index ffc73600216b..30abafcd4ecc 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -43,7 +43,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
{
ssize_t list_size, size, value_size = 0;
char *buf, *name, *value = NULL;
- int uninitialized_var(error);
+ int error = 0;
size_t slen;
if (!(old->d_inode->i_opflags & IOP_XATTR) ||
@@ -79,6 +79,14 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
if (ovl_is_private_xattr(name))
continue;
+
+ error = security_inode_copy_up_xattr(name);
+ if (error < 0 && error != -EOPNOTSUPP)
+ break;
+ if (error == 1) {
+ error = 0;
+ continue; /* Discard */
+ }
retry:
size = vfs_getxattr(old, name, value, value_size);
if (size == -ERANGE)
@@ -102,13 +110,6 @@ retry:
goto retry;
}
- error = security_inode_copy_up_xattr(name);
- if (error < 0 && error != -EOPNOTSUPP)
- break;
- if (error == 1) {
- error = 0;
- continue; /* Discard */
- }
error = vfs_setxattr(new, name, value, size, 0);
if (error)
break;
@@ -823,7 +824,7 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
int ovl_copy_up_flags(struct dentry *dentry, int flags)
{
int err = 0;
- const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
+ const struct cred *old_cred;
bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED);
/*
@@ -834,6 +835,7 @@ int ovl_copy_up_flags(struct dentry *dentry, int flags)
if (WARN_ON(disconnected && d_is_dir(dentry)))
return -EIO;
+ old_cred = ovl_override_creds(dentry->d_sb);
while (!err) {
struct dentry *next;
struct dentry *parent = NULL;
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 800bcad67325..25be2ab6ff6c 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -946,8 +946,8 @@ static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect)
buflen -= thislen;
memcpy(&buf[buflen], name, thislen);
- tmp = dget_dlock(d->d_parent);
spin_unlock(&d->d_lock);
+ tmp = dget_parent(d);
dput(d);
d = tmp;
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 6fe303850c9e..ba6c7c59261a 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -485,7 +485,7 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
if (IS_ERR_OR_NULL(this))
return this;
- if (WARN_ON(ovl_dentry_real_at(this, layer->idx) != real)) {
+ if (ovl_dentry_real_at(this, layer->idx) != real) {
dput(this);
this = ERR_PTR(-EIO);
}
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index fa5ac5de807c..73c3e2c21edb 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -24,13 +24,16 @@ static char ovl_whatisit(struct inode *inode, struct inode *realinode)
return 'm';
}
+/* No atime modification nor notify on underlying */
+#define OVL_OPEN_FLAGS (O_NOATIME | FMODE_NONOTIFY)
+
static struct file *ovl_open_realfile(const struct file *file,
struct inode *realinode)
{
struct inode *inode = file_inode(file);
struct file *realfile;
const struct cred *old_cred;
- int flags = file->f_flags | O_NOATIME | FMODE_NONOTIFY;
+ int flags = file->f_flags | OVL_OPEN_FLAGS;
old_cred = ovl_override_creds(inode->i_sb);
realfile = open_with_fake_path(&file->f_path, flags, realinode,
@@ -51,8 +54,7 @@ static int ovl_change_flags(struct file *file, unsigned int flags)
struct inode *inode = file_inode(file);
int err;
- /* No atime modificaton on underlying */
- flags |= O_NOATIME | FMODE_NONOTIFY;
+ flags |= OVL_OPEN_FLAGS;
/* If some flag changed that cannot be changed then something's amiss */
if (WARN_ON((file->f_flags ^ flags) & ~OVL_SETFL_MASK))
@@ -105,7 +107,7 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
}
/* Did the flags change since open? */
- if (unlikely((file->f_flags ^ real->file->f_flags) & ~O_NOATIME))
+ if (unlikely((file->f_flags ^ real->file->f_flags) & ~OVL_OPEN_FLAGS))
return ovl_change_flags(real->file, file->f_flags);
return 0;
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index a138bb3bc2a5..08e60a6df77c 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -340,7 +340,9 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
goto out;
if (!value && !upperdentry) {
+ old_cred = ovl_override_creds(dentry->d_sb);
err = vfs_getxattr(realdentry, name, NULL, 0);
+ revert_creds(old_cred);
if (err < 0)
goto out_drop_write;
}
@@ -884,7 +886,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL;
bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry,
oip->index);
- int fsid = bylower ? oip->lowerpath->layer->fsid : 0;
+ int fsid = bylower ? lowerpath->layer->fsid : 0;
bool is_dir, metacopy = false;
unsigned long ino = 0;
int err = oip->newinode ? -EEXIST : -ENOMEM;
@@ -934,6 +936,8 @@ struct inode *ovl_get_inode(struct super_block *sb,
err = -ENOMEM;
goto out_err;
}
+ ino = realinode->i_ino;
+ fsid = lowerpath->layer->fsid;
}
ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev, ino, fsid);
ovl_inode_init(inode, upperdentry, lowerdentry, oip->lowerdata);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 127df4a85c8a..c97d8d251eb9 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -82,7 +82,7 @@ static void ovl_dentry_release(struct dentry *dentry)
static struct dentry *ovl_d_real(struct dentry *dentry,
const struct inode *inode)
{
- struct dentry *real;
+ struct dentry *real = NULL, *lower;
/* It's an overlay file */
if (inode && d_inode(dentry) == inode)
@@ -101,9 +101,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
if (real && !inode && ovl_has_upperdata(d_inode(dentry)))
return real;
- real = ovl_dentry_lowerdata(dentry);
- if (!real)
+ lower = ovl_dentry_lowerdata(dentry);
+ if (!lower)
goto bug;
+ real = lower;
/* Handle recursion */
real = d_real(real, inode);
@@ -111,8 +112,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
if (!inode || inode == d_inode(real))
return real;
bug:
- WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
- inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
+ WARN(1, "%s(%pd4, %s:%lu): real dentry (%p/%lu) not found\n",
+ __func__, dentry, inode ? inode->i_sb->s_id : "NULL",
+ inode ? inode->i_ino : 0, real,
+ real && d_inode(real) ? d_inode(real)->i_ino : 0);
return dentry;
}
@@ -1310,14 +1313,23 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
if (err < 0)
goto out;
+ /*
+ * Check if lower root conflicts with this overlay layers before
+ * checking if it is in-use as upperdir/workdir of "another"
+ * mount, because we do not bother to check in ovl_is_inuse() if
+ * the upperdir/workdir is in fact in-use by our
+ * upperdir/workdir.
+ */
err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir");
if (err)
goto out;
if (ovl_is_inuse(stack[i].dentry)) {
err = ovl_report_in_use(ofs, "lowerdir");
- if (err)
+ if (err) {
+ iput(trap);
goto out;
+ }
}
mnt = clone_private_mount(&stack[i]);
@@ -1467,7 +1479,8 @@ out_err:
* - upper/work dir of any overlayfs instance
*/
static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
- struct dentry *dentry, const char *name)
+ struct dentry *dentry, const char *name,
+ bool is_lower)
{
struct dentry *next = dentry, *parent;
int err = 0;
@@ -1479,7 +1492,7 @@ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
/* Walk back ancestors to root (inclusive) looking for traps */
while (!err && parent != next) {
- if (ovl_lookup_trap_inode(sb, parent)) {
+ if (is_lower && ovl_lookup_trap_inode(sb, parent)) {
err = -ELOOP;
pr_err("overlayfs: overlapping %s path\n", name);
} else if (ovl_is_inuse(parent)) {
@@ -1505,7 +1518,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
if (ofs->upper_mnt) {
err = ovl_check_layer(sb, ofs, ofs->upper_mnt->mnt_root,
- "upperdir");
+ "upperdir", false);
if (err)
return err;
@@ -1516,7 +1529,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
* workbasedir. In that case, we already have their traps in
* inode cache and we will catch that case on lookup.
*/
- err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir");
+ err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir",
+ false);
if (err)
return err;
}
@@ -1524,7 +1538,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
for (i = 0; i < ofs->numlower; i++) {
err = ovl_check_layer(sb, ofs,
ofs->lower_layers[i].mnt->mnt_root,
- "lowerdir");
+ "lowerdir", true);
if (err)
return err;
}
diff --git a/fs/pnode.c b/fs/pnode.c
index 53d411a371ce..7910ae91f17e 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -266,14 +266,13 @@ static int propagate_one(struct mount *m)
if (IS_ERR(child))
return PTR_ERR(child);
child->mnt.mnt_flags &= ~MNT_LOCKED;
+ read_seqlock_excl(&mount_lock);
mnt_set_mountpoint(m, mp, child);
+ if (m->mnt_master != dest_master)
+ SET_MNT_MARK(m->mnt_master);
+ read_sequnlock_excl(&mount_lock);
last_dest = m;
last_source = child;
- if (m->mnt_master != dest_master) {
- read_seqlock_excl(&mount_lock);
- SET_MNT_MARK(m->mnt_master);
- read_sequnlock_excl(&mount_lock);
- }
hlist_add_head(&child->mnt_hash, list);
return count_mounts(m->mnt_ns, child);
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3b9b726b1a6c..bc736ea1192a 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1035,7 +1035,6 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
{
- static DEFINE_MUTEX(oom_adj_mutex);
struct mm_struct *mm = NULL;
struct task_struct *task;
int err = 0;
@@ -1075,7 +1074,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
struct task_struct *p = find_lock_task_mm(task);
if (p) {
- if (atomic_read(&p->mm->mm_users) > 1) {
+ if (test_bit(MMF_MULTIPROCESS, &p->mm->flags)) {
mm = p->mm;
mmgrab(mm);
}
@@ -2565,6 +2564,10 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
void *page;
int rv;
+ /* A task may only write when it was the opener. */
+ if (file->f_cred != current_real_cred())
+ return -EPERM;
+
rcu_read_lock();
task = pid_task(proc_pid(inode), PIDTYPE_PID);
if (!task) {
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index e39bac94dead..bab10368a04d 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -137,8 +137,12 @@ static int proc_getattr(const struct path *path, struct kstat *stat,
{
struct inode *inode = d_inode(path->dentry);
struct proc_dir_entry *de = PDE(inode);
- if (de && de->nlink)
- set_nlink(inode, de->nlink);
+ if (de) {
+ nlink_t nlink = READ_ONCE(de->nlink);
+ if (nlink > 0) {
+ set_nlink(inode, nlink);
+ }
+ }
generic_fillattr(inode, stat);
return 0;
@@ -337,6 +341,16 @@ static const struct file_operations proc_dir_operations = {
.iterate_shared = proc_readdir,
};
+static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ return 0;
+}
+
+const struct dentry_operations proc_net_dentry_ops = {
+ .d_revalidate = proc_net_d_revalidate,
+ .d_delete = always_delete_dentry,
+};
+
/*
* proc directories can do almost nothing..
*/
@@ -361,6 +375,7 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
write_unlock(&proc_subdir_lock);
goto out_free_inum;
}
+ dir->nlink++;
write_unlock(&proc_subdir_lock);
return dp;
@@ -458,8 +473,8 @@ struct proc_dir_entry *proc_symlink(const char *name,
}
EXPORT_SYMBOL(proc_symlink);
-struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
- struct proc_dir_entry *parent, void *data)
+struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode,
+ struct proc_dir_entry *parent, void *data, bool force_lookup)
{
struct proc_dir_entry *ent;
@@ -471,13 +486,20 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
ent->data = data;
ent->proc_fops = &proc_dir_operations;
ent->proc_iops = &proc_dir_inode_operations;
- parent->nlink++;
+ if (force_lookup) {
+ pde_force_lookup(ent);
+ }
ent = proc_register(parent, ent);
- if (!ent)
- parent->nlink--;
}
return ent;
}
+EXPORT_SYMBOL_GPL(_proc_mkdir);
+
+struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
+ struct proc_dir_entry *parent, void *data)
+{
+ return _proc_mkdir(name, mode, parent, data, false);
+}
EXPORT_SYMBOL_GPL(proc_mkdir_data);
struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
@@ -504,10 +526,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name)
ent->data = NULL;
ent->proc_fops = NULL;
ent->proc_iops = NULL;
- parent->nlink++;
ent = proc_register(parent, ent);
- if (!ent)
- parent->nlink--;
}
return ent;
}
@@ -665,8 +684,12 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
len = strlen(fn);
de = pde_subdir_find(parent, fn, len);
- if (de)
+ if (de) {
rb_erase(&de->subdir_node, &parent->subdir);
+ if (S_ISDIR(de->mode)) {
+ parent->nlink--;
+ }
+ }
write_unlock(&proc_subdir_lock);
if (!de) {
WARN(1, "name '%s'\n", name);
@@ -675,9 +698,6 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
proc_entry_rundown(de);
- if (S_ISDIR(de->mode))
- parent->nlink--;
- de->nlink = 0;
WARN(pde_subdir_first(de),
"%s: removing non-empty directory '%s/%s', leaking at least '%s'\n",
__func__, de->parent->name, de->name, pde_subdir_first(de)->name);
@@ -713,13 +733,12 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
de = next;
continue;
}
- write_unlock(&proc_subdir_lock);
-
- proc_entry_rundown(de);
next = de->parent;
if (S_ISDIR(de->mode))
next->nlink--;
- de->nlink = 0;
+ write_unlock(&proc_subdir_lock);
+
+ proc_entry_rundown(de);
if (de == root)
break;
pde_put(de);
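Editor's note: the proc_getattr() hunk above snapshots de->nlink with READ_ONCE() because the count is now adjusted by proc_register()/remove_proc_entry() under proc_subdir_lock, while getattr reads it without that lock. A generic sketch of the lockless-reader side, using a hypothetical counted structure; whether the writer also needs WRITE_ONCE() depends on the annotations a given subsystem uses.

struct counted {
	unsigned int nlink;	/* updated by writers under their own lock */
};

static void counted_publish(const struct counted *c, unsigned int *out)
{
	unsigned int snap = READ_ONCE(c->nlink);	/* load exactly once */

	if (snap > 0)		/* act only on the snapshot, never re-read */
		*out = snap;
}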
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index fc5306a31a1d..31bf3bb8ddae 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -451,7 +451,7 @@ const struct inode_operations proc_link_inode_operations = {
struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
- struct inode *inode = new_inode_pseudo(sb);
+ struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = de->low_ino;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 95b14196f284..4f14906ef16b 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -305,3 +305,10 @@ extern unsigned long task_statm(struct mm_struct *,
unsigned long *, unsigned long *,
unsigned long *, unsigned long *);
extern void task_mem(struct seq_file *, struct mm_struct *);
+
+extern const struct dentry_operations proc_net_dentry_ops;
+static inline void pde_force_lookup(struct proc_dir_entry *pde)
+{
+ /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
+ pde->proc_dops = &proc_net_dentry_ops;
+}
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index a7b12435519e..096bcc1e7a8a 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -38,22 +38,6 @@ static struct net *get_proc_net(const struct inode *inode)
return maybe_get_net(PDE_NET(PDE(inode)));
}
-static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
-{
- return 0;
-}
-
-static const struct dentry_operations proc_net_dentry_ops = {
- .d_revalidate = proc_net_d_revalidate,
- .d_delete = always_delete_dentry,
-};
-
-static void pde_force_lookup(struct proc_dir_entry *pde)
-{
- /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
- pde->proc_dops = &proc_net_dentry_ops;
-}
-
static int seq_open_net(struct inode *inode, struct file *file)
{
unsigned int state_size = PDE(inode)->state_size;
diff --git a/fs/proc/self.c b/fs/proc/self.c
index 127265e5c55f..7922edf70ce1 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -16,6 +16,13 @@ static const char *proc_self_get_link(struct dentry *dentry,
pid_t tgid = task_tgid_nr_ns(current, ns);
char *name;
+ /*
+ * Not currently supported. Once we can inherit all of struct pid,
+ * we can allow this.
+ */
+ if (current->flags & PF_KTHREAD)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (!tgid)
return ERR_PTR(-ENOENT);
/* max length of unsigned int in decimal + NULL term */
@@ -42,7 +49,7 @@ int proc_setup_self(struct super_block *s)
inode_lock(root_inode);
self = d_alloc_name(s->s_root, "self");
if (self) {
- struct inode *inode = new_inode_pseudo(s);
+ struct inode *inode = new_inode(s);
if (inode) {
inode->i_ino = self_inum;
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c
index b905010ca9eb..ed009aa135dd 100644
--- a/fs/proc/thread_self.c
+++ b/fs/proc/thread_self.c
@@ -42,7 +42,7 @@ int proc_setup_thread_self(struct super_block *s)
inode_lock(root_inode);
thread_self = d_alloc_name(s->s_root, "thread-self");
if (thread_self) {
- struct inode *inode = new_inode_pseudo(s);
+ struct inode *inode = new_inode(s);
if (inode) {
inode->i_ino = thread_self_inum;
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 5c5f161763c8..c4147e50af98 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -250,7 +250,8 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
if (start < offset + dump->size) {
tsz = min(offset + (u64)dump->size - start, (u64)size);
buf = dump->buf + start - offset;
- if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
+ if (remap_vmalloc_range_partial(vma, dst, buf, 0,
+ tsz)) {
ret = -EFAULT;
goto out_unlock;
}
@@ -607,7 +608,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
- kaddr, tsz))
+ kaddr, 0, tsz))
goto fail;
size -= tsz;
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 6f90d91a8733..6f51c6d7b965 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -99,11 +99,11 @@ static void *pstore_ftrace_seq_next(struct seq_file *s, void *v, loff_t *pos)
struct pstore_private *ps = s->private;
struct pstore_ftrace_seq_data *data = v;
+ (*pos)++;
data->off += REC_SIZE;
if (data->off + REC_SIZE > ps->total_size)
return NULL;
- (*pos)++;
return data;
}
@@ -113,6 +113,9 @@ static int pstore_ftrace_seq_show(struct seq_file *s, void *v)
struct pstore_ftrace_seq_data *data = v;
struct pstore_ftrace_record *rec;
+ if (!data)
+ return 0;
+
rec = (struct pstore_ftrace_record *)(ps->record->buf + data->off);
seq_printf(s, "CPU:%d ts:%llu %08lx %08lx %pf <- %pF\n",
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 4bae3f4fe829..904c2a60f5ba 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -250,6 +250,9 @@ static int pstore_compress(const void *in, void *out,
{
int ret;
+ if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
+ return -EINVAL;
+
ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
if (ret) {
pr_err("crypto_comp_compress failed, ret = %d!\n", ret);
@@ -647,7 +650,7 @@ static void decompress_record(struct pstore_record *record)
int unzipped_len;
char *decompressed;
- if (!record->compressed)
+ if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
return;
/* Only PSTORE_TYPE_DMESG support compression. */
@@ -802,9 +805,9 @@ static int __init pstore_init(void)
ret = pstore_init_fs();
if (ret)
- return ret;
+ free_buf_for_compression();
- return 0;
+ return ret;
}
late_initcall(pstore_init);
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index bb3f59bcfcf5..656f9ff63edd 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -61,7 +61,7 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
memset(buf, 0, info->dqi_usable_bs);
return sb->s_op->quota_read(sb, info->dqi_type, buf,
- info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
+ info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
}
static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
@@ -70,7 +70,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
ssize_t ret;
ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
- info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
+ info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
if (ret != info->dqi_usable_bs) {
quota_error(sb, "dquota write failed");
if (ret >= 0)
@@ -283,7 +283,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
blk);
goto out_buf;
}
- dquot->dq_off = (blk << info->dqi_blocksize_bits) +
+ dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
sizeof(struct qt_disk_dqdbheader) +
i * info->dqi_entry_size;
kfree(buf);
@@ -558,7 +558,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
ret = -EIO;
goto out_buf;
} else {
- ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
+ ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
qt_disk_dqdbheader) + i * info->dqi_entry_size;
}
out_buf:
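Editor's note: the quota_tree.c hunks above all make the same fix: blk is a 32-bit unsigned value, so blk << info->dqi_blocksize_bits is evaluated in 32 bits and silently wraps once the byte offset no longer fits, which casting to loff_t before the shift avoids. A tiny standalone illustration of the arithmetic (plain C, not the quota code; the numbers are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t blk = 5000000;		/* block number */
	unsigned int bits = 10;		/* 1 KiB tree blocks */

	uint64_t wrong = blk << bits;		/* shift done in 32 bits: wraps */
	uint64_t right = (uint64_t)blk << bits;	/* shift done in 64 bits */

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}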
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index a73e5b34db41..addfaae8decf 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -158,7 +158,31 @@ static int v2_read_file_info(struct super_block *sb, int type)
qinfo->dqi_entry_size = sizeof(struct v2r1_disk_dqblk);
qinfo->dqi_ops = &v2r1_qtree_ops;
}
+ ret = -EUCLEAN;
+ /* Some sanity checks of the read headers... */
+ if ((loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits >
+ i_size_read(sb_dqopt(sb)->files[type])) {
+ quota_error(sb, "Number of blocks too big for quota file size (%llu > %llu).",
+ (loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits,
+ i_size_read(sb_dqopt(sb)->files[type]));
+ goto out_free;
+ }
+ if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) {
+ quota_error(sb, "Free block number too big (%u >= %u).",
+ qinfo->dqi_free_blk, qinfo->dqi_blocks);
+ goto out_free;
+ }
+ if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) {
+ quota_error(sb, "Block with free entry too big (%u >= %u).",
+ qinfo->dqi_free_entry, qinfo->dqi_blocks);
+ goto out_free;
+ }
ret = 0;
+out_free:
+ if (ret) {
+ kfree(info->dqi_priv);
+ info->dqi_priv = NULL;
+ }
out:
up_read(&dqopt->dqio_sem);
return ret;
@@ -283,6 +307,7 @@ static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot)
d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
d->dqb_btime = cpu_to_le64(m->dqb_btime);
d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
+ d->dqb_pad = 0;
if (qtree_entry_unused(info, dp))
d->dqb_itime = cpu_to_le64(1);
}
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 3ac1f2387083..5e1ebbe639eb 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -228,7 +228,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
if (!pages)
goto out_free;
- nr = find_get_pages(inode->i_mapping, &pgoff, lpages, pages);
+ nr = find_get_pages_contig(inode->i_mapping, pgoff, lpages, pages);
if (nr != lpages)
goto out_free_pages; /* leave if some pages were missing */
diff --git a/fs/readdir.c b/fs/readdir.c
index 443270f635f4..3c5ce8a0ddc9 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -132,6 +132,9 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
if (buf->result)
return -EINVAL;
+ buf->result = verify_dirent_name(name, namlen);
+ if (buf->result < 0)
+ return buf->result;
d_ino = ino;
if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
buf->result = -EOVERFLOW;
@@ -398,6 +401,9 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
if (buf->result)
return -EINVAL;
+ buf->result = verify_dirent_name(name, namlen);
+ if (buf->result < 0)
+ return buf->result;
d_ino = ino;
if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
buf->result = -EOVERFLOW;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 6419e6dacc39..ac35ddf0dd60 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1553,11 +1553,7 @@ void reiserfs_read_locked_inode(struct inode *inode,
* set version 1, version 2 could be used too, because stat data
* key is the same in both versions
*/
- key.version = KEY_FORMAT_3_5;
- key.on_disk_key.k_dir_id = dirino;
- key.on_disk_key.k_objectid = inode->i_ino;
- key.on_disk_key.k_offset = 0;
- key.on_disk_key.k_type = 0;
+ _make_cpu_key(&key, KEY_FORMAT_3_5, dirino, inode->i_ino, 0, 0, 3);
/* look for the object's stat data */
retval = search_item(inode->i_sb, &key, &path_to_sd);
@@ -2165,7 +2161,8 @@ out_end_trans:
out_inserted_sd:
clear_nlink(inode);
th->t_trans_id = 0; /* so the caller can't use this handle later */
- unlock_new_inode(inode); /* OK to do even if we hadn't locked it */
+ if (inode->i_state & I_NEW)
+ unlock_new_inode(inode);
iput(inode);
return err;
}
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 2946713cb00d..5229038852ca 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -454,6 +454,12 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
"(second one): %h", ih);
return 0;
}
+ if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
+ reiserfs_warning(NULL, "reiserfs-5093",
+ "item entry count seems wrong %h",
+ ih);
+ return 0;
+ }
prev_location = ih_location(ih);
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index de5eda33c92a..ec5716dd58c2 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1264,6 +1264,10 @@ static int reiserfs_parse_options(struct super_block *s,
"turned on.");
return 0;
}
+ if (qf_names[qtype] !=
+ REISERFS_SB(s)->s_qf_names[qtype])
+ kfree(qf_names[qtype]);
+ qf_names[qtype] = NULL;
if (*arg) { /* Some filename specified? */
if (REISERFS_SB(s)->s_qf_names[qtype]
&& strcmp(REISERFS_SB(s)->s_qf_names[qtype],
@@ -1293,10 +1297,6 @@ static int reiserfs_parse_options(struct super_block *s,
else
*mount_options |= 1 << REISERFS_GRPQUOTA;
} else {
- if (qf_names[qtype] !=
- REISERFS_SB(s)->s_qf_names[qtype])
- kfree(qf_names[qtype]);
- qf_names[qtype] = NULL;
if (qtype == USRQUOTA)
*mount_options &= ~(1 << REISERFS_USRQUOTA);
else
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index ee216925a709..0a397f179fd6 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -665,6 +665,13 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
if (get_inode_sd_version(inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
+ /*
+ * priv_root needn't be initialized during mount so allow initial
+ * lookups to succeed.
+ */
+ if (!REISERFS_SB(inode->i_sb)->priv_root)
+ return 0;
+
dentry = xattr_lookup(inode, name, XATTR_REPLACE);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h
index c764352447ba..81bec2c80b25 100644
--- a/fs/reiserfs/xattr.h
+++ b/fs/reiserfs/xattr.h
@@ -43,7 +43,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec);
static inline int reiserfs_xattrs_initialized(struct super_block *sb)
{
- return REISERFS_SB(sb)->priv_root != NULL;
+ return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root;
}
#define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c
index f86f51f99ace..1dcadd22b440 100644
--- a/fs/romfs/storage.c
+++ b/fs/romfs/storage.c
@@ -221,10 +221,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos,
size_t limit;
limit = romfs_maxsize(sb);
- if (pos >= limit)
+ if (pos >= limit || buflen > limit - pos)
return -EIO;
- if (buflen > limit - pos)
- buflen = limit - pos;
#ifdef CONFIG_ROMFS_ON_MTD
if (sb->s_mtd)
diff --git a/fs/select.c b/fs/select.c
index 4a6b6e4b21cb..11a7051075b4 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -1003,10 +1003,9 @@ static long do_restart_poll(struct restart_block *restart_block)
ret = do_sys_poll(ufds, nfds, to);
- if (ret == -EINTR) {
- restart_block->fn = do_restart_poll;
- ret = -ERESTART_RESTARTBLOCK;
- }
+ if (ret == -EINTR)
+ ret = set_restart_fn(restart_block, do_restart_poll);
+
return ret;
}
@@ -1028,7 +1027,6 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
struct restart_block *restart_block;
restart_block = &current->restart_block;
- restart_block->fn = do_restart_poll;
restart_block->poll.ufds = ufds;
restart_block->poll.nfds = nfds;
@@ -1039,7 +1037,7 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
} else
restart_block->poll.has_timeout = 0;
- ret = -ERESTART_RESTARTBLOCK;
+ ret = set_restart_fn(restart_block, do_restart_poll);
}
return ret;
}
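Editor's note: the fs/select.c hunks above replace the open-coded pair "restart_block->fn = do_restart_poll; ret = -ERESTART_RESTARTBLOCK;" with a single set_restart_fn() call, so the restart function and the return code can no longer be set independently. Conceptually the helper does something like the sketch below; this is a simplified stand-in, not the kernel's definition, and the real helper may also do per-architecture bookkeeping that the sketch omits.

/* simplified sketch, not the real helper */
static inline long set_restart_fn_sketch(struct restart_block *restart,
					 long (*fn)(struct restart_block *))
{
	restart->fn = fn;
	return -ERESTART_RESTARTBLOCK;
}

A caller then writes ret = set_restart_fn_sketch(restart_block, do_restart_poll); and can never install the function pointer without also returning -ERESTART_RESTARTBLOCK.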
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 4fcd1498acf5..3c40a3bf772c 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -313,9 +313,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
{
sigset_t mask;
- if (sizemask != sizeof(sigset_t) ||
- copy_from_user(&mask, user_mask, sizeof(mask)))
+ if (sizemask != sizeof(sigset_t))
return -EINVAL;
+ if (copy_from_user(&mask, user_mask, sizeof(mask)))
+ return -EFAULT;
return do_signalfd4(ufd, &mask, flags);
}
@@ -324,9 +325,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
{
sigset_t mask;
- if (sizemask != sizeof(sigset_t) ||
- copy_from_user(&mask, user_mask, sizeof(mask)))
+ if (sizemask != sizeof(sigset_t))
return -EINVAL;
+ if (copy_from_user(&mask, user_mask, sizeof(mask)))
+ return -EFAULT;
return do_signalfd4(ufd, &mask, 0);
}
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
index 8073b6532cf0..1d406a2094a5 100644
--- a/fs/squashfs/export.c
+++ b/fs/squashfs/export.c
@@ -54,12 +54,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
struct squashfs_sb_info *msblk = sb->s_fs_info;
int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
- u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]);
+ u64 start;
__le64 ino;
int err;
TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
+ if (ino_num == 0 || (ino_num - 1) >= msblk->inodes)
+ return -EINVAL;
+
+ start = le64_to_cpu(msblk->inode_lookup_table[blk]);
+
err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
if (err < 0)
return err;
@@ -124,7 +129,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
u64 lookup_table_start, u64 next_table, unsigned int inodes)
{
unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
+ unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes);
+ int n;
__le64 *table;
+ u64 start, end;
TRACE("In read_inode_lookup_table, length %d\n", length);
@@ -134,20 +142,41 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
if (inodes == 0)
return ERR_PTR(-EINVAL);
- /* length bytes should not extend into the next table - this check
- * also traps instances where lookup_table_start is incorrectly larger
- * than the next table start
+ /*
+ * The computed size of the lookup table (length bytes) should exactly
+ * match the table start and end points
*/
- if (lookup_table_start + length > next_table)
+ if (length != (next_table - lookup_table_start))
return ERR_PTR(-EINVAL);
table = squashfs_read_table(sb, lookup_table_start, length);
+ if (IS_ERR(table))
+ return table;
/*
- * table[0] points to the first inode lookup table metadata block,
- * this should be less than lookup_table_start
+ * table[0], table[1], ... table[indexes - 1] store the locations
+ * of the compressed inode lookup blocks. Each entry should be
+ * less than the next (i.e. table[0] < table[1]), and the difference
+ * between them should be SQUASHFS_METADATA_SIZE or less.
+ * table[indexes - 1] should be less than lookup_table_start, and
+ * again the difference should be SQUASHFS_METADATA_SIZE or less
*/
- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) {
+ for (n = 0; n < (indexes - 1); n++) {
+ start = le64_to_cpu(table[n]);
+ end = le64_to_cpu(table[n + 1]);
+
+ if (start >= end
+ || (end - start) >
+ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ start = le64_to_cpu(table[indexes - 1]);
+ if (start >= lookup_table_start ||
+ (lookup_table_start - start) >
+ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
kfree(table);
return ERR_PTR(-EINVAL);
}
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index f1c1430ae721..0bcb83479fcb 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -224,11 +224,11 @@ failure:
* If the skip factor is limited in this way then the file will use multiple
* slots.
*/
-static inline int calculate_skip(int blocks)
+static inline int calculate_skip(u64 blocks)
{
- int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
+ u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
* SQUASHFS_META_INDEXES);
- return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
+ return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
}
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
index d38ea3dab951..d2e15baab537 100644
--- a/fs/squashfs/id.c
+++ b/fs/squashfs/id.c
@@ -48,10 +48,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index,
struct squashfs_sb_info *msblk = sb->s_fs_info;
int block = SQUASHFS_ID_BLOCK(index);
int offset = SQUASHFS_ID_BLOCK_OFFSET(index);
- u64 start_block = le64_to_cpu(msblk->id_table[block]);
+ u64 start_block;
__le32 disk_id;
int err;
+ if (index >= msblk->ids)
+ return -EINVAL;
+
+ start_block = le64_to_cpu(msblk->id_table[block]);
+
err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset,
sizeof(disk_id));
if (err < 0)
@@ -69,7 +74,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
u64 id_table_start, u64 next_table, unsigned short no_ids)
{
unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
+ unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids);
+ int n;
__le64 *table;
+ u64 start, end;
TRACE("In read_id_index_table, length %d\n", length);
@@ -80,20 +88,38 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
return ERR_PTR(-EINVAL);
/*
- * length bytes should not extend into the next table - this check
- * also traps instances where id_table_start is incorrectly larger
- * than the next table start
+ * The computed size of the index table (length bytes) should exactly
+ * match the table start and end points
*/
- if (id_table_start + length > next_table)
+ if (length != (next_table - id_table_start))
return ERR_PTR(-EINVAL);
table = squashfs_read_table(sb, id_table_start, length);
+ if (IS_ERR(table))
+ return table;
/*
- * table[0] points to the first id lookup table metadata block, this
- * should be less than id_table_start
+ * table[0], table[1], ... table[indexes - 1] store the locations
+ * of the compressed id blocks. Each entry should be less than
+ * the next (i.e. table[0] < table[1]), and the difference between them
+ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
+ * should be less than id_table_start, and again the difference
+ * should be SQUASHFS_METADATA_SIZE or less
*/
- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) {
+ for (n = 0; n < (indexes - 1); n++) {
+ start = le64_to_cpu(table[n]);
+ end = le64_to_cpu(table[n + 1]);
+
+ if (start >= end || (end - start) >
+ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ start = le64_to_cpu(table[indexes - 1]);
+ if (start >= id_table_start || (id_table_start - start) >
+ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
kfree(table);
return ERR_PTR(-EINVAL);
}
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 4e6853f084d0..10e93345b615 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -30,6 +30,7 @@
/* size of metadata (inode and directory) blocks */
#define SQUASHFS_METADATA_SIZE 8192
+#define SQUASHFS_BLOCK_OFFSET 2
/* default size of block device I/O */
#ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index ef69c31947bf..5234c19a0eab 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -77,5 +77,6 @@ struct squashfs_sb_info {
unsigned int inodes;
unsigned int fragments;
int xattr_ids;
+ unsigned int ids;
};
#endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 40e657386fa5..728b2d72f3f0 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -176,6 +176,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
msblk->inodes = le32_to_cpu(sblk->inodes);
msblk->fragments = le32_to_cpu(sblk->fragments);
+ msblk->ids = le16_to_cpu(sblk->no_ids);
flags = le16_to_cpu(sblk->flags);
TRACE("Found valid superblock on %pg\n", sb->s_bdev);
@@ -187,7 +188,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
TRACE("Block size %d\n", msblk->block_size);
TRACE("Number of inodes %d\n", msblk->inodes);
TRACE("Number of fragments %d\n", msblk->fragments);
- TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
+ TRACE("Number of ids %d\n", msblk->ids);
TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
TRACE("sblk->fragment_table_start %llx\n",
@@ -244,8 +245,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
allocate_id_index_table:
/* Allocate and read id index table */
msblk->id_table = squashfs_read_id_index_table(sb,
- le64_to_cpu(sblk->id_table_start), next_table,
- le16_to_cpu(sblk->no_ids));
+ le64_to_cpu(sblk->id_table_start), next_table, msblk->ids);
if (IS_ERR(msblk->id_table)) {
ERROR("unable to read id index table\n");
err = PTR_ERR(msblk->id_table);
diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
index afe70f815e3d..86b0a0073e51 100644
--- a/fs/squashfs/xattr.h
+++ b/fs/squashfs/xattr.h
@@ -30,8 +30,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
u64 start, u64 *xattr_table_start, int *xattr_ids)
{
+ struct squashfs_xattr_id_table *id_table;
+
+ id_table = squashfs_read_table(sb, start, sizeof(*id_table));
+ if (IS_ERR(id_table))
+ return (__le64 *) id_table;
+
+ *xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
+ kfree(id_table);
+
ERROR("Xattrs in filesystem, these will be ignored\n");
- *xattr_table_start = start;
return ERR_PTR(-ENOTSUPP);
}
diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
index c89607d690c4..7f718d2bf357 100644
--- a/fs/squashfs/xattr_id.c
+++ b/fs/squashfs/xattr_id.c
@@ -44,10 +44,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
struct squashfs_sb_info *msblk = sb->s_fs_info;
int block = SQUASHFS_XATTR_BLOCK(index);
int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
- u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
+ u64 start_block;
struct squashfs_xattr_id id;
int err;
+ if (index >= msblk->xattr_ids)
+ return -EINVAL;
+
+ start_block = le64_to_cpu(msblk->xattr_id_table[block]);
+
err = squashfs_read_metadata(sb, &id, &start_block, &offset,
sizeof(id));
if (err < 0)
@@ -63,13 +68,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
/*
* Read uncompressed xattr id lookup table indexes from disk into memory
*/
-__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
+__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
u64 *xattr_table_start, int *xattr_ids)
{
- unsigned int len;
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ unsigned int len, indexes;
struct squashfs_xattr_id_table *id_table;
+ __le64 *table;
+ u64 start, end;
+ int n;
- id_table = squashfs_read_table(sb, start, sizeof(*id_table));
+ id_table = squashfs_read_table(sb, table_start, sizeof(*id_table));
if (IS_ERR(id_table))
return (__le64 *) id_table;
@@ -83,13 +92,54 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
if (*xattr_ids == 0)
return ERR_PTR(-EINVAL);
- /* xattr_table should be less than start */
- if (*xattr_table_start >= start)
+ len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
+ indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids);
+
+ /*
+ * The computed size of the index table (len bytes) should exactly
+ * match the table start and end points
+ */
+ start = table_start + sizeof(*id_table);
+ end = msblk->bytes_used;
+
+ if (len != (end - start))
return ERR_PTR(-EINVAL);
- len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
+ table = squashfs_read_table(sb, start, len);
+ if (IS_ERR(table))
+ return table;
+
+ /* table[0], table[1], ... table[indexes - 1] store the locations
+ * of the compressed xattr id blocks. Each entry should be less than
+ * the next (i.e. table[0] < table[1]), and the difference between them
+ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
+ * should be less than table_start, and again the difference
+ * should be SQUASHFS_METADATA_SIZE or less.
+ *
+ * Finally xattr_table_start should be less than table[0].
+ */
+ for (n = 0; n < (indexes - 1); n++) {
+ start = le64_to_cpu(table[n]);
+ end = le64_to_cpu(table[n + 1]);
+
+ if (start >= end || (end - start) >
+ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ start = le64_to_cpu(table[indexes - 1]);
+ if (start >= table_start || (table_start - start) >
+ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
- TRACE("In read_xattr_index_table, length %d\n", len);
+ if (*xattr_table_start >= le64_to_cpu(table[0])) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
- return squashfs_read_table(sb, start + sizeof(*id_table), len);
+ return table;
}
diff --git a/fs/super.c b/fs/super.c
index f3a8c008e164..9fb4553c46e6 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1360,36 +1360,11 @@ EXPORT_SYMBOL(__sb_end_write);
*/
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
- bool force_trylock = false;
- int ret = 1;
+ if (!wait)
+ return percpu_down_read_trylock(sb->s_writers.rw_sem + level-1);
-#ifdef CONFIG_LOCKDEP
- /*
- * We want lockdep to tell us about possible deadlocks with freezing
- * but it's it bit tricky to properly instrument it. Getting a freeze
- * protection works as getting a read lock but there are subtle
- * problems. XFS for example gets freeze protection on internal level
- * twice in some cases, which is OK only because we already hold a
- * freeze protection also on higher level. Due to these cases we have
- * to use wait == F (trylock mode) which must not fail.
- */
- if (wait) {
- int i;
-
- for (i = 0; i < level - 1; i++)
- if (percpu_rwsem_is_held(sb->s_writers.rw_sem + i)) {
- force_trylock = true;
- break;
- }
- }
-#endif
- if (wait && !force_trylock)
- percpu_down_read(sb->s_writers.rw_sem + level-1);
- else
- ret = percpu_down_read_trylock(sb->s_writers.rw_sem + level-1);
-
- WARN_ON(force_trylock && !ret);
- return ret;
+ percpu_down_read(sb->s_writers.rw_sem + level-1);
+ return 1;
}
EXPORT_SYMBOL(__sb_start_write);
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 0a7252aecfa5..5166eb40917d 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
+#include <linux/mm.h>
#include "sysfs.h"
#include "../kernfs/kernfs-internal.h"
@@ -556,3 +557,57 @@ void sysfs_remove_bin_file(struct kobject *kobj,
kernfs_remove_by_name(kobj->sd, attr->attr.name);
}
EXPORT_SYMBOL_GPL(sysfs_remove_bin_file);
+
+/**
+ * sysfs_emit - scnprintf equivalent, aware of PAGE_SIZE buffer.
+ * @buf: start of PAGE_SIZE buffer.
+ * @fmt: format
+ * @...: optional arguments to @fmt
+ *
+ * Returns number of characters written to @buf.
+ */
+int sysfs_emit(char *buf, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+
+ if (WARN(!buf || offset_in_page(buf),
+ "invalid sysfs_emit: buf:%p\n", buf))
+ return 0;
+
+ va_start(args, fmt);
+ len = vscnprintf(buf, PAGE_SIZE, fmt, args);
+ va_end(args);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(sysfs_emit);
+
+/**
+ * sysfs_emit_at - scnprintf equivalent, aware of PAGE_SIZE buffer.
+ * @buf: start of PAGE_SIZE buffer.
+ * @at: offset in @buf to start write in bytes; must be >= 0 && < PAGE_SIZE
+ * @fmt: format
+ * @...: optional arguments to @fmt
+ *
+ * Returns number of characters written starting at &@buf[@at].
+ */
+int sysfs_emit_at(char *buf, int at, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+
+ if (WARN(!buf || offset_in_page(buf) || at < 0 || at >= PAGE_SIZE,
+ "invalid sysfs_emit_at: buf:%p at:%d\n", buf, at))
+ return 0;
+
+ va_start(args, fmt);
+ len = vscnprintf(buf + at, PAGE_SIZE - at, fmt, args);
+ va_end(args);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(sysfs_emit_at);
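
sysfs_emit() and sysfs_emit_at() exist so that attribute ->show() methods stop open-coding sprintf()/scnprintf() against an implicit PAGE_SIZE limit; the bound and the buffer sanity checks live in one place. A hedged sketch of a typical caller follows; the device attribute, struct foo_data, and its value field are illustrative and not part of this patch:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    struct foo_data {
            int value;              /* hypothetical driver state */
    };

    static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
    {
            struct foo_data *data = dev_get_drvdata(dev);

            /* buf is the PAGE_SIZE buffer sysfs hands to every ->show() call */
            return sysfs_emit(buf, "%d\n", data->value);
    }
    static DEVICE_ATTR_RO(foo);
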
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 564e330d05b1..24bbecd4752b 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -1129,6 +1129,7 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir)
err = PTR_ERR(dent);
if (err == -ENOENT)
break;
+ kfree(pdent);
return err;
}
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index d7c0aa0626cd..8fe2ee5462a0 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -220,11 +220,9 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
dbg_gen("'%pd' in dir ino %lu", dentry, dir->i_ino);
- err = fscrypt_prepare_lookup(dir, dentry, flags);
- if (err)
- return ERR_PTR(err);
-
- err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &nm);
+ err = fscrypt_prepare_lookup(dir, dentry, &nm);
+ if (err == -ENOENT)
+ return d_splice_alias(NULL, dentry);
if (err)
return ERR_PTR(err);
@@ -292,6 +290,15 @@ done:
return d_splice_alias(inode, dentry);
}
+static int ubifs_prepare_create(struct inode *dir, struct dentry *dentry,
+ struct fscrypt_name *nm)
+{
+ if (fscrypt_is_nokey_name(dentry))
+ return -ENOKEY;
+
+ return fscrypt_setup_filename(dir, &dentry->d_name, 0, nm);
+}
+
static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
@@ -315,7 +322,7 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
if (err)
return err;
- err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
+ err = ubifs_prepare_create(dir, dentry, &nm);
if (err)
goto out_budg;
@@ -979,7 +986,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (err)
return err;
- err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
+ err = ubifs_prepare_create(dir, dentry, &nm);
if (err)
goto out_budg;
@@ -1064,7 +1071,7 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
return err;
}
- err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
+ err = ubifs_prepare_create(dir, dentry, &nm);
if (err) {
kfree(dev);
goto out_budg;
@@ -1148,7 +1155,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
if (err)
return err;
- err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
+ err = ubifs_prepare_create(dir, dentry, &nm);
if (err)
goto out_budg;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 65b4f63349c7..d7d2fdda4bbd 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1391,7 +1391,6 @@ int ubifs_update_time(struct inode *inode, struct timespec64 *time,
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct ubifs_budget_req req = { .dirtied_ino = 1,
.dirtied_ino_d = ALIGN(ui->data_len, 8) };
- int iflags = I_DIRTY_TIME;
int err, release;
err = ubifs_budget_space(c, &req);
@@ -1406,11 +1405,8 @@ int ubifs_update_time(struct inode *inode, struct timespec64 *time,
if (flags & S_MTIME)
inode->i_mtime = *time;
- if (!(inode->i_sb->s_flags & SB_LAZYTIME))
- iflags |= I_DIRTY_SYNC;
-
release = ui->dirty;
- __mark_inode_dirty(inode, iflags);
+ __mark_inode_dirty(inode, I_DIRTY_SYNC);
mutex_unlock(&ui->ui_mutex);
if (release)
ubifs_release_budget(c, &req);
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 099bec94b820..9542ebf643a5 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -237,7 +237,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
int offs, int quiet, int must_chk_crc)
{
- int err = -EINVAL, type, node_len;
+ int err = -EINVAL, type, node_len, dump_node = 1;
uint32_t crc, node_crc, magic;
const struct ubifs_ch *ch = buf;
@@ -290,10 +290,22 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
out_len:
if (!quiet)
ubifs_err(c, "bad node length %d", node_len);
+ if (type == UBIFS_DATA_NODE && node_len > UBIFS_DATA_NODE_SZ)
+ dump_node = 0;
out:
if (!quiet) {
ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
- ubifs_dump_node(c, buf);
+ if (dump_node) {
+ ubifs_dump_node(c, buf);
+ } else {
+ int safe_len = min3(node_len, c->leb_size - offs,
+ (int)UBIFS_MAX_DATA_NODE_SZ);
+ pr_err("\tprevent out-of-bounds memory access\n");
+ pr_err("\ttruncated data node length %d\n", safe_len);
+ pr_err("\tcorrupted data node:\n");
+ print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1,
+ buf, safe_len, 0);
+ }
dump_stack();
}
return err;
@@ -319,7 +331,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
uint32_t crc;
- ubifs_assert(c, pad >= 0 && !(pad & 7));
+ ubifs_assert(c, pad >= 0);
if (pad >= UBIFS_PAD_NODE_SZ) {
struct ubifs_ch *ch = buf;
@@ -716,6 +728,10 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
* write-buffer.
*/
memcpy(wbuf->buf + wbuf->used, buf, len);
+ if (aligned_len > len) {
+ ubifs_assert(c, aligned_len - len < 8);
+ ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len);
+ }
if (aligned_len == wbuf->avail) {
dbg_io("flush jhead %s wbuf to LEB %d:%d",
@@ -808,13 +824,18 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
}
spin_lock(&wbuf->lock);
- if (aligned_len)
+ if (aligned_len) {
/*
* And now we have what's left and what does not take whole
* max. write unit, so write it to the write-buffer and we are
* done.
*/
memcpy(wbuf->buf, buf + written, len);
+ if (aligned_len > len) {
+ ubifs_assert(c, aligned_len - len < 8);
+ ubifs_pad(c, wbuf->buf + len, aligned_len - len);
+ }
+ }
if (c->leb_size - wbuf->offs >= c->max_write_size)
wbuf->size = c->max_write_size;
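
Both write-buffer hunks above pad the gap between the real node length and its 8-byte aligned length instead of leaving stale bytes behind. A small illustration of the arithmetic (not UBIFS code):

    size_t len         = 37;                /* bytes of real node data        */
    size_t aligned_len = ALIGN(len, 8);     /* 40                             */
    size_t pad         = aligned_len - len; /* 3; always in the range 0..7    */

which is why the new asserts can insist that aligned_len - len < 8.
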
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index c6f9b2225387..673d1f08b9a4 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -232,7 +232,8 @@ static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
*/
list_for_each_entry_reverse(r, &c->replay_list, list) {
ubifs_assert(c, r->sqnum >= rino->sqnum);
- if (key_inum(c, &r->key) == key_inum(c, &rino->key))
+ if (key_inum(c, &r->key) == key_inum(c, &rino->key) &&
+ key_type(c, &r->key) == UBIFS_INO_KEY)
return r->deletion == 0;
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 4c46ebf0e773..f5500d2a3879 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -132,21 +132,24 @@ void udf_evict_inode(struct inode *inode)
struct udf_inode_info *iinfo = UDF_I(inode);
int want_delete = 0;
- if (!inode->i_nlink && !is_bad_inode(inode)) {
- want_delete = 1;
- udf_setsize(inode, 0);
- udf_update_inode(inode, IS_SYNC(inode));
+ if (!is_bad_inode(inode)) {
+ if (!inode->i_nlink) {
+ want_delete = 1;
+ udf_setsize(inode, 0);
+ udf_update_inode(inode, IS_SYNC(inode));
+ }
+ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
+ inode->i_size != iinfo->i_lenExtents) {
+ udf_warn(inode->i_sb,
+ "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
+ inode->i_ino, inode->i_mode,
+ (unsigned long long)inode->i_size,
+ (unsigned long long)iinfo->i_lenExtents);
+ }
}
truncate_inode_pages_final(&inode->i_data);
invalidate_inode_buffers(inode);
clear_inode(inode);
- if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
- inode->i_size != iinfo->i_lenExtents) {
- udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
- inode->i_ino, inode->i_mode,
- (unsigned long long)inode->i_size,
- (unsigned long long)iinfo->i_lenExtents);
- }
kfree(iinfo->i_ext.i_data);
iinfo->i_ext.i_data = NULL;
udf_clear_extent_cache(inode);
@@ -537,11 +540,14 @@ static int udf_do_extend_file(struct inode *inode,
udf_write_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
+
/*
- * We've rewritten the last extent but there may be empty
- * indirect extent after it - enter it.
+ * We've rewritten the last extent. If we are going to add
+ * more extents, we may need to enter a possible following
+ * empty indirect extent.
*/
- udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
+ if (new_block_bytes || prealloc_len)
+ udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
}
/* Managed to do everything necessary? */
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 1676a175cd7a..c7f6243f318b 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1349,6 +1349,12 @@ static int udf_load_sparable_map(struct super_block *sb,
(int)spm->numSparingTables);
return -EIO;
}
+ if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) {
+ udf_err(sb, "error loading logical volume descriptor: "
+ "Too big sparing table size (%u)\n",
+ le32_to_cpu(spm->sizeSparingTable));
+ return -EIO;
+ }
for (i = 0; i < spm->numSparingTables; i++) {
loc = le32_to_cpu(spm->locSparingTable[i]);
@@ -1679,7 +1685,8 @@ static noinline int udf_process_sequence(
"Pointers (max %u supported)\n",
UDF_MAX_TD_NESTING);
brelse(bh);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
vdp = (struct volDescPtr *)bh->b_data;
@@ -1699,7 +1706,8 @@ static noinline int udf_process_sequence(
curr = get_volume_descriptor_record(ident, bh, &data);
if (IS_ERR(curr)) {
brelse(bh);
- return PTR_ERR(curr);
+ ret = PTR_ERR(curr);
+ goto out;
}
/* Descriptor we don't care about? */
if (!curr)
@@ -1721,28 +1729,31 @@ static noinline int udf_process_sequence(
*/
if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) {
udf_err(sb, "Primary Volume Descriptor not found!\n");
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto out;
}
ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block);
if (ret < 0)
- return ret;
+ goto out;
if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) {
ret = udf_load_logicalvol(sb,
data.vds[VDS_POS_LOGICAL_VOL_DESC].block,
fileset);
if (ret < 0)
- return ret;
+ goto out;
}
/* Now handle prevailing Partition Descriptors */
for (i = 0; i < data.num_part_descs; i++) {
ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
if (ret < 0)
- return ret;
+ goto out;
}
-
- return 0;
+ ret = 0;
+out:
+ kfree(data.part_descs_loc);
+ return ret;
}
/*
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index a4e07e910f1b..6e59e45d7bfb 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -100,7 +100,7 @@ static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 gene
struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
struct inode *inode;
- if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg)
+ if (ino < UFS_ROOTINO || ino > (u64)uspi->s_ncg * uspi->s_ipg)
return ERR_PTR(-ESTALE);
inode = ufs_iget(sb, ino);
diff --git a/fs/xattr.c b/fs/xattr.c
index 0d6a6a4af861..470ee0af3200 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -203,10 +203,22 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
return error;
}
-
+/**
+ * __vfs_setxattr_locked: set an extended attribute while holding the inode
+ * lock
+ *
+ * @dentry - object to perform setxattr on
+ * @name - xattr name to set
+ * @value - value to set @name to
+ * @size - size of @value
+ * @flags - flags to pass into filesystem operations
+ * @delegated_inode - on return, will contain an inode pointer that
+ * a delegation was broken on, NULL if none.
+ */
int
-vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
- size_t size, int flags)
+__vfs_setxattr_locked(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags,
+ struct inode **delegated_inode)
{
struct inode *inode = dentry->d_inode;
int error;
@@ -215,15 +227,40 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
if (error)
return error;
- inode_lock(inode);
error = security_inode_setxattr(dentry, name, value, size, flags);
if (error)
goto out;
+ error = try_break_deleg(inode, delegated_inode);
+ if (error)
+ goto out;
+
error = __vfs_setxattr_noperm(dentry, name, value, size, flags);
out:
+ return error;
+}
+EXPORT_SYMBOL_GPL(__vfs_setxattr_locked);
+
+int
+vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
+{
+ struct inode *inode = dentry->d_inode;
+ struct inode *delegated_inode = NULL;
+ int error;
+
+retry_deleg:
+ inode_lock(inode);
+ error = __vfs_setxattr_locked(dentry, name, value, size, flags,
+ &delegated_inode);
inode_unlock(inode);
+
+ if (delegated_inode) {
+ error = break_deleg_wait(&delegated_inode);
+ if (!error)
+ goto retry_deleg;
+ }
return error;
}
EXPORT_SYMBOL_GPL(vfs_setxattr);
@@ -377,8 +414,18 @@ __vfs_removexattr(struct dentry *dentry, const char *name)
}
EXPORT_SYMBOL(__vfs_removexattr);
+/**
+ * __vfs_removexattr_locked: remove an extended attribute while holding the
+ * inode lock
+ *
+ * @dentry - object to perform removexattr on
+ * @name - name of xattr to remove
+ * @delegated_inode - on return, will contain an inode pointer that
+ * a delegation was broken on, NULL if none.
+ */
int
-vfs_removexattr(struct dentry *dentry, const char *name)
+__vfs_removexattr_locked(struct dentry *dentry, const char *name,
+ struct inode **delegated_inode)
{
struct inode *inode = dentry->d_inode;
int error;
@@ -387,11 +434,14 @@ vfs_removexattr(struct dentry *dentry, const char *name)
if (error)
return error;
- inode_lock(inode);
error = security_inode_removexattr(dentry, name);
if (error)
goto out;
+ error = try_break_deleg(inode, delegated_inode);
+ if (error)
+ goto out;
+
error = __vfs_removexattr(dentry, name);
if (!error) {
@@ -400,12 +450,32 @@ vfs_removexattr(struct dentry *dentry, const char *name)
}
out:
+ return error;
+}
+EXPORT_SYMBOL_GPL(__vfs_removexattr_locked);
+
+int
+vfs_removexattr(struct dentry *dentry, const char *name)
+{
+ struct inode *inode = dentry->d_inode;
+ struct inode *delegated_inode = NULL;
+ int error;
+
+retry_deleg:
+ inode_lock(inode);
+ error = __vfs_removexattr_locked(dentry, name, &delegated_inode);
inode_unlock(inode);
+
+ if (delegated_inode) {
+ error = break_deleg_wait(&delegated_inode);
+ if (!error)
+ goto retry_deleg;
+ }
+
return error;
}
EXPORT_SYMBOL_GPL(vfs_removexattr);
-
/*
* Extended attribute SET operations
*/
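
__vfs_setxattr_locked() and __vfs_removexattr_locked() are exported so that callers which already hold (or need to hold) the inode lock can break NFS delegations themselves rather than going through vfs_setxattr()/vfs_removexattr(). A hedged sketch of such a caller, mirroring the retry loop in vfs_setxattr() above; the function name and the XATTR_REPLACE flag choice are illustrative:

    static int example_replace_xattr(struct dentry *dentry, const char *name,
                                     const void *value, size_t size)
    {
            struct inode *inode = d_inode(dentry);
            struct inode *delegated_inode = NULL;
            int error;

    retry:
            inode_lock(inode);
            /* ... other work that must happen under the same inode lock ... */
            error = __vfs_setxattr_locked(dentry, name, value, size,
                                          XATTR_REPLACE, &delegated_inode);
            inode_unlock(inode);

            if (delegated_inode) {
                    error = break_deleg_wait(&delegated_inode);
                    if (!error)
                            goto retry;
            }
            return error;
    }
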
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index e1c0c0d2f1b0..b3a9043b0c9e 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -2213,6 +2213,7 @@ xfs_defer_agfl_block(
new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
new->xefi_blockcount = 1;
new->xefi_oinfo = *oinfo;
+ new->xefi_skip_discard = false;
trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
@@ -2596,6 +2597,13 @@ xfs_agf_verify(
be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
return __this_address;
+ if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks)
+ return __this_address;
+
+ if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
+ be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length))
+ return __this_address;
+
if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
@@ -2607,6 +2615,10 @@ xfs_agf_verify(
be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
return __this_address;
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
+ be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length))
+ return __this_address;
+
/*
* during growfs operations, the perag is not fully initialised,
* so we can't use it for any useful checking. growfs ensures we can't
@@ -2621,6 +2633,11 @@ xfs_agf_verify(
return __this_address;
if (xfs_sb_version_hasreflink(&mp->m_sb) &&
+ be32_to_cpu(agf->agf_refcount_blocks) >
+ be32_to_cpu(agf->agf_length))
+ return __this_address;
+
+ if (xfs_sb_version_hasreflink(&mp->m_sb) &&
(be32_to_cpu(agf->agf_refcount_level) < 1 ||
be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
return __this_address;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 2652d00842d6..efb586ea508b 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -551,8 +551,8 @@ xfs_attr_shortform_create(xfs_da_args_t *args)
ASSERT(ifp->if_flags & XFS_IFINLINE);
}
xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK);
- hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data;
- hdr->count = 0;
+ hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data;
+ memset(hdr, 0, sizeof(*hdr));
hdr->totsize = cpu_to_be16(sizeof(*hdr));
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
}
@@ -935,8 +935,10 @@ xfs_attr_shortform_verify(
* struct xfs_attr_sf_entry has a variable length.
* Check the fixed-offset parts of the structure are
* within the data buffer.
+ * xfs_attr_sf_entry is defined with a 1-byte variable
+ * array at the end, so we must subtract that off.
*/
- if (((char *)sfep + sizeof(*sfep)) >= endp)
+ if (((char *)sfep + sizeof(*sfep) - 1) >= endp)
return __this_address;
/* Don't allow names with known bad length. */
@@ -1436,7 +1438,9 @@ xfs_attr3_leaf_add_work(
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
if (ichdr->freemap[i].base == tmp) {
ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
- ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t);
+ ichdr->freemap[i].size -=
+ min_t(uint16_t, ichdr->freemap[i].size,
+ sizeof(xfs_attr_leaf_entry_t));
}
}
ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 0b7145fdb8aa..fc9950a505e6 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -4920,20 +4920,25 @@ xfs_bmap_del_extent_real(
flags = XFS_ILOG_CORE;
if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
- xfs_fsblock_t bno;
xfs_filblks_t len;
xfs_extlen_t mod;
- bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize,
- &mod);
- ASSERT(mod == 0);
len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
&mod);
ASSERT(mod == 0);
- error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
- if (error)
- goto done;
+ if (!(bflags & XFS_BMAPI_REMAP)) {
+ xfs_fsblock_t bno;
+
+ bno = div_u64_rem(del->br_startblock,
+ mp->m_sb.sb_rextsize, &mod);
+ ASSERT(mod == 0);
+
+ error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
+ if (error)
+ goto done;
+ }
+
do_fx = 0;
nblks = len * mp->m_sb.sb_rextsize;
qfield = XFS_TRANS_DQ_RTBCOUNT;
@@ -6130,7 +6135,7 @@ xfs_bmap_validate_extent(
isrt = XFS_IS_REALTIME_INODE(ip);
endfsb = irec->br_startblock + irec->br_blockcount - 1;
- if (isrt) {
+ if (isrt && whichfork == XFS_DATA_FORK) {
if (!xfs_verify_rtbno(mp, irec->br_startblock))
return __this_address;
if (!xfs_verify_rtbno(mp, endfsb))
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 488dc8860fd7..50242ba3cdb7 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -52,9 +52,9 @@ struct xfs_extent_free_item
{
xfs_fsblock_t xefi_startblock;/* starting fs block number */
xfs_extlen_t xefi_blockcount;/* number of blocks in extent */
+ bool xefi_skip_discard;
struct list_head xefi_list;
struct xfs_owner_info xefi_oinfo; /* extent owner */
- bool xefi_skip_discard;
};
#define XFS_BMAP_MAX_NMAP 4
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index f1bb3434f51c..01e99806b941 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -214,6 +214,7 @@ __xfs_dir3_free_read(
if (fa) {
xfs_verifier_error(*bpp, -EFSCORRUPTED, fa);
xfs_trans_brelse(tp, *bpp);
+ *bpp = NULL;
return -EFSCORRUPTED;
}
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 245af452840e..ab3e72e702f0 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -1387,7 +1387,7 @@ xfs_rmap_convert_shared(
* record for our insertion point. This will also give us the record for
* start block contiguity tests.
*/
- error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
+ error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext,
&PREV, &i);
if (error)
goto done;
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index b228c821bae6..fe7323032e78 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -1020,7 +1020,6 @@ xfs_rtalloc_query_range(
struct xfs_mount *mp = tp->t_mountp;
xfs_rtblock_t rtstart;
xfs_rtblock_t rtend;
- xfs_rtblock_t rem;
int is_free;
int error = 0;
@@ -1029,13 +1028,12 @@ xfs_rtalloc_query_range(
if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
low_rec->ar_startext == high_rec->ar_startext)
return 0;
- if (high_rec->ar_startext > mp->m_sb.sb_rextents)
- high_rec->ar_startext = mp->m_sb.sb_rextents;
+ high_rec->ar_startext = min(high_rec->ar_startext,
+ mp->m_sb.sb_rextents - 1);
/* Iterate the bitmap, looking for discrepancies. */
rtstart = low_rec->ar_startext;
- rem = high_rec->ar_startext - rtstart;
- while (rem) {
+ while (rtstart <= high_rec->ar_startext) {
/* Is the first block free? */
error = xfs_rtcheck_range(mp, tp, rtstart, 1, 1, &rtend,
&is_free);
@@ -1044,7 +1042,7 @@ xfs_rtalloc_query_range(
/* How long does the extent go for? */
error = xfs_rtfind_forw(mp, tp, rtstart,
- high_rec->ar_startext - 1, &rtend);
+ high_rec->ar_startext, &rtend);
if (error)
break;
@@ -1057,7 +1055,6 @@ xfs_rtalloc_query_range(
break;
}
- rem -= rtend - rtstart + 1;
rtstart = rtend + 1;
}
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index f99a7aefe418..2b3cc5a8ced1 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -198,6 +198,24 @@ xfs_calc_inode_chunk_res(
}
/*
+ * Per-extent log reservation for the btree changes involved in freeing or
+ * allocating a realtime extent. We have to be able to log as many rtbitmap
+ * blocks as needed to mark inuse MAXEXTLEN blocks' worth of realtime extents,
+ * as well as the realtime summary block.
+ */
+unsigned int
+xfs_rtalloc_log_count(
+ struct xfs_mount *mp,
+ unsigned int num_ops)
+{
+ unsigned int blksz = XFS_FSB_TO_B(mp, 1);
+ unsigned int rtbmp_bytes;
+
+ rtbmp_bytes = (MAXEXTLEN / mp->m_sb.sb_rextsize) / NBBY;
+ return (howmany(rtbmp_bytes, blksz) + 1) * num_ops;
+}
+
+/*
* Various log reservation values.
*
* These are based on the size of the file system block because that is what
@@ -219,13 +237,21 @@ xfs_calc_inode_chunk_res(
/*
* In a write transaction we can allocate a maximum of 2
- * extents. This gives:
+ * extents. This gives (t1):
* the inode getting the new extents: inode size
* the inode's bmap btree: max depth * block size
* the agfs of the ags from which the extents are allocated: 2 * sector
* the superblock free block counter: sector size
* the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
- * And the bmap_finish transaction can free bmap blocks in a join:
+ * Or, if we're writing to a realtime file (t2):
+ * the inode getting the new extents: inode size
+ * the inode's bmap btree: max depth * block size
+ * the agfs of the ags from which the extents are allocated: 2 * sector
+ * the superblock free block counter: sector size
+ * the realtime bitmap: ((MAXEXTLEN / rtextsize) / NBBY) bytes
+ * the realtime summary: 1 block
+ * the allocation btrees: 2 trees * (2 * max depth - 1) * block size
+ * And the bmap_finish transaction can free bmap blocks in a join (t3):
* the agfs of the ags containing the blocks: 2 * sector size
* the agfls of the ags containing the blocks: 2 * sector size
* the super block free block counter: sector size
@@ -235,40 +261,72 @@ STATIC uint
xfs_calc_write_reservation(
struct xfs_mount *mp)
{
- return XFS_DQUOT_LOGRES(mp) +
- max((xfs_calc_inode_res(mp, 1) +
+ unsigned int t1, t2, t3;
+ unsigned int blksz = XFS_FSB_TO_B(mp, 1);
+
+ t1 = xfs_calc_inode_res(mp, 1) +
+ xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) +
+ xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
+
+ if (xfs_sb_version_hasrealtime(&mp->m_sb)) {
+ t2 = xfs_calc_inode_res(mp, 1) +
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
- XFS_FSB_TO_B(mp, 1)) +
+ blksz) +
xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
- XFS_FSB_TO_B(mp, 1))),
- (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
- XFS_FSB_TO_B(mp, 1))));
+ xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 1), blksz) +
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1), blksz);
+ } else {
+ t2 = 0;
+ }
+
+ t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
+
+ return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
}
/*
- * In truncating a file we free up to two extents at once. We can modify:
+ * In truncating a file we free up to two extents at once. We can modify (t1):
* the inode being truncated: inode size
* the inode's bmap btree: (max depth + 1) * block size
- * And the bmap_finish transaction can free the blocks and bmap blocks:
+ * And the bmap_finish transaction can free the blocks and bmap blocks (t2):
* the agf for each of the ags: 4 * sector size
* the agfl for each of the ags: 4 * sector size
* the super block to reflect the freed blocks: sector size
* worst case split in allocation btrees per extent assuming 4 extents:
* 4 exts * 2 trees * (2 * max depth - 1) * block size
+ * Or, if it's a realtime file (t3):
+ * the agf for each of the ags: 2 * sector size
+ * the agfl for each of the ags: 2 * sector size
+ * the super block to reflect the freed blocks: sector size
+ * the realtime bitmap: 2 exts * ((MAXEXTLEN / rtextsize) / NBBY) bytes
+ * the realtime summary: 2 exts * 1 block
+ * worst case split in allocation btrees per extent assuming 2 extents:
+ * 2 exts * 2 trees * (2 * max depth - 1) * block size
*/
STATIC uint
xfs_calc_itruncate_reservation(
struct xfs_mount *mp)
{
- return XFS_DQUOT_LOGRES(mp) +
- max((xfs_calc_inode_res(mp, 1) +
- xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1,
- XFS_FSB_TO_B(mp, 1))),
- (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
- XFS_FSB_TO_B(mp, 1))));
+ unsigned int t1, t2, t3;
+ unsigned int blksz = XFS_FSB_TO_B(mp, 1);
+
+ t1 = xfs_calc_inode_res(mp, 1) +
+ xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz);
+
+ t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), blksz);
+
+ if (xfs_sb_version_hasrealtime(&mp->m_sb)) {
+ t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
+ xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 2), blksz) +
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
+ } else {
+ t3 = 0;
+ }
+
+ return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
}
/*
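
As a worked example of the new xfs_rtalloc_log_count() helper above (assuming MAXEXTLEN is 2^21 - 1 blocks, a realtime extent size of one filesystem block, and 4096-byte blocks; none of these values is stated by the patch itself):

    unsigned int rextsize    = 1;
    unsigned int blksz       = 4096;
    unsigned int rtbmp_bytes = (((1u << 21) - 1) / rextsize) / 8;    /* 262143 */
    unsigned int per_op      = DIV_ROUND_UP(rtbmp_bytes, blksz) + 1; /* 64 + 1 */

so each realtime allocation or free reserves room for 65 blocks of rtbitmap plus summary, and the t2/t3 terms in the reservations above pass the number of such operations as num_ops.
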
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index e1d11f3223e3..cf005e18d618 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -53,9 +53,27 @@ xchk_setup_inode_bmap(
*/
if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
+ struct address_space *mapping = VFS_I(sc->ip)->i_mapping;
+
inode_dio_wait(VFS_I(sc->ip));
- error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
- if (error)
+
+ /*
+ * Try to flush all incore state to disk before we examine the
+ * space mappings for the data fork. Leave accumulated errors
+ * in the mapping for the writer threads to consume.
+ *
+ * On ENOSPC or EIO writeback errors, we continue into the
+ * extent mapping checks because write failures do not
+ * necessarily imply anything about the correctness of the file
+ * metadata. The metadata and the file data could be on
+ * completely separate devices; a media failure might only
+ * affect a subset of the disk, etc. We can handle delalloc
+ * extents in the scrubber, so leaving them in memory is fine.
+ */
+ error = filemap_fdatawrite(mapping);
+ if (!error)
+ error = filemap_fdatawait_keep_errors(mapping);
+ if (error && (error != -ENOSPC && error != -EIO))
goto out;
}
@@ -102,6 +120,8 @@ xchk_bmap_get_rmap(
if (info->whichfork == XFS_ATTR_FORK)
rflags |= XFS_RMAP_ATTR_FORK;
+ if (irec->br_state == XFS_EXT_UNWRITTEN)
+ rflags |= XFS_RMAP_UNWRITTEN;
/*
* CoW staging extents are owned (on disk) by the refcountbt, so
@@ -205,13 +225,13 @@ xchk_bmap_xref_rmap(
* which doesn't track unwritten state.
*/
if (owner != XFS_RMAP_OWN_COW &&
- irec->br_state == XFS_EXT_UNWRITTEN &&
- !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
+ !!(irec->br_state == XFS_EXT_UNWRITTEN) !=
+ !!(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
- if (info->whichfork == XFS_ATTR_FORK &&
- !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
+ if (!!(info->whichfork == XFS_ATTR_FORK) !=
+ !!(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index 4ae959f7ad2c..c924fe3cdad6 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -450,32 +450,41 @@ xchk_btree_check_minrecs(
int level,
struct xfs_btree_block *block)
{
- unsigned int numrecs;
- int ok_level;
-
- numrecs = be16_to_cpu(block->bb_numrecs);
+ struct xfs_btree_cur *cur = bs->cur;
+ unsigned int root_level = cur->bc_nlevels - 1;
+ unsigned int numrecs = be16_to_cpu(block->bb_numrecs);
/* More records than minrecs means the block is ok. */
- if (numrecs >= bs->cur->bc_ops->get_minrecs(bs->cur, level))
+ if (numrecs >= cur->bc_ops->get_minrecs(cur, level))
return;
/*
- * Certain btree blocks /can/ have fewer than minrecs records. Any
- * level greater than or equal to the level of the highest dedicated
- * btree block are allowed to violate this constraint.
- *
- * For a btree rooted in a block, the btree root can have fewer than
- * minrecs records. If the btree is rooted in an inode and does not
- * store records in the root, the direct children of the root and the
- * root itself can have fewer than minrecs records.
+ * For btrees rooted in the inode, it's possible that the root block
+ * contents spilled into a regular ondisk block because there wasn't
+ * enough space in the inode root. The number of records in that
+ * child block might be less than the standard minrecs, but that's ok
+ * provided that there's only one direct child of the root.
*/
- ok_level = bs->cur->bc_nlevels - 1;
- if (bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
- ok_level--;
- if (level >= ok_level)
+ if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+ level == cur->bc_nlevels - 2) {
+ struct xfs_btree_block *root_block;
+ struct xfs_buf *root_bp;
+ int root_maxrecs;
+
+ root_block = xfs_btree_get_block(cur, root_level, &root_bp);
+ root_maxrecs = cur->bc_ops->get_dmaxrecs(cur, root_level);
+ if (be16_to_cpu(root_block->bb_numrecs) != 1 ||
+ numrecs <= root_maxrecs)
+ xchk_btree_set_corrupt(bs->sc, cur, level);
return;
+ }
- xchk_btree_set_corrupt(bs->sc, bs->cur, level);
+ /*
+ * Otherwise, only the root level is allowed to have fewer than minrecs
+ * records or keyptrs.
+ */
+ if (level < root_level)
+ xchk_btree_set_corrupt(bs->sc, cur, level);
}
/*
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index cd3e4d768a18..33dfcba72c7a 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -156,6 +156,9 @@ xchk_dir_actor(
xname.type = XFS_DIR3_FT_UNKNOWN;
error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL);
+ /* ENOENT means the hash lookup failed and the dir is corrupt */
+ if (error == -ENOENT)
+ error = -EFSCORRUPTED;
if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
&error))
goto out;
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index e386c9b0b4ab..8d45d60832db 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -131,8 +131,7 @@ xchk_inode_flags(
goto bad;
/* rt flags require rt device */
- if ((flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT)) &&
- !mp->m_rtdev_targp)
+ if ((flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
goto bad;
/* new rt bitmap flag only valid for rbmino */
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index e8c82b026083..76e4f16a9fab 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -180,7 +180,6 @@ xchk_refcountbt_process_rmap_fragments(
*/
INIT_LIST_HEAD(&worklist);
rbno = NULLAGBLOCK;
- nr = 1;
/* Make sure the fragments actually /are/ in agbno order. */
bno = 0;
@@ -194,15 +193,14 @@ xchk_refcountbt_process_rmap_fragments(
* Find all the rmaps that start at or before the refc extent,
* and put them on the worklist.
*/
+ nr = 0;
list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
- if (frag->rm.rm_startblock > refchk->bno)
- goto done;
+ if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
+ break;
bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
if (bno < rbno)
rbno = bno;
list_move_tail(&frag->list, &worklist);
- if (nr == target_nr)
- break;
nr++;
}
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index e638740f1681..3e1dd66bd676 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1823,7 +1823,7 @@ xfs_swap_extents(
if (xfs_inode_has_cow_data(tip)) {
error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
if (error)
- return error;
+ goto out_unlock;
}
/*
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index c1f7c0d5d608..b33a9cd4fe94 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1202,8 +1202,10 @@ xfs_buf_ioend(
bp->b_ops->verify_read(bp);
}
- if (!bp->b_error)
+ if (!bp->b_error) {
+ bp->b_flags &= ~XBF_WRITE_FAIL;
bp->b_flags |= XBF_DONE;
+ }
if (bp->b_iodone)
(*(bp->b_iodone))(bp);
@@ -1263,7 +1265,7 @@ xfs_bwrite(
bp->b_flags |= XBF_WRITE;
bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
- XBF_WRITE_FAIL | XBF_DONE);
+ XBF_DONE);
error = xfs_buf_submit(bp);
if (error) {
@@ -2000,7 +2002,7 @@ xfs_buf_delwri_submit_buffers(
* synchronously. Otherwise, drop the buffer from the delwri
* queue and submit async.
*/
- bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
+ bp->b_flags &= ~_XBF_DELWRI_Q;
bp->b_flags |= XBF_WRITE;
if (wait_list) {
bp->b_flags &= ~XBF_ASYNC;
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index a1af984e4913..59b2b29542f4 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -1120,13 +1120,12 @@ xfs_qm_dqflush(
dqb = bp->b_addr + dqp->q_bufoffset;
ddqp = &dqb->dd_diskdq;
- /*
- * A simple sanity check in case we got a corrupted dquot.
- */
- fa = xfs_dqblk_verify(mp, dqb, be32_to_cpu(ddqp->d_id), 0);
+ /* sanity check the in-core structure before we flush */
+ fa = xfs_dquot_verify(mp, &dqp->q_core, be32_to_cpu(dqp->q_core.d_id),
+ 0);
if (fa) {
xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
- be32_to_cpu(ddqp->d_id), fa);
+ be32_to_cpu(dqp->q_core.d_id), fa);
xfs_buf_relse(bp);
xfs_dqfunlock(dqp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 259549698ba7..f22acfd53850 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1095,6 +1095,14 @@ __xfs_filemap_fault(
return ret;
}
+static inline bool
+xfs_is_write_fault(
+ struct vm_fault *vmf)
+{
+ return (vmf->flags & FAULT_FLAG_WRITE) &&
+ (vmf->vma->vm_flags & VM_SHARED);
+}
+
static vm_fault_t
xfs_filemap_fault(
struct vm_fault *vmf)
@@ -1102,7 +1110,7 @@ xfs_filemap_fault(
/* DAX can shortcut the normal fault path on write faults! */
return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
IS_DAX(file_inode(vmf->vma->vm_file)) &&
- (vmf->flags & FAULT_FLAG_WRITE));
+ xfs_is_write_fault(vmf));
}
static vm_fault_t
@@ -1115,7 +1123,7 @@ xfs_filemap_huge_fault(
/* DAX can shortcut the normal fault path on write faults! */
return __xfs_filemap_fault(vmf, pe_size,
- (vmf->flags & FAULT_FLAG_WRITE));
+ xfs_is_write_fault(vmf));
}
static vm_fault_t
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 3d76a9e35870..75b57b683d3e 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -259,6 +259,9 @@ xfs_getfsmap_helper(
/* Are we just counting mappings? */
if (info->head->fmh_count == 0) {
+ if (info->head->fmh_entries == UINT_MAX)
+ return -ECANCELED;
+
if (rec_daddr > info->next_daddr)
info->head->fmh_entries++;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 245483cc282b..56e9043bddc7 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -902,7 +902,12 @@ xfs_eofblocks_worker(
{
struct xfs_mount *mp = container_of(to_delayed_work(work),
struct xfs_mount, m_eofblocks_work);
+
+ if (!sb_start_write_trylock(mp->m_super))
+ return;
xfs_icache_free_eofblocks(mp, NULL);
+ sb_end_write(mp->m_super);
+
xfs_queue_eofblocks(mp);
}
@@ -929,7 +934,12 @@ xfs_cowblocks_worker(
{
struct xfs_mount *mp = container_of(to_delayed_work(work),
struct xfs_mount, m_cowblocks_work);
+
+ if (!sb_start_write_trylock(mp->m_super))
+ return;
xfs_icache_free_cowblocks(mp, NULL);
+ sb_end_write(mp->m_super);
+
xfs_queue_cowblocks(mp);
}
@@ -1117,7 +1127,7 @@ restart:
goto out_ifunlock;
xfs_iunpin_wait(ip);
}
- if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
+ if (xfs_inode_clean(ip)) {
xfs_ifunlock(ip);
goto reclaim;
}
@@ -1204,6 +1214,7 @@ reclaim:
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_qm_dqdetach(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ ASSERT(xfs_inode_clean(ip));
__xfs_inode_free(ip);
return error;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 5ed84d6c7059..cd81d6d9848d 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1772,10 +1772,31 @@ xfs_inactive_ifree(
return error;
}
+ /*
+ * We do not hold the inode locked across the entire rolling transaction
+ * here. We only need to hold it for the first transaction that
+ * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
+ * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
+ * here breaks the relationship between cluster buffer invalidation and
+ * stale inode invalidation on cluster buffer item journal commit
+ * completion, and can result in leaving dirty stale inodes hanging
+ * around in memory.
+ *
+ * We have no need for serialising this inode operation against other
+ * operations - we freed the inode and hence reallocation is required
+ * and that will serialise on reallocating the space the deferops need
+ * to free. Hence we can unlock the inode on the first commit of
+ * the transaction rather than roll it right through the deferops. This
+ * avoids relogging the XFS_ISTALE inode.
+ *
+ * We check that xfs_ifree() hasn't grown an internal transaction roll
+ * by asserting that the inode is still locked when it returns.
+ */
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, 0);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
error = xfs_ifree(tp, ip);
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
if (error) {
/*
* If we fail to free the inode, shut down. The cancel
@@ -1788,7 +1809,6 @@ xfs_inactive_ifree(
xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
}
xfs_trans_cancel(tp);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
@@ -1806,7 +1826,6 @@ xfs_inactive_ifree(
xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
__func__, error);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
return 0;
}
@@ -2949,7 +2968,8 @@ xfs_rename(
spaceres);
/*
- * Set up the target.
+ * Check for expected errors before we dirty the transaction
+ * so we can return an error without a transaction abort.
*/
if (target_ip == NULL) {
/*
@@ -2961,6 +2981,46 @@ xfs_rename(
if (error)
goto out_trans_cancel;
}
+ } else {
+ /*
+ * If target exists and it's a directory, check whether it can
+ * be destroyed.
+ */
+ if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
+ (!xfs_dir_isempty(target_ip) ||
+ (VFS_I(target_ip)->i_nlink > 2))) {
+ error = -EEXIST;
+ goto out_trans_cancel;
+ }
+ }
+
+ /*
+ * Directory entry creation below may acquire the AGF. Remove
+ * the whiteout from the unlinked list first to preserve correct
+ * AGI/AGF locking order. This dirties the transaction so failures
+ * after this point will abort and log recovery will clean up the
+ * mess.
+ *
+ * For whiteouts, we need to bump the link count on the whiteout
+ * inode. After this point we have a real link; clear the tmpfile
+ * state flag from the inode so it doesn't accidentally get misused
+ * in the future.
+ */
+ if (wip) {
+ ASSERT(VFS_I(wip)->i_nlink == 0);
+ error = xfs_iunlink_remove(tp, wip);
+ if (error)
+ goto out_trans_cancel;
+
+ xfs_bumplink(tp, wip);
+ xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
+ VFS_I(wip)->i_state &= ~I_LINKABLE;
+ }
+
+ /*
+ * Set up the target.
+ */
+ if (target_ip == NULL) {
/*
* If target does not exist and the rename crosses
* directories, adjust the target directory link count
@@ -2981,22 +3041,6 @@ xfs_rename(
}
} else { /* target_ip != NULL */
/*
- * If target exists and it's a directory, check that both
- * target and source are directories and that target can be
- * destroyed, or that neither is a directory.
- */
- if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
- /*
- * Make sure target dir is empty.
- */
- if (!(xfs_dir_isempty(target_ip)) ||
- (VFS_I(target_ip)->i_nlink > 2)) {
- error = -EEXIST;
- goto out_trans_cancel;
- }
- }
-
- /*
* Link the source inode under the target name.
* If the source inode is a directory and we are moving
* it across directories, its ".." entry will be
@@ -3086,32 +3130,6 @@ xfs_rename(
if (error)
goto out_trans_cancel;
- /*
- * For whiteouts, we need to bump the link count on the whiteout inode.
- * This means that failures all the way up to this point leave the inode
- * on the unlinked list and so cleanup is a simple matter of dropping
- * the remaining reference to it. If we fail here after bumping the link
- * count, we're shutting down the filesystem so we'll never see the
- * intermediate state on disk.
- */
- if (wip) {
- ASSERT(VFS_I(wip)->i_nlink == 0);
- error = xfs_bumplink(tp, wip);
- if (error)
- goto out_trans_cancel;
- error = xfs_iunlink_remove(tp, wip);
- if (error)
- goto out_trans_cancel;
- xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
-
- /*
- * Now we have a real link, clear the "I'm a tmpfile" state
- * flag from the inode so it doesn't accidentally get misused in
- * future.
- */
- VFS_I(wip)->i_state &= ~I_LINKABLE;
- }
-
xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
if (new_parent)
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index bad90479ade2..6ffb53edf1b4 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -2182,7 +2182,10 @@ xfs_file_ioctl(
if (error)
return error;
- return xfs_icache_free_eofblocks(mp, &keofb);
+ sb_start_write(mp->m_super);
+ error = xfs_icache_free_eofblocks(mp, &keofb);
+ sb_end_write(mp->m_super);
+ return error;
}
default:
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index e427ad097e2e..6011086b51de 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -849,7 +849,7 @@ xfs_setattr_size(
ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
ASSERT(S_ISREG(inode->i_mode));
ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
- ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
+ ATTR_MTIME_SET|ATTR_TIMES_SET)) == 0);
oldsize = inode->i_size;
newsize = iattr->ia_size;
@@ -895,6 +895,16 @@ xfs_setattr_size(
error = iomap_zero_range(inode, oldsize, newsize - oldsize,
&did_zeroing, &xfs_iomap_ops);
} else {
+ /*
+ * iomap won't detect a dirty page over an unwritten block (or a
+ * cow block over a hole) and subsequently skips zeroing the
+ * newly post-EOF portion of the page. Flush the new EOF to
+ * convert the block before the pagecache truncate.
+ */
+ error = filemap_write_and_wait_range(inode->i_mapping, newsize,
+ newsize);
+ if (error)
+ return error;
error = iomap_truncate_page(inode, newsize, &did_zeroing,
&xfs_iomap_ops);
}
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 7bba551cbf90..8b1b0862e869 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2712,7 +2712,6 @@ xlog_state_do_callback(
int funcdidcallbacks; /* flag: function did callbacks */
int repeats; /* for issuing console warnings if
* looping too many times */
- int wake = 0;
spin_lock(&log->l_icloglock);
first_iclog = iclog = log->l_iclog;
@@ -2914,11 +2913,9 @@ xlog_state_do_callback(
#endif
if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
- wake = 1;
- spin_unlock(&log->l_icloglock);
-
- if (wake)
wake_up_all(&log->l_flush_wait);
+
+ spin_unlock(&log->l_icloglock);
}
@@ -4026,7 +4023,9 @@ xfs_log_force_umount(
* item committed callback functions will do this again under lock to
* avoid races.
*/
+ spin_lock(&log->l_cilp->xc_push_lock);
wake_up_all(&log->l_cilp->xc_commit_wait);
+ spin_unlock(&log->l_cilp->xc_push_lock);
xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);
#ifdef XFSERRORDEBUG
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index f44c3599527d..1c9bced3e860 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -141,7 +141,7 @@ xfs_fs_map_blocks(
goto out_unlock;
error = invalidate_inode_pages2(inode->i_mapping);
if (WARN_ON_ONCE(error))
- return error;
+ goto out_unlock;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index f3c393f309e1..0b159a79a17c 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1010,6 +1010,7 @@ xfs_reflink_remap_extent(
xfs_filblks_t rlen;
xfs_filblks_t unmap_len;
xfs_off_t newlen;
+ int64_t qres;
int error;
unmap_len = irec->br_startoff + irec->br_blockcount - destoff;
@@ -1032,13 +1033,19 @@ xfs_reflink_remap_extent(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
- /* If we're not just clearing space, then do we have enough quota? */
- if (real_extent) {
- error = xfs_trans_reserve_quota_nblks(tp, ip,
- irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS);
- if (error)
- goto out_cancel;
- }
+ /*
+ * Reserve quota for this operation. We don't know if the first unmap
+ * in the dest file will cause a bmap btree split, so we always reserve
+ * at least enough blocks for that split. If the extent being mapped
+ * in is written, we need to reserve quota for that too.
+ */
+ qres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
+ if (real_extent)
+ qres += irec->br_blockcount;
+ error = xfs_trans_reserve_quota_nblks(tp, ip, qres, 0,
+ XFS_QMOPT_RES_REGBLKS);
+ if (error)
+ goto out_cancel;
trace_xfs_reflink_remap(ip, irec->br_startoff,
irec->br_blockcount, irec->br_startblock);
@@ -1058,6 +1065,7 @@ xfs_reflink_remap_extent(
uirec.br_startblock = irec->br_startblock + rlen;
uirec.br_startoff = irec->br_startoff + rlen;
uirec.br_blockcount = unmap_len - rlen;
+ uirec.br_state = irec->br_state;
unmap_len = rlen;
/* If this isn't a real mapping, we're done. */
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 484eb0adcefb..280965fc9bbd 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -245,6 +245,9 @@ xfs_rtallocate_extent_block(
end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
i <= end;
i++) {
+ /* Make sure we don't scan off the end of the rt volume. */
+ maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i;
+
/*
* See if there's a free extent of maxlen starting at i.
* If it's not so then next will contain the first non-free.
@@ -440,6 +443,14 @@ xfs_rtallocate_extent_near(
*/
if (bno >= mp->m_sb.sb_rextents)
bno = mp->m_sb.sb_rextents - 1;
+
+ /* Make sure we don't run off the end of the rt volume. */
+ maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno;
+ if (maxlen < minlen) {
+ *rtblock = NULLRTBLOCK;
+ return 0;
+ }
+
/*
* Try the exact allocation first.
*/
@@ -987,10 +998,13 @@ xfs_growfs_rt(
xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
/*
- * Update the bitmap inode's size.
+ * Update the bitmap inode's size ondisk and incore. We need
+ * to update the incore size so that inode inactivation won't
+ * punch what it thinks are "posteof" blocks.
*/
mp->m_rbmip->i_d.di_size =
nsbp->sb_rbmblocks * nsbp->sb_blocksize;
+ i_size_write(VFS_I(mp->m_rbmip), mp->m_rbmip->i_d.di_size);
xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
/*
* Get the summary inode into the transaction.
@@ -998,9 +1012,12 @@ xfs_growfs_rt(
xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
/*
- * Update the summary inode's size.
+ * Update the summary inode's size. We need to update the
+ * incore size so that inode inactivation won't punch what it
+ * thinks are "posteof" blocks.
*/
mp->m_rsumip->i_d.di_size = nmp->m_rsumsize;
+ i_size_write(VFS_I(mp->m_rsumip), mp->m_rsumip->i_d.di_size);
xfs_trans_log_inode(tp, mp->m_rsumip, XFS_ILOG_CORE);
/*
* Copy summary data from old to new sizes.
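
Both xfs_rtalloc.c hunks clamp the requested length so the free-extent scan cannot run past sb_rextents, and the _near variant bails out with NULLRTBLOCK if the clamped length drops below minlen. A standalone illustration of the clamp arithmetic (the numbers are made up; only the min() expression mirrors the patch):

#include <stdint.h>
#include <stdio.h>

/* Never let a scan of "maxlen" extents starting at "start" run past the
 * end of a volume that has "rextents" extents in total. */
static uint64_t clamp_scan_len(uint64_t rextents, uint64_t start, uint64_t maxlen)
{
        uint64_t end = start + maxlen;

        if (end > rextents)
                end = rextents;
        return end - start;     /* same as min(rextents, start + maxlen) - start */
}

int main(void)
{
        printf("%llu\n", (unsigned long long)clamp_scan_len(1000, 990, 64)); /* 10 */
        printf("%llu\n", (unsigned long long)clamp_scan_len(1000, 100, 64)); /* 64 */
        return 0;
}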
diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
index e9f810fc6731..43585850f154 100644
--- a/fs/xfs/xfs_sysfs.h
+++ b/fs/xfs/xfs_sysfs.h
@@ -32,9 +32,11 @@ xfs_sysfs_init(
struct xfs_kobj *parent_kobj,
const char *name)
{
+ struct kobject *parent;
+
+ parent = parent_kobj ? &parent_kobj->kobject : NULL;
init_completion(&kobj->complete);
- return kobject_init_and_add(&kobj->kobject, ktype,
- &parent_kobj->kobject, "%s", name);
+ return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
}
static inline void
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index d3a4e89bf4a0..66f167aefd94 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -520,8 +520,9 @@ xfsaild(
{
struct xfs_ail *ailp = data;
long tout = 0; /* milliseconds */
+ unsigned int noreclaim_flag;
- current->flags |= PF_MEMALLOC;
+ noreclaim_flag = memalloc_noreclaim_save();
set_freezable();
while (1) {
@@ -592,6 +593,7 @@ xfsaild(
tout = xfsaild_push(ailp);
}
+ memalloc_noreclaim_restore(noreclaim_flag);
return 0;
}
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index c23257a26c2b..b8f05d5909b5 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -657,7 +657,7 @@ xfs_trans_dqresv(
}
}
if (ninos > 0) {
- total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
+ total_count = dqp->q_res_icount + ninos;
timer = be32_to_cpu(dqp->q_core.d_itimer);
warns = be16_to_cpu(dqp->q_core.d_iwarns);
warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 542927321a61..6fcdf7e449fe 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -39,6 +39,7 @@ xfs_trans_ijoin(
ASSERT(iip->ili_lock_flags == 0);
iip->ili_lock_flags = lock_flags;
+ ASSERT(!xfs_iflags_test(ip, XFS_ISTALE));
/*
* Get a log_item_desc to point at the new item.
@@ -90,6 +91,7 @@ xfs_trans_log_inode(
ASSERT(ip->i_itemp != NULL);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ ASSERT(!xfs_iflags_test(ip, XFS_ISTALE));
/*
* Don't bother with i_lock for the I_DIRTY_TIME check here, as races
@@ -97,9 +99,9 @@ xfs_trans_log_inode(
* to log the timestamps, or will clear already cleared fields in the
* worst case.
*/
- if (inode->i_state & (I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED)) {
+ if (inode->i_state & I_DIRTY_TIME) {
spin_lock(&inode->i_lock);
- inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
+ inode->i_state &= ~I_DIRTY_TIME;
spin_unlock(&inode->i_lock);
}
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 856c56ef0143..878b8e26c6c5 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -59,11 +59,11 @@ struct acpi_exception_info {
#define AE_OK (acpi_status) 0x0000
-#define ACPI_ENV_EXCEPTION(status) (status & AE_CODE_ENVIRONMENTAL)
-#define ACPI_AML_EXCEPTION(status) (status & AE_CODE_AML)
-#define ACPI_PROG_EXCEPTION(status) (status & AE_CODE_PROGRAMMER)
-#define ACPI_TABLE_EXCEPTION(status) (status & AE_CODE_ACPI_TABLES)
-#define ACPI_CNTL_EXCEPTION(status) (status & AE_CODE_CONTROL)
+#define ACPI_ENV_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ENVIRONMENTAL)
+#define ACPI_AML_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_AML)
+#define ACPI_PROG_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_PROGRAMMER)
+#define ACPI_TABLE_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ACPI_TABLES)
+#define ACPI_CNTL_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_CONTROL)
/*
* Environmental exceptions
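
The macro change above matters because the AE_CODE_* categories are field values inside AE_CODE_MASK, not independent bit flags: a plain bitwise AND can match the wrong category when two category values share bits, and a zero-valued category can never match at all. A small standalone demo using illustrative values (not the real ACPICA constants):

#include <stdio.h>

/* Illustrative category encoding in the top nibble; NOT the real ACPICA
 * values, just enough to show why mask-then-compare is needed. */
#define CODE_MASK        0xF000
#define CODE_PROGRAMMER  0x1000
#define CODE_AML         0x3000

#define IS_AML_OLD(s)  ((s) & CODE_AML)                   /* pre-patch form */
#define IS_AML_NEW(s)  (((s) & CODE_MASK) == CODE_AML)    /* patched form   */

int main(void)
{
        unsigned int programmer_err = CODE_PROGRAMMER | 0x0004;

        /* Old form: 0x1004 & 0x3000 is non-zero, so a programmer error is
         * wrongly reported as an AML error; the new form gets it right. */
        printf("old: %d  new: %d\n", !!IS_AML_OLD(programmer_err),
               !!IS_AML_NEW(programmer_err));
        return 0;
}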
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index ba4dd54f2c82..8b19618bad0a 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -248,6 +248,7 @@ struct acpi_pnp_type {
struct acpi_device_pnp {
acpi_bus_id bus_id; /* Object name */
+ int instance_no; /* Instance number of this object */
struct acpi_pnp_type type; /* ID type */
acpi_bus_address bus_address; /* _ADR */
char *unique_id; /* _UID */
@@ -622,7 +623,6 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev);
bool acpi_pm_device_can_wakeup(struct device *dev);
int acpi_pm_device_sleep_state(struct device *, int *, int);
int acpi_pm_set_device_wakeup(struct device *dev, bool enable);
-int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable);
#else
static inline void acpi_pm_wakeup_event(struct device *dev)
{
@@ -653,10 +653,6 @@ static inline int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
{
return -ENODEV;
}
-static inline int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
-{
- return -ENODEV;
-}
#endif
#ifdef CONFIG_ACPI_SLEEP
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 1194a4c78d55..5b9eab15a1e6 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -293,6 +293,14 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
}
#endif
+static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg,
+ bool direct)
+{
+ if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
+ return fn(arg);
+ return work_on_cpu(cpu, fn, arg);
+}
+
/* in processor_perflib.c */
#ifdef CONFIG_CPU_FREQ
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 15fd0277ffa6..5901b0005929 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1115,10 +1115,6 @@ static inline bool arch_has_pfn_modify_check(void)
#endif /* !__ASSEMBLY__ */
-#ifndef io_remap_pfn_range
-#define io_remap_pfn_range remap_pfn_range
-#endif
-
#ifndef has_transparent_hugepage
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 849cd8eb5ca0..ea5987bb0b84 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -53,6 +53,9 @@ extern char __ctors_start[], __ctors_end[];
/* Start and end of .opd section - used for function descriptors. */
extern char __start_opd[], __end_opd[];
+/* Start and end of instrumentation protected text section */
+extern char __noinstr_text_start[], __noinstr_text_end[];
+
extern __visible const void __nosave_begin, __nosave_end;
/* Function descriptor handling (if any). Override in asm/sections.h */
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 238873739550..5aa8705df87e 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -48,7 +48,7 @@
#ifdef CONFIG_NEED_MULTIPLE_NODES
#define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
#else
- #define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #define cpumask_of_node(node) ((void)(node), cpu_online_mask)
#endif
#endif
#ifndef pcibus_to_node
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index dd38c97933f1..88484ee023ca 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -279,7 +279,8 @@
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
- *(.data..page_aligned)
+ *(.data..page_aligned) \
+ . = ALIGN(page_align);
#define READ_MOSTLY_DATA(align) \
. = ALIGN(align); \
@@ -306,6 +307,7 @@
*/
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
+ . = ALIGN(8); \
__start_ro_after_init = .; \
*(.data..ro_after_init) \
__end_ro_after_init = .;
@@ -361,7 +363,7 @@
} \
\
/* Built-in firmware blobs */ \
- .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
+ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
__start_builtin_fw = .; \
KEEP(*(.builtin_fw)) \
__end_builtin_fw = .; \
@@ -481,6 +483,15 @@
}
/*
+ * Non-instrumentable text section
+ */
+#define NOINSTR_TEXT \
+ ALIGN_FUNCTION(); \
+ __noinstr_text_start = .; \
+ *(.noinstr.text) \
+ __noinstr_text_end = .;
+
+/*
* .text section. Map to function alignment to avoid address changes
* during second ld run in second ld pass when generating System.map
*
@@ -490,7 +501,11 @@
*/
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
+ *(.text.hot .text.hot.*) \
+ *(TEXT_MAIN .text.fixup) \
+ *(.text.unlikely .text.unlikely.*) \
+ *(.text.unknown .text.unknown.*) \
+ NOINSTR_TEXT \
*(.text..refcount) \
*(.ref.text) \
MEM_KEEP(init.text*) \
@@ -650,7 +665,9 @@
. = ALIGN(bss_align); \
.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
BSS_FIRST_SECTIONS \
+ . = ALIGN(PAGE_SIZE); \
*(.bss..page_aligned) \
+ . = ALIGN(PAGE_SIZE); \
*(.dynbss) \
*(BSS_MAIN) \
*(COMMON) \
@@ -694,8 +711,13 @@
/* DWARF 4 */ \
.debug_types 0 : { *(.debug_types) } \
/* DWARF 5 */ \
+ .debug_addr 0 : { *(.debug_addr) } \
+ .debug_line_str 0 : { *(.debug_line_str) } \
+ .debug_loclists 0 : { *(.debug_loclists) } \
.debug_macro 0 : { *(.debug_macro) } \
- .debug_addr 0 : { *(.debug_addr) }
+ .debug_names 0 : { *(.debug_names) } \
+ .debug_rnglists 0 : { *(.debug_rnglists) } \
+ .debug_str_offsets 0 : { *(.debug_str_offsets) }
/* Stabs debugging sections. */
#define STABS_DEBUG \
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index e328b52425a8..1ff78365607c 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -152,6 +152,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
* crypto_free_acomp() -- free ACOMPRESS tfm handle
*
* @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
{
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 1e26f790b03f..c69c545ba39a 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -187,6 +187,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
/**
* crypto_free_aead() - zeroize and free aead handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_aead(struct crypto_aead *tfm)
{
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index b5e11de4d497..9817f2e5bff8 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
* crypto_free_akcipher() - free AKCIPHER tfm handle
*
* @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
{
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 76e432cab75d..552517dcf9e4 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -257,6 +257,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
/**
* crypto_free_ahash() - zeroize and free the ahash handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
@@ -692,6 +694,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
/**
* crypto_free_shash() - zeroize and free the message digest handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_shash(struct crypto_shash *tfm)
{
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 482461d8931d..11f107df78dc 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -34,8 +34,8 @@ struct alg_sock {
struct sock *parent;
- unsigned int refcnt;
- unsigned int nokey_refcnt;
+ atomic_t refcnt;
+ atomic_t nokey_refcnt;
const struct af_alg_type *type;
void *private;
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 1bde0a6514fa..1a34630fc371 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -159,6 +159,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
* crypto_free_kpp() - free KPP tfm handle
*
* @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_kpp(struct crypto_kpp *tfm)
{
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index b95ede354a66..a788c1e5a121 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -116,6 +116,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
/**
* crypto_free_rng() - zeroize and free RNG handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_rng(struct crypto_rng *tfm)
{
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 2f327f090c3e..c7553f8b1bb6 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -206,6 +206,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm(
/**
* crypto_free_skcipher() - zeroize and free cipher handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
{
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 97ea41dc678f..e5f641cdab5a 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -81,6 +81,53 @@ enum drm_connector_status {
connector_status_unknown = 3,
};
+/**
+ * enum drm_connector_registration_state - userspace registration status for
+ * a &drm_connector
+ *
+ * This enum is used to track the status of initializing a connector and
+ * registering it with userspace, so that DRM can prevent bogus modesets on
+ * connectors that no longer exist.
+ */
+enum drm_connector_registration_state {
+ /**
+ * @DRM_CONNECTOR_INITIALIZING: The connector has just been created,
+ * but has yet to be exposed to userspace. There should be no
+ * additional restrictions to how the state of this connector may be
+ * modified.
+ */
+ DRM_CONNECTOR_INITIALIZING = 0,
+
+ /**
+ * @DRM_CONNECTOR_REGISTERED: The connector has been fully initialized
+ * and registered with sysfs, as such it has been exposed to
+ * userspace. There should be no additional restrictions to how the
+ * state of this connector may be modified.
+ */
+ DRM_CONNECTOR_REGISTERED = 1,
+
+ /**
+ * @DRM_CONNECTOR_UNREGISTERED: The connector has either been exposed
+ * to userspace and has since been unregistered and removed from
+ * userspace, or the connector was unregistered before it had a chance
+ * to be exposed to userspace (e.g. still in the
+ * @DRM_CONNECTOR_INITIALIZING state). When a connector is
+ * unregistered, there are additional restrictions to how its state
+ * may be modified:
+ *
+ * - An unregistered connector may only have its DPMS changed from
+ * On->Off. Once DPMS is changed to Off, it may not be switched back
+ * to On.
+ * - Modesets are not allowed on unregistered connectors, unless they
+ * would result in disabling its assigned CRTCs. This means
+ * disabling a CRTC on an unregistered connector is OK, but enabling
+ * one is not.
+ * - Removing a CRTC from an unregistered connector is OK, but new
+ * CRTCs may never be assigned to an unregistered connector.
+ */
+ DRM_CONNECTOR_UNREGISTERED = 2,
+};
+
enum subpixel_order {
SubPixelUnknown = 0,
SubPixelHorizontalRGB,
@@ -852,10 +899,12 @@ struct drm_connector {
bool ycbcr_420_allowed;
/**
- * @registered: Is this connector exposed (registered) with userspace?
+ * @registration_state: Is this connector initializing, exposed
+ * (registered) with userspace, or unregistered?
+ *
* Protected by @mutex.
*/
- bool registered;
+ enum drm_connector_registration_state registration_state;
/**
* @modes:
@@ -1165,6 +1214,24 @@ static inline void drm_connector_unreference(struct drm_connector *connector)
drm_connector_put(connector);
}
+/**
+ * drm_connector_is_unregistered - has the connector been unregistered from
+ * userspace?
+ * @connector: DRM connector
+ *
+ * Checks whether or not @connector has been unregistered from userspace.
+ *
+ * Returns:
+ * True if the connector was unregistered, false if the connector is
+ * registered or has not yet been registered with userspace.
+ */
+static inline bool
+drm_connector_is_unregistered(struct drm_connector *connector)
+{
+ return READ_ONCE(connector->registration_state) ==
+ DRM_CONNECTOR_UNREGISTERED;
+}
+
const char *drm_get_connector_status_name(enum drm_connector_status status);
const char *drm_get_subpixel_order_name(enum subpixel_order order);
const char *drm_get_dpms_name(int val);
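
Given the rules spelled out in the enum documentation (no modesets that enable CRTCs on an unregistered connector), a hedged sketch of the kind of guard the new helper enables; my_driver_enable_output() and the -ENODEV choice are placeholders, not DRM core API:

/* Sketch only: shows where drm_connector_is_unregistered() would sit. */
static int my_driver_enable_output(struct drm_connector *connector)
{
        if (drm_connector_is_unregistered(connector))
                return -ENODEV; /* connector already torn down for userspace */

        /* ... safe to go on enabling the CRTC/encoder here ... */
        return 0;
}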
diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h
index e0970a578188..a7207a965466 100644
--- a/include/keys/big_key-type.h
+++ b/include/keys/big_key-type.h
@@ -21,6 +21,6 @@ extern void big_key_free_preparse(struct key_preparsed_payload *prep);
extern void big_key_revoke(struct key *key);
extern void big_key_destroy(struct key *key);
extern void big_key_describe(const struct key *big_key, struct seq_file *m);
-extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen);
+extern long big_key_read(const struct key *key, char *buffer, size_t buflen);
#endif /* _KEYS_BIG_KEY_TYPE_H */
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index 12babe991594..0d8f3cd3056f 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -45,8 +45,7 @@ extern int user_update(struct key *key, struct key_preparsed_payload *prep);
extern void user_revoke(struct key *key);
extern void user_destroy(struct key *key);
extern void user_describe(const struct key *user, struct seq_file *m);
-extern long user_read(const struct key *key,
- char __user *buffer, size_t buflen);
+extern long user_read(const struct key *key, char *buffer, size_t buflen);
static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key)
{
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index cd412817654f..1a37748766b7 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -230,10 +230,14 @@ void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
void __acpi_unmap_table(void __iomem *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
+void acpi_boot_table_prepare (void);
void acpi_boot_table_init (void);
int acpi_mps_check (void);
int acpi_numa_init (void);
+int acpi_locate_initial_tables (void);
+void acpi_reserve_initial_tables (void);
+void acpi_table_init_complete (void);
int acpi_table_init (void);
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
@@ -734,9 +738,12 @@ static inline int acpi_boot_init(void)
return 0;
}
+static inline void acpi_boot_table_prepare(void)
+{
+}
+
static inline void acpi_boot_table_init(void)
{
- return;
}
static inline int acpi_mps_check(void)
@@ -812,6 +819,13 @@ static inline int acpi_device_modalias(struct device *dev,
return -ENODEV;
}
+static inline struct platform_device *
+acpi_create_platform_device(struct acpi_device *adev,
+ struct property_entry *properties)
+{
+ return NULL;
+}
+
static inline bool acpi_dma_supported(struct acpi_device *adev)
{
return false;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c28a47cbe355..1ef4aca7b953 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,7 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
+#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
@@ -498,4 +499,13 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
(1 << WB_async_congested));
}
+extern const char *bdi_unknown_name;
+
+static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
+{
+ if (!bdi || !bdi->dev)
+ return bdi_unknown_name;
+ return dev_name(bdi->dev);
+}
+
#endif /* _LINUX_BACKING_DEV_H */
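
bdi_dev_name() is a defensive accessor: callers always get a printable name, even when the bdi or its device is already gone. A userspace analog of the same pattern (all names here are illustrative, not kernel API):

#include <stdio.h>

struct device_demo { const char *name; };
struct bdi_demo    { struct device_demo *dev; };

static const char *bdi_unknown_name_demo = "(unknown)";

/* Same shape as bdi_dev_name() above: never return NULL, fall back to a
 * fixed string when either the object or its device is missing. */
static const char *bdi_dev_name_demo(const struct bdi_demo *bdi)
{
        if (!bdi || !bdi->dev)
                return bdi_unknown_name_demo;
        return bdi->dev->name;
}

int main(void)
{
        struct device_demo dev = { .name = "8:0" };
        struct bdi_demo bdi = { .dev = &dev };

        printf("%s\n", bdi_dev_name_demo(&bdi)); /* 8:0       */
        printf("%s\n", bdi_dev_name_demo(NULL)); /* (unknown) */
        return 0;
}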
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 3f1ef4450a7c..775cd10c04b0 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -72,7 +72,7 @@
*/
#define FIELD_FIT(_mask, _val) \
({ \
- __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \
+ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \
!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
})
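
FIELD_FIT() is meant to be a runtime "does this value fit in the field described by this mask" test; the change above keeps the compile-time check focused on the mask itself rather than the value being tested. A simplified, standalone rendering of the runtime test (BF_SHF_DEMO stands in for the kernel's __bf_shf; __builtin_ctzll is a GCC/Clang builtin):

#include <stdint.h>
#include <stdio.h>

#define BF_SHF_DEMO(mask)       ((unsigned)__builtin_ctzll(mask))
#define FIELD_FIT_DEMO(mask, val) \
        (!((((uint64_t)(val)) << BF_SHF_DEMO(mask)) & ~((uint64_t)(mask))))

int main(void)
{
        uint64_t mask = 0x0000ff00;     /* an 8-bit field at bits 8..15 */

        printf("%d\n", FIELD_FIT_DEMO(mask, 0x12));  /* 1: fits          */
        printf("%d\n", FIELD_FIT_DEMO(mask, 0x123)); /* 0: needs 9 bits  */
        return 0;
}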
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index e02cbca3cfaf..5c1522ed2d7c 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -50,7 +50,7 @@ static inline int get_bitmask_order(unsigned int count)
static __always_inline unsigned long hweight_long(unsigned long w)
{
- return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+ return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
/**
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6e67aeb56928..209ba8e7bd31 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -624,7 +624,7 @@ struct request_queue {
unsigned int sg_reserved_size;
int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
- struct blk_trace *blk_trace;
+ struct blk_trace __rcu *blk_trace;
struct mutex blk_trace_mutex;
#endif
/*
@@ -637,6 +637,7 @@ struct request_queue {
struct delayed_work requeue_work;
struct mutex sysfs_lock;
+ struct mutex sysfs_dir_lock;
int bypass_depth;
atomic_t mq_freeze_depth;
@@ -743,6 +744,7 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
+#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7bb2d8de9f30..3b6ff5902edc 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
**/
#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
do { \
- struct blk_trace *bt = (q)->blk_trace; \
+ struct blk_trace *bt; \
+ \
+ rcu_read_lock(); \
+ bt = rcu_dereference((q)->blk_trace); \
if (unlikely(bt)) \
__trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
+ rcu_read_unlock(); \
} while (0)
#define blk_add_trace_msg(q, fmt, ...) \
blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
@@ -61,10 +65,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
static inline bool blk_trace_note_message_enabled(struct request_queue *q)
{
- struct blk_trace *bt = q->blk_trace;
- if (likely(!bt))
- return false;
- return bt->act_mask & BLK_TC_NOTIFY;
+ struct blk_trace *bt;
+ bool ret;
+
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
+ rcu_read_unlock();
+ return ret;
}
extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 16f6beef5cad..3b3337333cfd 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -382,7 +382,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
struct bpf_prog *include_prog,
struct bpf_prog_array **new_array);
-#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
+#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \
({ \
struct bpf_prog_array_item *_item; \
struct bpf_prog *_prog; \
@@ -395,7 +395,8 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
goto _out; \
_item = &_array->items[0]; \
while ((_prog = READ_ONCE(_item->prog))) { \
- bpf_cgroup_storage_set(_item->cgroup_storage); \
+ if (set_cg_storage) \
+ bpf_cgroup_storage_set(_item->cgroup_storage); \
_ret &= func(_prog, ctx); \
_item++; \
} \
@@ -406,10 +407,10 @@ _out: \
})
#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
- __BPF_PROG_RUN_ARRAY(array, ctx, func, false)
+ __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)
#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \
- __BPF_PROG_RUN_ARRAY(array, ctx, func, true)
+ __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 91393724e933..1c8517320ea6 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -144,10 +144,11 @@ struct bpf_verifier_state_list {
};
/* Possible states for alu_state member. */
-#define BPF_ALU_SANITIZE_SRC 1U
-#define BPF_ALU_SANITIZE_DST 2U
+#define BPF_ALU_SANITIZE_SRC (1U << 0)
+#define BPF_ALU_SANITIZE_DST (1U << 1)
#define BPF_ALU_NEG_VALUE (1U << 2)
#define BPF_ALU_NON_POINTER (1U << 3)
+#define BPF_ALU_IMMEDIATE (1U << 4)
#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
BPF_ALU_SANITIZE_DST)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 96225a77c112..9168fc33a4f7 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -189,6 +189,8 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
+void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
+ gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
@@ -319,6 +321,12 @@ sb_breadahead(struct super_block *sb, sector_t block)
__breadahead(sb->s_bdev, block, sb->s_blocksize);
}
+static inline void
+sb_breadahead_unmovable(struct super_block *sb, sector_t block)
+{
+ __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
+}
+
static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index 43d1fd50d433..6099f754aad7 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -80,4 +80,9 @@
#endif /* __CHECKER__ */
+#ifdef __GENKSYMS__
+/* genksyms gets confused by _Static_assert */
+#define _Static_assert(expr, ...)
+#endif
+
#endif /* _LINUX_BUILD_BUG_H */
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index fe7a22dd133b..bc1f16e9f3f4 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -119,11 +119,18 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv,
return true;
}
+static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter)
+{
+ iter->bi_bvec_done = 0;
+ iter->bi_idx++;
+}
+
#define for_each_bvec(bvl, bio_vec, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
- bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+ (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \
+ (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter)))
/* for iterating one bio from start to end */
#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index b3379a97245c..fd1ae7907250 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -49,8 +49,12 @@ static inline void can_skb_reserve(struct sk_buff *skb)
static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
{
- if (sk) {
- sock_hold(sk);
+ /* If the socket has already been closed by user space, the
+ * refcount may already be 0 (and the socket will be freed
+ * after the last TX skb has been freed). So only increase
+ * socket refcount if the refcount is > 0.
+ */
+ if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
skb->destructor = sock_efree;
skb->sk = sk;
}
@@ -61,21 +65,17 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
*/
static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
{
- if (skb_shared(skb)) {
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+ struct sk_buff *nskb;
- if (likely(nskb)) {
- can_skb_set_owner(nskb, skb->sk);
- consume_skb(skb);
- return nskb;
- } else {
- kfree_skb(skb);
- return NULL;
- }
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!nskb)) {
+ kfree_skb(skb);
+ return NULL;
}
- /* we can assume to have an unshared skb with proper owner */
- return skb;
+ can_skb_set_owner(nskb, skb->sk);
+ consume_skb(skb);
+ return nskb;
}
#endif /* !_CAN_SKB_H */
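
The patched can_skb_set_owner() takes a reference only when refcount_inc_not_zero() succeeds, i.e. only while the socket is still alive; on failure it simply skips attaching the destructor. A userspace C11 illustration of that "increment unless already zero" semantic (my own demo function, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object has not already dropped to zero. */
static bool refcount_inc_not_zero_demo(atomic_int *refs)
{
        int old = atomic_load(refs);

        while (old != 0) {
                if (atomic_compare_exchange_weak(refs, &old, old + 1))
                        return true;    /* reference taken */
        }
        return false;                   /* object already dying */
}

int main(void)
{
        atomic_int live = 1, dead = 0;

        printf("live: %d\n", refcount_inc_not_zero_demo(&live)); /* 1 */
        printf("dead: %d\n", refcount_inc_not_zero_demo(&dead)); /* 0 */
        return 0;
}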
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index a01ebb630abc..f92264d1ed4f 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -755,7 +755,9 @@ struct sock_cgroup_data {
union {
#ifdef __LITTLE_ENDIAN
struct {
- u8 is_data;
+ u8 is_data : 1;
+ u8 no_refcnt : 1;
+ u8 unused : 6;
u8 padding;
u16 prioidx;
u32 classid;
@@ -765,7 +767,9 @@ struct sock_cgroup_data {
u32 classid;
u16 prioidx;
u8 padding;
- u8 is_data;
+ u8 unused : 6;
+ u8 no_refcnt : 1;
+ u8 is_data : 1;
} __packed;
#endif
u64 val;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 5ed1a2b285cd..02da4e1def61 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -810,6 +810,7 @@ extern spinlock_t cgroup_sk_update_lock;
void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);
static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
@@ -823,7 +824,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
*/
v = READ_ONCE(skcd->val);
- if (v & 1)
+ if (v & 3)
return &cgrp_dfl_root.cgrp;
return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
@@ -835,6 +836,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
#else /* CONFIG_CGROUP_DATA */
static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_CGROUP_DATA */
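
sock_cgroup_data packs either an aligned cgroup pointer or prio/classid data into one 64-bit word, and the low bits of that word double as tags (is_data, and after this change no_refcnt); that is why the pointer test widens from "v & 1" to "v & 3". A standalone sketch of the underlying low-bit-tagging trick, assuming at least 4-byte object alignment (an analogy, not the kernel layout):

#include <stdint.h>
#include <stdio.h>

#define TAG_IS_DATA     0x1UL
#define TAG_NO_REFCNT   0x2UL
#define TAG_MASK        (TAG_IS_DATA | TAG_NO_REFCNT)

struct cgroup_demo { int dummy; };              /* int member => 4-byte aligned */
static struct cgroup_demo default_grp, real_grp;

/* If any tag bit is set, fall back to the default group; otherwise the
 * word is a plain aligned pointer. Mirrors the "v & 3" test above. */
static struct cgroup_demo *decode(uintptr_t v)
{
        if (v & TAG_MASK)
                return &default_grp;
        return v ? (struct cgroup_demo *)v : &default_grp;
}

int main(void)
{
        uintptr_t plain  = (uintptr_t)&real_grp;
        uintptr_t tagged = plain | TAG_NO_REFCNT;

        printf("plain  -> real:    %d\n", decode(plain)  == &real_grp);
        printf("tagged -> default: %d\n", decode(tagged) == &default_grp);
        return 0;
}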
diff --git a/include/linux/compat.h b/include/linux/compat.h
index de0c13bdcd2c..189d0e111d57 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -445,8 +445,6 @@ struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;
-extern void compat_exit_robust_list(struct task_struct *curr);
-
#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index d756f2318efe..2d6e5e4bb5d9 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -39,7 +39,6 @@
* and may be redefined here because they should not be shared with other
* compilers, like ICC.
*/
-#define barrier() __asm__ __volatile__("" : : : "memory")
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
#define __assume_aligned(a, ...) \
__attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 3ebee1ce6f98..a80d6de3c8ad 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -12,26 +12,13 @@
#if GCC_VERSION < 40600
# error Sorry, your compiler is too old - please upgrade it.
-#endif
-
-/* Optimization barrier */
-
-/* The "volatile" is due to gcc bugs */
-#define barrier() __asm__ __volatile__("": : :"memory")
+#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100
/*
- * This version is i.e. to prevent dead stores elimination on @ptr
- * where gcc and llvm may behave differently when otherwise using
- * normal barrier(): while gcc behavior gets along with a normal
- * barrier(), llvm needs an explicit input variable to be assumed
- * clobbered. The issue is as follows: while the inline asm might
- * access any memory it wants, the compiler could have fit all of
- * @ptr into memory registers instead, and since @ptr never escaped
- * from that, it proved that the inline asm wasn't touching any of
- * it. This version works well with both compilers, i.e. we're telling
- * the compiler that the inline asm absolutely may see the contents
- * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293
+ * https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk
*/
-#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
+# error Sorry, your version of GCC is too old - please use 5.1 or newer.
+#endif
/*
* This macro obfuscates arithmetic on a variable address so that gcc
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index bb22908c79e8..6a53300cbd1e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -79,11 +79,25 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
/* Optimization barrier */
#ifndef barrier
-# define barrier() __memory_barrier()
+/* The "volatile" is due to gcc bugs */
+# define barrier() __asm__ __volatile__("": : :"memory")
#endif
#ifndef barrier_data
-# define barrier_data(ptr) barrier()
+/*
+ * This version exists e.g. to prevent dead-store elimination on @ptr
+ * where gcc and llvm may behave differently when otherwise using
+ * normal barrier(): while gcc behavior gets along with a normal
+ * barrier(), llvm needs an explicit input variable to be assumed
+ * clobbered. The issue is as follows: while the inline asm might
+ * access any memory it wants, the compiler could have fit all of
+ * @ptr into memory registers instead, and since @ptr never escaped
+ * from that, it proved that the inline asm wasn't touching any of
+ * it. This version works well with both compilers, i.e. we're telling
+ * the compiler that the inline asm absolutely may see the contents
+ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
/* workaround for GCC PR82365 if needed */
@@ -115,11 +129,65 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
".pushsection .discard.unreachable\n\t" \
".long 999b - .\n\t" \
".popsection\n\t"
+
+#ifdef CONFIG_DEBUG_ENTRY
+/* Begin/end of an instrumentation safe region */
+#define instrumentation_begin() ({ \
+ asm volatile("%c0:\n\t" \
+ ".pushsection .discard.instr_begin\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__COUNTER__)); \
+})
+
+/*
+ * Because instrumentation_{begin,end}() can nest, objtool validation considers
+ * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
+ * When the value is greater than 0, we consider instrumentation allowed.
+ *
+ * There is a problem with code like:
+ *
+ * noinstr void foo()
+ * {
+ * instrumentation_begin();
+ * ...
+ * if (cond) {
+ * instrumentation_begin();
+ * ...
+ * instrumentation_end();
+ * }
+ * bar();
+ * instrumentation_end();
+ * }
+ *
+ * If instrumentation_end() would be an empty label, like all the other
+ * annotations, the inner _end(), which is at the end of a conditional block,
+ * would land on the instruction after the block.
+ *
+ * If we then consider the sum of the !cond path, we'll see that the call to
+ * bar() is with a 0-value, even though, we meant it to happen with a positive
+ * value.
+ *
+ * To avoid this, have _end() be a NOP instruction; this ensures it will be
+ * part of the conditional block and does not escape.
+ */
+#define instrumentation_end() ({ \
+ asm volatile("%c0: nop\n\t" \
+ ".pushsection .discard.instr_end\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__COUNTER__)); \
+})
+#endif /* CONFIG_DEBUG_ENTRY */
+
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif
+#ifndef instrumentation_begin
+#define instrumentation_begin() do { } while(0)
+#define instrumentation_end() do { } while(0)
+#endif
+
#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
@@ -345,10 +413,16 @@ static inline void *offset_to_ptr(const int *off)
* compiler has support to do so.
*/
#define compiletime_assert(condition, msg) \
- _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
+ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
#define compiletime_assert_atomic_type(t) \
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")
+/*
+ * This is needed in functions which generate the stack canary, see
+ * arch/x86/kernel/smpboot.c::start_secondary() for an example.
+ */
+#define prevent_tail_call_optimization() mb()
+
#endif /* __LINUX_COMPILER_H */
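
Among the compiler.h changes, compiletime_assert() now keys its generated identifier on __COUNTER__ instead of __LINE__, so two assertions expanded on the same source line no longer collide. A small demo of why that matters (__COUNTER__ is a GCC/Clang extension, not ISO C; the CAT helpers are the usual token-pasting idiom):

#include <stdio.h>

#define CAT_(a, b) a##b
#define CAT(a, b)  CAT_(a, b)

/* Two generated identifiers on one source line: with __COUNTER__ they come
 * out as val_0 and val_1; keying on __LINE__ would redefine the same name. */
int CAT(val_, __COUNTER__) = 1; int CAT(val_, __COUNTER__) = 2;

int main(void)
{
        printf("both definitions compiled with unique names\n");
        return 0;
}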
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 2b8ed70c4c77..c01100318b25 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -234,6 +234,12 @@ struct ftrace_likely_data {
#define notrace __attribute__((no_instrument_function))
#endif
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+/* Section for code which can't be instrumented at all */
+#define noinstr \
+ noinline notrace __attribute((__section__(".noinstr.text")))
+#endif
+
/*
* it doesn't make sense on ARM (currently the only user of __naked)
* to trace naked functions because then mcount is called without
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index fea64f2692a0..8b5bc3a47bf5 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -62,6 +62,7 @@ struct vc_data {
unsigned int vc_rows;
unsigned int vc_size_row; /* Bytes per row */
unsigned int vc_scan_lines; /* # of scan lines */
+ unsigned int vc_cell_height; /* CRTC character cell height */
unsigned long vc_origin; /* [!] Start of real screen */
unsigned long vc_scr_end; /* [!] End of real screen */
unsigned long vc_visible_origin; /* [!] Top of visible window */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 0880baefd85f..02b1b40fea5b 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -210,7 +210,7 @@ struct dentry_operations {
#define DCACHE_MAY_FREE 0x00800000
#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
-#define DCACHE_ENCRYPTED_WITH_KEY 0x02000000 /* dir is encrypted with a valid key */
+#define DCACHE_ENCRYPTED_NAME 0x02000000 /* Encrypted name (dir key was unavailable) */
#define DCACHE_OP_REAL 0x04000000
#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 3b0ba54cc4d5..3bc1034c57e6 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -54,6 +54,8 @@ static const struct file_operations __fops = { \
.llseek = no_llseek, \
}
+typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
@@ -75,7 +77,6 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
const char *dest);
-typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
debugfs_automount_t f,
@@ -204,7 +205,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name,
static inline struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
- struct vfsmount *(*f)(void *),
+ debugfs_automount_t f,
void *data)
{
return ERR_PTR(-ENODEV);
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
index 4635f95000a4..79a6e37a1d6f 100644
--- a/include/linux/devfreq_cooling.h
+++ b/include/linux/devfreq_cooling.h
@@ -75,7 +75,7 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *dfc);
#else /* !CONFIG_DEVFREQ_THERMAL */
-struct thermal_cooling_device *
+static inline struct thermal_cooling_device *
of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
struct devfreq_cooling_power *dfc_power)
{
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 91f9f95ad506..cde6708644ae 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -315,6 +315,11 @@ struct dm_target {
* on max_io_len boundary.
*/
bool split_discard_bios:1;
+
+ /*
+ * Set if we need to limit the number of in-flight bios when swapping.
+ */
+ bool limit_swap_bios:1;
};
/* Each target can link one of these into the table */
@@ -419,6 +424,7 @@ const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
+int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio,
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
index 3c8b7d274bd9..45ba37aaf6b7 100644
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -138,6 +138,7 @@ void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
sector_t dm_bufio_get_block_number(struct dm_buffer *b);
void *dm_bufio_get_block_data(struct dm_buffer *b);
void *dm_bufio_get_aux_data(struct dm_buffer *b);
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
index eec7928ff8fe..99580c22f91a 100644
--- a/include/linux/eeprom_93xx46.h
+++ b/include/linux/eeprom_93xx46.h
@@ -16,6 +16,8 @@ struct eeprom_93xx46_platform_data {
#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
/* Instructions such as EWEN are (addrlen + 2) in length. */
#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
+/* Add extra cycle after address during a read */
+#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2)
/*
* optional hooks to control additional logic
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 6797811bf1e6..9a5d4b499271 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -996,7 +996,11 @@ extern void *efi_get_pal_addr (void);
extern void efi_map_pal_code (void);
extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timespec64 *ts);
+#ifdef CONFIG_EFI
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
+#else
+static inline void efi_enter_virtual_mode (void) {}
+#endif
#ifdef CONFIG_X86
extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes,
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 4cad0e784b28..b81f9e1d74b0 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -58,6 +58,7 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse
}
#endif
+#if defined(CONFIG_UM) || defined(CONFIG_IA64)
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its
@@ -72,5 +73,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
extern int
elf_core_write_extra_data(struct coredump_params *cprm);
extern size_t elf_core_extra_data_size(void);
+#else
+static inline Elf_Half elf_core_extra_phdrs(void)
+{
+ return 0;
+}
+
+static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+{
+ return 1;
+}
+
+static inline int elf_core_write_extra_data(struct coredump_params *cprm)
+{
+ return 1;
+}
+
+static inline size_t elf_core_extra_data_size(void)
+{
+ return 0;
+}
+#endif
#endif /* _LINUX_ELFCORE_H */
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
index f236f5b931b2..7fdd7f355b52 100644
--- a/include/linux/elfnote.h
+++ b/include/linux/elfnote.h
@@ -54,7 +54,7 @@
.popsection ;
#define ELFNOTE(name, type, desc) \
- ELFNOTE_START(name, type, "") \
+ ELFNOTE_START(name, type, "a") \
desc ; \
ELFNOTE_END
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 7f033b1ea568..fdef4c784d03 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -279,6 +279,29 @@ static inline void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb) { }
+static inline int extcon_register_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int extcon_unregister_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int devm_extcon_register_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline void devm_extcon_unregister_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb) { }
+
static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
return ERR_PTR(-ENODEV);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 3705c6f10b17..117f9380069a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -746,18 +746,18 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#define __bpf_call_base_args \
((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
- __bpf_call_base)
+ (void *)__bpf_call_base)
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_pkt_data(void *func);
-static inline bool bpf_dump_raw_ok(void)
+static inline bool bpf_dump_raw_ok(const struct cred *cred)
{
/* Reconstruction of call-sites is dependent on kallsyms,
* thus make dump the same restriction.
*/
- return kallsyms_show_value() == 1;
+ return kallsyms_show_value(cred);
}
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
diff --git a/include/linux/font.h b/include/linux/font.h
index d6821769dd1e..f85e70bd4793 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -57,4 +57,17 @@ extern const struct font_desc *get_default_font(int xres, int yres,
/* Max. length for the name of a predefined font */
#define MAX_FONT_NAME 32
+/* Extra word getters */
+#define REFCOUNT(fd) (((int *)(fd))[-1])
+#define FNTSIZE(fd) (((int *)(fd))[-2])
+#define FNTCHARCNT(fd) (((int *)(fd))[-3])
+#define FNTSUM(fd) (((int *)(fd))[-4])
+
+#define FONT_EXTRA_WORDS 4
+
+struct font_data {
+ unsigned int extra[FONT_EXTRA_WORDS];
+ const unsigned char data[];
+} __packed;
+
#endif /* _VIDEO_FONT_H */
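
The new struct font_data places four bookkeeping words directly in front of the glyph bytes, and the macros read them back through negative indices from the data pointer, so [-1] lands on extra[3], [-2] on extra[2], and so on. A standalone illustration of that layout (a fixed-size array replaces the kernel's flexible array member, and the *_DEMO names are mine; the cast back from the byte pointer is the same trick the macros use):

#include <stdio.h>

#define FONT_EXTRA_WORDS 4

struct font_data_demo {
        unsigned int extra[FONT_EXTRA_WORDS];   /* [0]=sum [1]=charcount [2]=size [3]=refcount */
        unsigned char data[4];                  /* glyph bytes start right after */
};

#define FNTSIZE_DEMO(fd)        (((const unsigned int *)(const void *)(fd))[-2])
#define FNTCHARCNT_DEMO(fd)     (((const unsigned int *)(const void *)(fd))[-3])

static const struct font_data_demo demo = {
        .extra = { 0, 256, 2048, 1 },
        .data  = { 0x18, 0x3c, 0x66, 0x00 },
};

int main(void)
{
        const unsigned char *fd = demo.data;    /* what drivers pass around */

        printf("size=%u chars=%u\n", FNTSIZE_DEMO(fd), FNTCHARCNT_DEMO(fd));
        return 0;                               /* size=2048 chars=256 */
}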
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 15b8e02880c3..b6a955ba6173 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -934,7 +934,7 @@ struct file_handle {
__u32 handle_bytes;
int handle_type;
/* file identifier */
- unsigned char f_handle[0];
+ unsigned char f_handle[];
};
static inline struct file *get_file(struct file *f)
@@ -2050,6 +2050,10 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
*
* I_CREATING New object's inode in the middle of setting up.
*
+ * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
+ * Used to detect that mark_inode_dirty() should not move
+ * inode between dirty lists.
+ *
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
@@ -2067,11 +2071,10 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
#define I_LINKABLE (1 << 10)
#define I_DIRTY_TIME (1 << 11)
-#define __I_DIRTY_TIME_EXPIRED 12
-#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
#define I_WB_SWITCH (1 << 13)
#define I_OVL_INUSE (1 << 14)
#define I_CREATING (1 << 15)
+#define I_SYNC_QUEUED (1 << 17)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 952ab97af325..c1e4a615bd1c 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -32,6 +32,7 @@ struct fscrypt_name {
u32 hash;
u32 minor_hash;
struct fscrypt_str crypto_buf;
+ bool is_ciphertext_name;
};
#define FSTR_INIT(n, l) { .name = n, .len = l }
@@ -89,7 +90,7 @@ static inline int fscrypt_require_key(struct inode *inode)
* in an encrypted directory tree use the same encryption policy.
*
* Return: 0 on success, -ENOKEY if the directory's encryption key is missing,
- * -EPERM if the link would result in an inconsistent encryption policy, or
+ * -EXDEV if the link would result in an inconsistent encryption policy, or
* another -errno code.
*/
static inline int fscrypt_prepare_link(struct dentry *old_dentry,
@@ -97,7 +98,7 @@ static inline int fscrypt_prepare_link(struct dentry *old_dentry,
struct dentry *dentry)
{
if (IS_ENCRYPTED(dir))
- return __fscrypt_prepare_link(d_inode(old_dentry), dir);
+ return __fscrypt_prepare_link(d_inode(old_dentry), dir, dentry);
return 0;
}
@@ -119,7 +120,7 @@ static inline int fscrypt_prepare_link(struct dentry *old_dentry,
* We also verify that the rename will not violate the constraint that all files
* in an encrypted directory tree use the same encryption policy.
*
- * Return: 0 on success, -ENOKEY if an encryption key is missing, -EPERM if the
+ * Return: 0 on success, -ENOKEY if an encryption key is missing, -EXDEV if the
* rename would cause inconsistent encryption policies, or another -errno code.
*/
static inline int fscrypt_prepare_rename(struct inode *old_dir,
@@ -138,27 +139,32 @@ static inline int fscrypt_prepare_rename(struct inode *old_dir,
* fscrypt_prepare_lookup - prepare to lookup a name in a possibly-encrypted directory
* @dir: directory being searched
* @dentry: filename being looked up
- * @flags: lookup flags
+ * @fname: (output) the name to use to search the on-disk directory
*
- * Prepare for ->lookup() in a directory which may be encrypted. Lookups can be
- * done with or without the directory's encryption key; without the key,
+ * Prepare for ->lookup() in a directory which may be encrypted by determining
+ * the name that will actually be used to search the directory on-disk. Lookups
+ * can be done with or without the directory's encryption key; without the key,
* filenames are presented in encrypted form. Therefore, we'll try to set up
* the directory's encryption key, but even without it the lookup can continue.
*
- * To allow invalidating stale dentries if the directory's encryption key is
- * added later, we also install a custom ->d_revalidate() method and use the
- * DCACHE_ENCRYPTED_WITH_KEY flag to indicate whether a given dentry is a
- * plaintext name (flag set) or a ciphertext name (flag cleared).
+ * This also installs a custom ->d_revalidate() method which will invalidate the
+ * dentry if it was created without the key and the key is later added.
*
- * Return: 0 on success, -errno if a problem occurred while setting up the
- * encryption key
+ * Return: 0 on success; -ENOENT if the key is unavailable but the filename isn't a
+ * correctly formed encoded ciphertext name, so a negative dentry should be
+ * created; or another -errno code.
*/
static inline int fscrypt_prepare_lookup(struct inode *dir,
struct dentry *dentry,
- unsigned int flags)
+ struct fscrypt_name *fname)
{
if (IS_ENCRYPTED(dir))
- return __fscrypt_prepare_lookup(dir, dentry);
+ return __fscrypt_prepare_lookup(dir, dentry, fname);
+
+ memset(fname, 0, sizeof(*fname));
+ fname->usr_fname = &dentry->d_name;
+ fname->disk_name.name = (unsigned char *)dentry->d_name.name;
+ fname->disk_name.len = dentry->d_name.len;
return 0;
}
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index ee8b43e4c15a..93304cfeb601 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -24,6 +24,11 @@ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
return false;
}
+static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
+{
+ return false;
+}
+
/* crypto.c */
static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
@@ -112,7 +117,7 @@ static inline int fscrypt_setup_filename(struct inode *dir,
if (IS_ENCRYPTED(dir))
return -EOPNOTSUPP;
- memset(fname, 0, sizeof(struct fscrypt_name));
+ memset(fname, 0, sizeof(*fname));
fname->usr_fname = iname;
fname->disk_name.name = (unsigned char *)iname->name;
fname->disk_name.len = iname->len;
@@ -183,8 +188,8 @@ static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
return 0;
}
-static inline int __fscrypt_prepare_link(struct inode *inode,
- struct inode *dir)
+static inline int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
+ struct dentry *dentry)
{
return -EOPNOTSUPP;
}
@@ -199,7 +204,8 @@ static inline int __fscrypt_prepare_rename(struct inode *old_dir,
}
static inline int __fscrypt_prepare_lookup(struct inode *dir,
- struct dentry *dentry)
+ struct dentry *dentry,
+ struct fscrypt_name *fname)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index 6456c6b2005f..0409c14ae1de 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -58,6 +58,35 @@ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
inode->i_sb->s_cop->dummy_context(inode);
}
+/**
+ * fscrypt_is_nokey_name() - test whether a dentry is a no-key name
+ * @dentry: the dentry to check
+ *
+ * This returns true if the dentry is a no-key dentry. A no-key dentry is a
+ * dentry that was created in an encrypted directory that hasn't had its
+ * encryption key added yet. Such dentries may be either positive or negative.
+ *
+ * When a filesystem is asked to create a new filename in an encrypted directory
+ * and the new filename's dentry is a no-key dentry, it must fail the operation
+ * with ENOKEY. This includes ->create(), ->mkdir(), ->mknod(), ->symlink(),
+ * ->rename(), and ->link(). (However, ->rename() and ->link() are already
+ * handled by fscrypt_prepare_rename() and fscrypt_prepare_link().)
+ *
+ * This is necessary because creating a filename requires the directory's
+ * encryption key, but just checking for the key on the directory inode during
+ * the final filesystem operation doesn't guarantee that the key was available
+ * during the preceding dentry lookup. The key must already have been
+ * available at lookup time so that it could be checked whether the filename
+ * already exists in the directory, and so that the new file's dentry was not
+ * created with (and later invalidated because of) the no-key flag.
+ *
+ * Return: %true if the dentry is a no-key name
+ */
+static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
+{
+ return dentry->d_flags & DCACHE_ENCRYPTED_NAME;
+}
+
/* crypto.c */
extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
@@ -184,13 +213,15 @@ extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
/* hooks.c */
extern int fscrypt_file_open(struct inode *inode, struct file *filp);
-extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
+extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
+ struct dentry *dentry);
extern int __fscrypt_prepare_rename(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry,
unsigned int flags);
-extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
+extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ struct fscrypt_name *fname);
extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
unsigned int max_len,
struct fscrypt_str *disk_link);
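A minimal sketch of the check the comment above asks directory-modifying operations to perform; myfs_mkdir() and the rest of its body are hypothetical:

static int myfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	/* the lookup ran without the directory's key, so the name cannot be
	 * encrypted for the disk: refuse instead of creating a bogus entry */
	if (fscrypt_is_nokey_name(dentry))
		return -ENOKEY;

	/* ... normal mkdir path ... */
	return 0;
}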
diff --git a/include/linux/futex.h b/include/linux/futex.h
index a61bf436dcf3..b70df27d7e85 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -2,7 +2,9 @@
#ifndef _LINUX_FUTEX_H
#define _LINUX_FUTEX_H
+#include <linux/sched.h>
#include <linux/ktime.h>
+
#include <uapi/linux/futex.h>
struct inode;
@@ -51,15 +53,35 @@ union futex_key {
#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
#ifdef CONFIG_FUTEX
-extern void exit_robust_list(struct task_struct *curr);
+enum {
+ FUTEX_STATE_OK,
+ FUTEX_STATE_EXITING,
+ FUTEX_STATE_DEAD,
+};
-long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
- u32 __user *uaddr2, u32 val2, u32 val3);
-#else
-static inline void exit_robust_list(struct task_struct *curr)
+static inline void futex_init_task(struct task_struct *tsk)
{
+ tsk->robust_list = NULL;
+#ifdef CONFIG_COMPAT
+ tsk->compat_robust_list = NULL;
+#endif
+ INIT_LIST_HEAD(&tsk->pi_state_list);
+ tsk->pi_state_cache = NULL;
+ tsk->futex_state = FUTEX_STATE_OK;
+ mutex_init(&tsk->futex_exit_mutex);
}
+void futex_exit_recursive(struct task_struct *tsk);
+void futex_exit_release(struct task_struct *tsk);
+void futex_exec_release(struct task_struct *tsk);
+
+long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3);
+#else
+static inline void futex_init_task(struct task_struct *tsk) { }
+static inline void futex_exit_recursive(struct task_struct *tsk) { }
+static inline void futex_exit_release(struct task_struct *tsk) { }
+static inline void futex_exec_release(struct task_struct *tsk) { }
static inline long do_futex(u32 __user *uaddr, int op, u32 val,
ktime_t *timeout, u32 __user *uaddr2,
u32 val2, u32 val3)
@@ -68,12 +90,4 @@ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
}
#endif
-#ifdef CONFIG_FUTEX_PI
-extern void exit_pi_state_list(struct task_struct *curr);
-#else
-static inline void exit_pi_state_list(struct task_struct *curr)
-{
-}
-#endif
-
#endif
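The hunk above replaces the bare exit_robust_list()/exit_pi_state_list() prototypes with a per-task state machine (futex_state plus futex_exit_mutex). The sketch below only illustrates the intended calling order; the real call sites live in the fork, exec and exit paths:

static void futex_lifecycle_sketch(struct task_struct *tsk)
{
	/* at fork: clear robust/PI bookkeeping, futex_state = FUTEX_STATE_OK */
	futex_init_task(tsk);

	/* ... the task runs and may own robust/PI futexes ... */

	/* on exec: release robust/PI state while the task keeps running */
	futex_exec_release(tsk);

	/* on exit: serialize against waiters via futex_exit_mutex and move
	 * futex_state towards FUTEX_STATE_DEAD */
	futex_exit_release(tsk);
}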
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index f13272d84332..a488098f8638 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -402,10 +402,11 @@ static inline void free_part_info(struct hd_struct *part)
extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part);
/* block/genhd.c */
-extern void device_add_disk(struct device *parent, struct gendisk *disk);
+extern void device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups);
static inline void add_disk(struct gendisk *disk)
{
- device_add_disk(NULL, disk);
+ device_add_disk(NULL, disk, NULL);
}
extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
static inline void add_disk_no_queue_reg(struct gendisk *disk)
@@ -729,9 +730,11 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part)
static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
{
#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
+ preempt_disable();
write_seqcount_begin(&part->nr_sects_seq);
part->nr_sects = size;
write_seqcount_end(&part->nr_sects_seq);
+ preempt_enable();
#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
preempt_disable();
part->nr_sects = size;
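With the extra parameter a block driver can hand its sysfs attribute groups to device_add_disk() so they are created together with the disk's device, instead of being added racily after add_disk(). A sketch with hypothetical mydrv_* names:

static ssize_t model_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "example-model\n");
}
static DEVICE_ATTR_RO(model);

static struct attribute *mydrv_disk_attrs[] = {
	&dev_attr_model.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mydrv_disk);

/* in probe, once 'disk' has been allocated and filled in: */
device_add_disk(&pdev->dev, disk, mydrv_disk_groups);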
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 8506637f070d..4dcce83ca378 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -270,6 +270,8 @@ struct hid_item {
#define HID_CP_SELECTION 0x000c0080
#define HID_CP_MEDIASELECTION 0x000c0087
#define HID_CP_SELECTDISC 0x000c00ba
+#define HID_CP_VOLUMEUP 0x000c00e9
+#define HID_CP_VOLUMEDOWN 0x000c00ea
#define HID_CP_PLAYBACKSPEED 0x000c00f1
#define HID_CP_PROXIMITY 0x000c0109
#define HID_CP_SPEAKERSYSTEM 0x000c0160
@@ -956,34 +958,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) {
* @max: maximal valid usage->code to consider later (out parameter)
* @type: input event type (EV_KEY, EV_REL, ...)
* @c: code which corresponds to this usage and type
+ *
+ * The value pointed to by @bit will be set to NULL if either @type is
+ * an unhandled event type, or if @c is out of range for @type. This
+ * can be used as an error condition.
*/
static inline void hid_map_usage(struct hid_input *hidinput,
struct hid_usage *usage, unsigned long **bit, int *max,
- __u8 type, __u16 c)
+ __u8 type, unsigned int c)
{
struct input_dev *input = hidinput->input;
-
- usage->type = type;
- usage->code = c;
+ unsigned long *bmap = NULL;
+ unsigned int limit = 0;
switch (type) {
case EV_ABS:
- *bit = input->absbit;
- *max = ABS_MAX;
+ bmap = input->absbit;
+ limit = ABS_MAX;
break;
case EV_REL:
- *bit = input->relbit;
- *max = REL_MAX;
+ bmap = input->relbit;
+ limit = REL_MAX;
break;
case EV_KEY:
- *bit = input->keybit;
- *max = KEY_MAX;
+ bmap = input->keybit;
+ limit = KEY_MAX;
break;
case EV_LED:
- *bit = input->ledbit;
- *max = LED_MAX;
+ bmap = input->ledbit;
+ limit = LED_MAX;
break;
}
+
+ if (unlikely(c > limit || !bmap)) {
+ pr_warn_ratelimited("%s: Invalid code %d type %d\n",
+ input->name, c, type);
+ *bit = NULL;
+ return;
+ }
+
+ usage->type = type;
+ usage->code = c;
+ *max = limit;
+ *bit = bmap;
}
/**
@@ -997,7 +1014,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput,
__u8 type, __u16 c)
{
hid_map_usage(hidinput, usage, bit, max, type, c);
- clear_bit(c, *bit);
+ if (*bit)
+ clear_bit(usage->code, *bit);
}
/**
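A sketch of how a driver's ->input_mapping() callback can react to the new out-of-range signalling, using the consumer-page usages added above (mydrv_* is hypothetical):

static int mydrv_input_mapping(struct hid_device *hdev, struct hid_input *hi,
			       struct hid_field *field,
			       struct hid_usage *usage,
			       unsigned long **bit, int *max)
{
	switch (usage->hid) {
	case HID_CP_VOLUMEUP:
		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_VOLUMEUP);
		break;
	case HID_CP_VOLUMEDOWN:
		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_VOLUMEDOWN);
		break;
	default:
		return 0;	/* let the generic mapping handle it */
	}

	/* hid_map_usage() now leaves *bit NULL for an invalid type/code pair;
	 * treat that as "ignore this usage" */
	return *bit ? 1 : -1;
}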
diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h
index 774f7d3b8f6a..369221fd5518 100644
--- a/include/linux/hil_mlc.h
+++ b/include/linux/hil_mlc.h
@@ -103,7 +103,7 @@ struct hilse_node {
/* Methods for back-end drivers, e.g. hp_sdc_mlc */
typedef int (hil_mlc_cts) (hil_mlc *mlc);
-typedef void (hil_mlc_out) (hil_mlc *mlc);
+typedef int (hil_mlc_out) (hil_mlc *mlc);
typedef int (hil_mlc_in) (hil_mlc *mlc, suseconds_t timeout);
struct hil_mlc_devinfo {
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d34112fb3d52..c129c1c14c5f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -124,7 +124,7 @@ void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
- pgoff_t idx, unsigned long address);
+ pgoff_t idx);
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -541,6 +541,9 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr
set_huge_pte_at(mm, addr, ptep, pte);
}
#endif
+
+void set_page_huge_active(struct page *page);
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index c43e694fef7d..35461d49d3ae 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -428,6 +428,8 @@ enum vmbus_channel_message_type {
CHANNELMSG_19 = 19,
CHANNELMSG_20 = 20,
CHANNELMSG_TL_CONNECT_REQUEST = 21,
+ CHANNELMSG_22 = 22,
+ CHANNELMSG_TL_CONNECT_RESULT = 23,
CHANNELMSG_COUNT
};
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
index d03071732db4..7c522fdd9ea7 100644
--- a/include/linux/i2c-algo-pca.h
+++ b/include/linux/i2c-algo-pca.h
@@ -53,6 +53,20 @@
#define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */
#define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */
+/**
+ * struct pca_i2c_bus_settings - The configured PCA i2c bus settings
+ * @mode: Configured i2c bus mode
+ * @tlow: Configured SCL LOW period
+ * @thi: Configured SCL HIGH period
+ * @clock_freq: The configured clock frequency
+ */
+struct pca_i2c_bus_settings {
+ int mode;
+ int tlow;
+ int thi;
+ int clock_freq;
+};
+
struct i2c_algo_pca_data {
void *data; /* private low level data */
void (*write_byte) (void *data, int reg, int val);
@@ -64,6 +78,7 @@ struct i2c_algo_pca_data {
* For PCA9665, use the frequency you want here. */
unsigned int i2c_clock;
unsigned int chip;
+ struct pca_i2c_bus_settings bus_settings;
};
int i2c_pca_add_bus(struct i2c_adapter *);
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 7e748648c7d3..6fda0458745d 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -662,6 +662,8 @@ struct i2c_adapter_quirks {
#define I2C_AQ_NO_ZERO_LEN_READ BIT(5)
#define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6)
#define I2C_AQ_NO_ZERO_LEN (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
+/* adapter cannot do repeated START */
+#define I2C_AQ_NO_REP_START BIT(7)
/*
* i2c_adapter is the structure used to identify a physical i2c bus along
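An adapter driver advertises the new quirk in the usual way, by pointing adap->quirks at a static description, so that client code can avoid transfers that depend on a repeated START (for instance by splitting a combined write+read into two transfers). A minimal sketch:

static const struct i2c_adapter_quirks mydrv_quirks = {
	.flags = I2C_AQ_NO_REP_START,
};

/* in the adapter driver's probe: */
adap->quirks = &mydrv_quirks;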
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index a8f888976137..0be0d68fbb00 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -3,6 +3,7 @@
#define _LINUX_ICMPV6_H
#include <linux/skbuff.h>
+#include <linux/ipv6.h>
#include <uapi/linux/icmpv6.h>
static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
@@ -13,21 +14,64 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
#include <linux/netdevice.h>
#if IS_ENABLED(CONFIG_IPV6)
-extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- const struct in6_addr *force_saddr);
+ const struct in6_addr *force_saddr,
+ const struct inet6_skb_parm *parm);
+void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct in6_addr *force_saddr,
+ const struct inet6_skb_parm *parm);
+#if IS_BUILTIN(CONFIG_IPV6)
+static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct inet6_skb_parm *parm)
+{
+ icmp6_send(skb, type, code, info, NULL, parm);
+}
+static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
+{
+ BUILD_BUG_ON(fn != icmp6_send);
+ return 0;
+}
+static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
+{
+ BUILD_BUG_ON(fn != icmp6_send);
+ return 0;
+}
+#else
+extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct inet6_skb_parm *parm);
extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
+#endif
+
+static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+{
+ __icmpv6_send(skb, type, code, info, IP6CB(skb));
+}
+
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
unsigned int data_len);
+#if IS_ENABLED(CONFIG_NF_NAT)
+void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
+#else
+static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+{
+ struct inet6_skb_parm parm = { 0 };
+ __icmpv6_send(skb_in, type, code, info, &parm);
+}
+#endif
+
#else
static inline void icmpv6_send(struct sk_buff *skb,
u8 type, u8 code, __u32 info)
{
+}
+static inline void icmpv6_ndo_send(struct sk_buff *skb,
+ u8 type, u8 code, __u32 info)
+{
}
#endif
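The *_ndo_send() variant is intended for network-device transmit paths, where skb->dev may already have been switched and IP6CB() can therefore be stale. A sketch of a tunnel driver's PMTU error path; mytun_* and the fixed MTU are illustrative only:

static netdev_tx_t mytun_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int mtu = 1280;	/* placeholder tunnel MTU */

	if (skb->protocol == htons(ETH_P_IPV6) && skb->len > mtu) {
		/* does not rely on a possibly-stale IP6CB() */
		icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* ... encapsulate and transmit ... */
	return NETDEV_TX_OK;
}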
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index c83478271c2e..778d3ef939d8 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -623,6 +623,15 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
}
/**
+ * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_is_any_nullfunc(__le16 fc)
+{
+ return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc));
+}
+
+/**
* ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
* @fc: frame control field in little-endian byteorder
*/
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 2e55e4cdbd8a..1261559d70b4 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -43,13 +43,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
if (likely(success)) {
struct vlan_pcpu_stats *pcpu_stats;
- pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+ pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
pcpu_stats->rx_packets++;
pcpu_stats->rx_bytes += len;
if (multicast)
pcpu_stats->rx_multicast++;
u64_stats_update_end(&pcpu_stats->syncp);
+ put_cpu_ptr(vlan->pcpu_stats);
} else {
this_cpu_inc(vlan->pcpu_stats->rx_errors);
}
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 83ea4df6ab81..60afff19d7fc 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -30,6 +30,8 @@
#define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */
#define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */
+#define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */
+
/*
* struct vlan_hdr - vlan header
* @h_vlan_TCI: priority and VLAN ID
@@ -558,10 +560,10 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
* Returns the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
int *depth)
{
- unsigned int vlan_depth = skb->mac_len;
+ unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
/* if type is 802.1Q/AD then the header should already be
* present at mac_len - VLAN_HLEN (if mac_len > 0), or at
@@ -576,13 +578,12 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
vlan_depth = ETH_HLEN;
}
do {
- struct vlan_hdr *vh;
+ struct vlan_hdr vhdr, *vh;
- if (unlikely(!pskb_may_pull(skb,
- vlan_depth + VLAN_HLEN)))
+ vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
+ if (unlikely(!vh || !--parse_depth))
return 0;
- vh = (struct vlan_hdr *)(skb->data + vlan_depth);
type = vh->h_vlan_encapsulated_proto;
vlan_depth += VLAN_HLEN;
} while (eth_type_vlan(type));
@@ -601,11 +602,25 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
* Returns the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
{
return __vlan_get_protocol(skb, skb->protocol, NULL);
}
+/* A getter for the SKB protocol field which will handle VLAN tags consistently
+ * whether VLAN acceleration is enabled or not.
+ */
+static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
+{
+ if (!skip_vlan)
+ /* VLAN acceleration strips the VLAN header from the skb and
+ * moves it to skb->vlan_proto
+ */
+ return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;
+
+ return vlan_get_protocol(skb);
+}
+
static inline void vlan_set_encap_proto(struct sk_buff *skb,
struct vlan_hdr *vhdr)
{
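A short example of the new helper in a classification path that must see the L3 protocol whether or not the VLAN tag was stripped by hardware acceleration:

static bool pkt_is_ipv6(const struct sk_buff *skb)
{
	/* skip_vlan = true: look past any (possibly nested) VLAN headers
	 * at the encapsulated protocol */
	return skb_protocol(skb, true) == htons(ETH_P_IPV6);
}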
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index a74cb177dc6f..136ce51548a8 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -599,7 +599,7 @@ void iio_device_unregister(struct iio_dev *indio_dev);
* 0 on success, negative error number on failure.
*/
#define devm_iio_device_register(dev, indio_dev) \
- __devm_iio_device_register((dev), (indio_dev), THIS_MODULE);
+ __devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
struct module *this_mod);
void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index b1b4411b4c6b..786df33c0020 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -308,8 +308,8 @@ enum {
#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
-#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
-#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
+#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1)
+#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
@@ -472,6 +472,8 @@ struct intel_iommu {
struct iommu_device iommu; /* IOMMU core code handle */
int node;
u32 flags; /* Software defined flags */
+
+ struct dmar_drhd_unit *drhd;
};
/* PCI domain-device relationship */
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 58df02bd93c9..fa46183b163b 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -120,9 +120,12 @@ io_mapping_init_wc(struct io_mapping *iomap,
resource_size_t base,
unsigned long size)
{
+ iomap->iomem = ioremap_wc(base, size);
+ if (!iomap->iomem)
+ return NULL;
+
iomap->base = base;
iomap->size = size;
- iomap->iomem = ioremap_wc(base, size);
#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
#elif defined(pgprot_writecombine)
diff --git a/include/linux/io.h b/include/linux/io.h
index 32e30e8fb9db..da39ff89df65 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -75,6 +75,8 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
resource_size_t size);
+void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
+ resource_size_t size);
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index dba15ca8e60b..1dcd9198beb7 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -8,6 +8,7 @@
enum {
ICQ_EXITED = 1 << 2,
+ ICQ_DESTROYED = 1 << 3,
};
/*
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 3555d54bf79a..e93ecacb5eaf 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -108,6 +108,7 @@ struct iomap_ops {
struct iomap_page {
atomic_t read_count;
atomic_t write_count;
+ spinlock_t uptodate_lock;
DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
};
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 8415bf1a9776..0ebd180e7f91 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -3,6 +3,7 @@
#define _IPV6_H
#include <uapi/linux/ipv6.h>
+#include <uapi/linux/icmpv6.h>
#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
@@ -83,7 +84,6 @@ struct ipv6_params {
__s32 autoconf;
};
extern struct ipv6_params ipv6_defaults;
-#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 6ecaf056ab63..a042faefb9b7 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -210,6 +210,8 @@ struct irq_data {
* IRQD_CAN_RESERVE - Can use reservation mode
* IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
* required
+ * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
+ * irq_chip::irq_set_affinity() when deactivated.
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -233,6 +235,7 @@ enum {
IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
IRQD_CAN_RESERVE = (1 << 26),
IRQD_MSI_NOMASK_QUIRK = (1 << 27),
+ IRQD_AFFINITY_ON_ACTIVATE = (1 << 29),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -407,6 +410,16 @@ static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
}
+static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+}
+
+static inline bool irqd_affinity_on_activate(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
+}
+
#undef __irqd_to_state
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 657a83b943f0..1f96ce2b47df 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -18,6 +18,7 @@
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+struct cred;
struct module;
static inline int is_kernel_inittext(unsigned long addr)
@@ -98,7 +99,7 @@ int lookup_symbol_name(unsigned long addr, char *symname);
int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
/* How and when do we show kallsyms values? */
-extern int kallsyms_show_value(void);
+extern bool kallsyms_show_value(const struct cred *cred);
#else /* !CONFIG_KALLSYMS */
@@ -158,7 +159,7 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
return -ERANGE;
}
-static inline int kallsyms_show_value(void)
+static inline bool kallsyms_show_value(const struct cred *cred)
{
return false;
}
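Callers now pass the credentials to check, typically the ones saved at open time, so address visibility is decided for the opener rather than for whoever happens to read the file. A sketch for a seq_file show routine (mydrv_* is hypothetical):

static int mydrv_addr_show(struct seq_file *m, void *v)
{
	void *addr = (void *)mydrv_addr_show;	/* arbitrary pointer, demo only */

	if (kallsyms_show_value(m->file->f_cred))
		seq_printf(m, "%px\n", addr);	/* raw address is permitted */
	else
		seq_puts(m, "0\n");		/* hide it otherwise */
	return 0;
}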
diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h
index 85b5151911cf..4856706fbfeb 100644
--- a/include/linux/kdev_t.h
+++ b/include/linux/kdev_t.h
@@ -21,61 +21,61 @@
})
/* acceptable for old filesystems */
-static inline bool old_valid_dev(dev_t dev)
+static __always_inline bool old_valid_dev(dev_t dev)
{
return MAJOR(dev) < 256 && MINOR(dev) < 256;
}
-static inline u16 old_encode_dev(dev_t dev)
+static __always_inline u16 old_encode_dev(dev_t dev)
{
return (MAJOR(dev) << 8) | MINOR(dev);
}
-static inline dev_t old_decode_dev(u16 val)
+static __always_inline dev_t old_decode_dev(u16 val)
{
return MKDEV((val >> 8) & 255, val & 255);
}
-static inline u32 new_encode_dev(dev_t dev)
+static __always_inline u32 new_encode_dev(dev_t dev)
{
unsigned major = MAJOR(dev);
unsigned minor = MINOR(dev);
return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}
-static inline dev_t new_decode_dev(u32 dev)
+static __always_inline dev_t new_decode_dev(u32 dev)
{
unsigned major = (dev & 0xfff00) >> 8;
unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
return MKDEV(major, minor);
}
-static inline u64 huge_encode_dev(dev_t dev)
+static __always_inline u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
}
-static inline dev_t huge_decode_dev(u64 dev)
+static __always_inline dev_t huge_decode_dev(u64 dev)
{
return new_decode_dev(dev);
}
-static inline int sysv_valid_dev(dev_t dev)
+static __always_inline int sysv_valid_dev(dev_t dev)
{
return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
}
-static inline u32 sysv_encode_dev(dev_t dev)
+static __always_inline u32 sysv_encode_dev(dev_t dev)
{
return MINOR(dev) | (MAJOR(dev) << 18);
}
-static inline unsigned sysv_major(u32 dev)
+static __always_inline unsigned sysv_major(u32 dev)
{
return (dev >> 18) & 0x3fff;
}
-static inline unsigned sysv_minor(u32 dev)
+static __always_inline unsigned sysv_minor(u32 dev)
{
return dev & 0x3ffff;
}
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 9e4e638fb505..fe9f6f2dd811 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -260,6 +260,11 @@ struct kimage {
/* Information for loading purgatory */
struct purgatory_info purgatory_info;
#endif
+
+#ifdef CONFIG_IMA_KEXEC
+ /* Virtual address of IMA measurement buffer for kexec syscall */
+ void *ima_buffer;
+#endif
};
/* kexec interface functions */
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index d3c5ae8ad498..3341ddac2348 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -125,7 +125,7 @@ struct key_type {
* much is copied into the buffer
* - shouldn't do the copy if the buffer is NULL
*/
- long (*read)(const struct key *key, char __user *buffer, size_t buflen);
+ long (*read)(const struct key *key, char *buffer, size_t buflen);
/* handle request_key() for this type instead of invoking
* /sbin/request-key (optional)
diff --git a/include/linux/key.h b/include/linux/key.h
index e58ee10f6e58..3683c6a6fca3 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -249,6 +249,7 @@ extern struct key *key_alloc(struct key_type *type,
#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
+#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index e465bb15912d..6be5545d3584 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -317,7 +317,7 @@ extern void gdbstub_exit(int status);
extern int kgdb_single_step;
extern atomic_t kgdb_active;
#define in_dbg_master() \
- (raw_smp_processor_id() == atomic_read(&kgdb_active))
+ (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active)))
extern bool dbg_is_early;
extern void __init dbg_late_init(void);
#else /* ! CONFIG_KGDB */
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 082d1d2a5216..dc9a2eecc8b8 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -15,6 +15,7 @@ extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
unsigned long vm_flags);
+extern void khugepaged_min_free_kbytes_update(void);
#define khugepaged_enabled() \
(transparent_hugepage_flags & \
@@ -73,6 +74,10 @@ static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
{
return 0;
}
+
+static inline void khugepaged_min_free_kbytes_update(void)
+{
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_KHUGEPAGED_H */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 9adb92ad24d3..c28204e22b54 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -245,7 +245,7 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern int arch_populate_kprobe_blacklist(void);
extern bool arch_kprobe_on_func_entry(unsigned long offset);
-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
+extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
extern int kprobe_add_ksym_blacklist(unsigned long entry);
@@ -363,6 +363,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
return this_cpu_ptr(&kprobe_ctlblk);
}
+extern struct kprobe kprobe_busy;
+void kprobe_busy_begin(void);
+void kprobe_busy_end(void);
+
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c1961761311d..72308c38e06c 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -32,6 +32,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
unsigned int cpu,
const char *namefmt);
+void kthread_set_per_cpu(struct task_struct *k, int cpu);
+bool kthread_is_per_cpu(struct task_struct *k);
+
/**
* kthread_run - create and wake a thread.
* @threadfn: the function to run until signal_pending(current).
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0f99ecc01bc7..a0de4c7dc9d3 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -206,6 +206,32 @@ enum {
READING_SHADOW_PAGE_TABLES,
};
+#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA)
+
+struct kvm_host_map {
+ /*
+ * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
+ * a 'struct page' for it; with the mem= kernel parameter some memory
+ * can be used as guest memory without being managed by the host
+ * kernel).
+ * If 'pfn' is not managed by the host kernel, this field is
+ * initialized to KVM_UNMAPPED_PAGE.
+ */
+ struct page *page;
+ void *hva;
+ kvm_pfn_t pfn;
+ kvm_pfn_t gfn;
+};
+
+/*
+ * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
+ * directly to check for that.
+ */
+static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
+{
+ return !!map->hva;
+}
+
/*
* Sometimes a large or cross-page mmio needs to be broken up into separate
* exits for userspace servicing.
@@ -682,6 +708,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
@@ -711,7 +738,13 @@ struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache, bool atomic);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -966,7 +999,7 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn)
start = slot + 1;
}
- if (gfn >= memslots[start].base_gfn &&
+ if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
gfn < memslots[start].base_gfn + memslots[start].npages) {
atomic_set(&slots->lru_slot, start);
return &memslots[start];
@@ -1294,8 +1327,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end, bool blockable);
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 8bf259dae9f6..a38729c8296f 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -32,7 +32,7 @@ struct kvm_memslots;
enum kvm_mr_change;
-#include <asm/types.h>
+#include <linux/types.h>
/*
* Address types:
@@ -63,4 +63,11 @@ struct gfn_to_hva_cache {
struct kvm_memory_slot *memslot;
};
+struct gfn_to_pfn_cache {
+ u64 generation;
+ gfn_t gfn;
+ kvm_pfn_t pfn;
+ bool dirty;
+};
+
#endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 75a916d7ab2a..3d076aca7ac2 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -38,6 +38,7 @@
#include <linux/acpi.h>
#include <linux/cdrom.h>
#include <linux/sched.h>
+#include <linux/async.h>
/*
* Define if arch has non-standard setup. This is a _PCI_ standard
@@ -438,6 +439,7 @@ enum {
ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
+ ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -501,6 +503,7 @@ enum hsm_task_states {
};
enum ata_completion_errors {
+ AC_ERR_OK = 0, /* no error */
AC_ERR_DEV = (1 << 0), /* device reported error */
AC_ERR_HSM = (1 << 1), /* host state machine violation */
AC_ERR_TIMEOUT = (1 << 2), /* timeout */
@@ -887,6 +890,8 @@ struct ata_port {
struct timer_list fastdrain_timer;
unsigned long fastdrain_cnt;
+ async_cookie_t cookie;
+
int em_message_type;
void *private_data;
@@ -908,9 +913,9 @@ struct ata_port_operations {
/*
* Command execution
*/
- int (*qc_defer)(struct ata_queued_cmd *qc);
- int (*check_atapi_dma)(struct ata_queued_cmd *qc);
- void (*qc_prep)(struct ata_queued_cmd *qc);
+ int (*qc_defer)(struct ata_queued_cmd *qc);
+ int (*check_atapi_dma)(struct ata_queued_cmd *qc);
+ enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
@@ -1177,7 +1182,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode);
extern const char *ata_mode_string(unsigned long xfer_mask);
extern unsigned long ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
@@ -1912,9 +1917,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops;
.sg_tablesize = LIBATA_MAX_PRD, \
.dma_boundary = ATA_DMA_BOUNDARY
-extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
-extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);
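Under the new convention a driver's ->qc_prep() reports failures instead of returning void, and the core fails the queued command cleanly. A sketch; mydrv_fill_sg() is a hypothetical helper:

static enum ata_completion_errors mydrv_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	if (mydrv_fill_sg(qc) < 0)
		return AC_ERR_INVALID;	/* e.g. an S/G list the HW cannot express */

	return AC_ERR_OK;
}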
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 2af7f77866d0..78496801cddf 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -177,7 +177,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define roundup_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
- (n == 1) ? 1 : \
+ ((n) == 1) ? 1 : \
(1UL << (ilog2((n) - 1) + 1)) \
) : \
__roundup_pow_of_two(n) \
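The added parentheses matter when the argument is itself an expression. With the old body the test expanded to 4096 | 1 == 1, which C parses as 4096 | (1 == 1), a non-zero value, so the macro collapsed to 1; with ((n) == 1) the whole argument is compared and the expected result comes out. A compile-time check used purely for illustration:

static inline void roundup_paren_demo(void)
{
	BUILD_BUG_ON(roundup_pow_of_two(4096 | 1) != 8192);
}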
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 1eb6f244588d..9a488497ebc2 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -21,11 +21,12 @@
#define MARVELL_PHY_ID_88E1545 0x01410ea0
#define MARVELL_PHY_ID_88E3016 0x01410e60
-/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
+/* These Ethernet switch families contain embedded PHYs, but they do
* not have a model ID. So the switch driver traps reads to the ID2
* register and returns the switch family ID
*/
-#define MARVELL_PHY_ID_88E6390 0x01410f90
+#define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41
+#define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90
#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ae64fced188d..dc89a964c1f3 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -902,6 +902,7 @@ struct mlx5_cmd_work_ent {
struct delayed_work cb_timeout_work;
void *context;
int idx;
+ struct completion handling;
struct completion done;
struct mlx5_cmd *cmd;
struct work_struct work;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 76b76b6aa83d..5f711b2983db 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -357,11 +357,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_60[0x18];
u8 log_max_ft_num[0x8];
- u8 reserved_at_80[0x18];
+ u8 reserved_at_80[0x10];
+ u8 log_max_flow_counter[0x8];
u8 log_max_destination[0x8];
- u8 log_max_flow_counter[0x8];
- u8 reserved_at_a8[0x10];
+ u8 reserved_at_a0[0x18];
u8 log_max_flow[0x8];
u8 reserved_at_c0[0x40];
@@ -672,7 +672,14 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 swp[0x1];
u8 swp_csum[0x1];
u8 swp_lso[0x1];
- u8 reserved_at_23[0xd];
+ u8 cqe_checksum_full[0x1];
+ u8 tunnel_stateless_geneve_tx[0x1];
+ u8 tunnel_stateless_mpls_over_udp[0x1];
+ u8 tunnel_stateless_mpls_over_gre[0x1];
+ u8 tunnel_stateless_vxlan_gpe[0x1];
+ u8 tunnel_stateless_ipv4_over_vxlan[0x1];
+ u8 tunnel_stateless_ip_over_ip[0x1];
+ u8 reserved_at_2a[0x6];
u8 max_vxlan_udp_ports[0x8];
u8 reserved_at_38[0x6];
u8 max_geneve_opt_len[0x1];
@@ -8922,7 +8929,7 @@ struct mlx5_ifc_pbmc_reg_bits {
struct mlx5_ifc_bufferx_reg_bits buffer[10];
- u8 reserved_at_2e0[0x40];
+ u8 reserved_at_2e0[0x80];
};
struct mlx5_ifc_qtct_reg_bits {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 45f10f5896b7..f6ecf41aea83 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -601,7 +601,13 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
}
extern void kvfree(const void *addr);
+extern void kvfree_sensitive(const void *addr, size_t len);
+/*
+ * Mapcount of compound page as a whole, does not include mapped sub-pages.
+ *
+ * Must be called only for compound pages or any their tail sub-pages.
+ */
static inline int compound_mapcount(struct page *page)
{
VM_BUG_ON_PAGE(!PageCompound(page), page);
@@ -621,10 +627,16 @@ static inline void page_mapcount_reset(struct page *page)
int __page_mapcount(struct page *page);
+/*
+ * Mapcount of 0-order page; when compound sub-page, includes
+ * compound_mapcount().
+ *
+ * Result is undefined for pages which cannot be mapped into userspace.
+ * For example SLAB or special types of pages. See function page_has_type().
+ * They use this place in struct page differently.
+ */
static inline int page_mapcount(struct page *page)
{
- VM_BUG_ON_PAGE(PageSlab(page), page);
-
if (unlikely(PageCompound(page)))
return __page_mapcount(page);
return atomic_read(&page->_mapcount) + 1;
@@ -2167,7 +2179,7 @@ static inline void zero_resv_unavail(void) {}
extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
- enum memmap_context, struct vmem_altmap *);
+ enum meminit_context, struct vmem_altmap *);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
@@ -2549,6 +2561,15 @@ static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
return VM_FAULT_NOPAGE;
}
+#ifndef io_remap_pfn_range
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn,
+ unsigned long size, pgprot_t prot)
+{
+ return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
+}
+#endif
+
static inline vm_fault_t vmf_error(int err)
{
if (err == -ENOMEM)
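kvfree_sensitive() pairs with kvmalloc() for buffers holding secrets: the contents are wiped with memzero_explicit() before the memory is returned. A short sketch:

static int use_secret(const void *src, size_t len)
{
	void *key = kvmalloc(len, GFP_KERNEL);

	if (!key)
		return -ENOMEM;
	memcpy(key, src, len);
	/* ... use the key material ... */
	kvfree_sensitive(key, len);	/* zeroize, then kvfree() */
	return 0;
}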
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 8ef330027b13..3f8e84a80b4a 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -227,7 +227,7 @@ struct mmc_queue_req;
* MMC Physical partitions
*/
struct mmc_part {
- unsigned int size; /* partition size (in bytes) */
+ u64 size; /* partition size (in bytes) */
unsigned int part_cfg; /* partition type */
char name[MAX_MMC_PART_NAME_LEN];
bool force_ro; /* to make boot parts RO by default */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d6791e2df30a..fa02014eba8e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -638,6 +638,8 @@ typedef struct pglist_data {
/*
* Must be held any time you expect node_start_pfn, node_present_pages
* or node_spanned_pages stay constant.
+ * Also synchronizes pgdat->first_deferred_pfn during deferred page
+ * init.
*
* pgdat_resize_lock() and pgdat_resize_unlock() are provided to
* manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
@@ -757,10 +759,15 @@ bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
unsigned long mark, int classzone_idx);
-enum memmap_context {
- MEMMAP_EARLY,
- MEMMAP_HOTPLUG,
+/*
+ * Memory initialization context, use to differentiate memory added by
+ * the platform statically or via memory hotplug interface.
+ */
+enum meminit_context {
+ MEMINIT_EARLY,
+ MEMINIT_HOTPLUG,
};
+
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size);
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 867db9b9384c..610cdf8082f2 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -299,7 +299,7 @@ struct pcmcia_device_id {
#define INPUT_DEVICE_ID_LED_MAX 0x0f
#define INPUT_DEVICE_ID_SND_MAX 0x07
#define INPUT_DEVICE_ID_FF_MAX 0x7f
-#define INPUT_DEVICE_ID_SW_MAX 0x0f
+#define INPUT_DEVICE_ID_SW_MAX 0x10
#define INPUT_DEVICE_ID_PROP_MAX 0x1f
#define INPUT_DEVICE_ID_MATCH_BUS 1
@@ -621,6 +621,10 @@ struct mips_cdmm_device_id {
/*
* MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id.
* Although gcc seems to ignore this error, clang fails without this define.
+ *
+ * Note: The ordering of the struct is different from upstream because the
+ * static initializers in kernels < 5.7 still use C89 style while upstream
+ * has been converted to proper C99 initializers.
*/
#define x86cpu_device_id x86_cpu_id
struct x86_cpu_id {
@@ -629,6 +633,7 @@ struct x86_cpu_id {
__u16 model;
__u16 feature; /* bit index */
kernel_ulong_t driver_data;
+ __u16 steppings;
};
#define X86_FEATURE_MATCH(x) \
@@ -637,6 +642,7 @@ struct x86_cpu_id {
#define X86_VENDOR_ANY 0xffff
#define X86_FAMILY_ANY 0
#define X86_MODEL_ANY 0
+#define X86_STEPPING_ANY 0
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
/*
diff --git a/include/linux/module.h b/include/linux/module.h
index 9915397715fc..008cfc08a664 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -359,6 +359,7 @@ struct module {
unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
const s32 *gpl_crcs;
+ bool using_gplonly_symbols;
#ifdef CONFIG_UNUSED_SYMBOLS
/* unused exported symbols. */
@@ -529,34 +530,14 @@ struct module *find_module(const char *name);
struct symsearch {
const struct kernel_symbol *start, *stop;
const s32 *crcs;
- enum {
+ enum mod_license {
NOT_GPL_ONLY,
GPL_ONLY,
WILL_BE_GPL_ONLY,
- } licence;
+ } license;
bool unused;
};
-/*
- * Search for an exported symbol by name.
- *
- * Must be called with module_mutex held or preemption disabled.
- */
-const struct kernel_symbol *find_symbol(const char *name,
- struct module **owner,
- const s32 **crc,
- bool gplok,
- bool warn);
-
-/*
- * Walk the exported symbol table
- *
- * Must be called with module_mutex held or preemption disabled.
- */
-bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
- struct module *owner,
- void *data), void *data);
-
/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
symnum out of range. */
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
@@ -604,7 +585,6 @@ static inline void __module_get(struct module *module)
#define symbol_put_addr(p) do { } while (0)
#endif /* CONFIG_MODULE_UNLOAD */
-int ref_module(struct module *a, struct module *b);
/* This is a #define so the string doesn't get put in every .o file */
#define module_name(mod) \
diff --git a/include/linux/msi.h b/include/linux/msi.h
index be8ec813dbfb..5dd171849a27 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -118,6 +118,12 @@ struct msi_desc {
list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#define for_each_msi_entry_safe(desc, tmp, dev) \
list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
+#define for_each_msi_vector(desc, __irq, dev) \
+ for_each_msi_entry((desc), (dev)) \
+ if ((desc)->irq) \
+ for (__irq = (desc)->irq; \
+ __irq < ((desc)->irq + (desc)->nvec_used); \
+ __irq++)
#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
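The new iterator visits every Linux interrupt number behind a device's MSI descriptors and skips descriptors that never got an IRQ assigned. Purely as an illustration of its shape (the masking policy here is made up):

static void mydrv_quiesce_msi(struct device *dev)
{
	struct msi_desc *desc;
	int virq;

	for_each_msi_vector(desc, virq, dev)
		disable_irq(virq);
}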
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h
index 122f3439e1af..c65d7a3be3c6 100644
--- a/include/linux/mtd/pfow.h
+++ b/include/linux/mtd/pfow.h
@@ -128,7 +128,7 @@ static inline void print_drs_error(unsigned dsr)
if (!(dsr & DSR_AVAILABLE))
printk(KERN_NOTICE"DSR.15: (0) Device not Available\n");
- if (prog_status & 0x03)
+ if ((prog_status & 0x03) == 0x03)
printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid "
"half with 41h command\n");
else if (prog_status & 0x02)
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index efb2345359bb..a4be6b2bcc35 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -24,15 +24,16 @@
#include <linux/of.h>
#include <linux/types.h>
+struct nand_chip;
struct nand_flash_dev;
/* Scan and identify a NAND device */
-int nand_scan_with_ids(struct mtd_info *mtd, int max_chips,
+int nand_scan_with_ids(struct nand_chip *chip, int max_chips,
struct nand_flash_dev *ids);
-static inline int nand_scan(struct mtd_info *mtd, int max_chips)
+static inline int nand_scan(struct nand_chip *chip, int max_chips)
{
- return nand_scan_with_ids(mtd, max_chips, NULL);
+ return nand_scan_with_ids(chip, max_chips, NULL);
}
/* Internal helper for board drivers which need to override command function */
@@ -1740,7 +1741,7 @@ int nand_write_data_op(struct nand_chip *chip, const void *buf,
*/
void nand_cleanup(struct nand_chip *chip);
/* Unregister the MTD device and calls nand_cleanup() */
-void nand_release(struct mtd_info *mtd);
+void nand_release(struct nand_chip *chip);
/* Default extended ID decoding function */
void nand_decode_ext_id(struct nand_chip *chip);
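With the chip-centric prototypes a controller driver passes the nand_chip directly; roughly as below, where the mydrv_* names and keeping the chip in driver data are assumptions:

static int mydrv_probe(struct platform_device *pdev)
{
	struct nand_chip *chip = mydrv_init_controller(pdev);	/* hypothetical */
	int ret;

	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ret = nand_scan(chip, 1);		/* was: nand_scan(nand_to_mtd(chip), 1) */
	if (ret)
		return ret;

	ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
	if (ret)
		nand_cleanup(chip);
	else
		platform_set_drvdata(pdev, chip);
	return ret;
}

static int mydrv_remove(struct platform_device *pdev)
{
	struct nand_chip *chip = platform_get_drvdata(pdev);

	nand_release(chip);			/* was: nand_release(nand_to_mtd(chip)) */
	return 0;
}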
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 3093dd162424..8f7cdf83f359 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -184,7 +184,7 @@ extern void mutex_lock_io(struct mutex *lock);
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
-# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
+# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
#endif
/*
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 84bbdcbb199a..ca5f053c6b66 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -274,6 +274,7 @@ struct header_ops {
const struct net_device *dev,
const unsigned char *haddr);
bool (*validate)(const char *ll_header, unsigned int len);
+ __be16 (*parse_protocol)(const struct sk_buff *skb);
};
/* These flag bits are private to the generic network queueing
@@ -2620,14 +2621,6 @@ void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
-DECLARE_PER_CPU(int, xmit_recursion);
-#define XMIT_RECURSION_LIMIT 10
-
-static inline int dev_recursion_level(void)
-{
- return this_cpu_read(xmit_recursion);
-}
-
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
@@ -2903,6 +2896,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
+static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
+{
+ const struct net_device *dev = skb->dev;
+
+ if (!dev->header_ops || !dev->header_ops->parse_protocol)
+ return 0;
+ return dev->header_ops->parse_protocol(skb);
+}
+
/* ll_header must have at least hard_header_len allocated */
static inline bool dev_validate_header(const struct net_device *dev,
char *ll_header, int len)
@@ -2967,6 +2969,11 @@ struct softnet_data {
#ifdef CONFIG_XFRM_OFFLOAD
struct sk_buff_head xfrm_backlog;
#endif
+ /* written and read only by owning cpu: */
+ struct {
+ u16 recursion;
+ u8 more;
+ } xmit;
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
* and only read by other cpus. Worth using a cache line.
@@ -3002,6 +3009,28 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+static inline int dev_recursion_level(void)
+{
+ return this_cpu_read(softnet_data.xmit.recursion);
+}
+
+#define XMIT_RECURSION_LIMIT 8
+static inline bool dev_xmit_recursion(void)
+{
+ return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
+ XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+ __this_cpu_inc(softnet_data.xmit.recursion);
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+ __this_cpu_dec(softnet_data.xmit.recursion);
+}
+
void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);
@@ -3947,6 +3976,7 @@ static inline void netif_tx_disable(struct net_device *dev)
local_bh_disable();
cpu = smp_processor_id();
+ spin_lock(&dev->tx_global_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -3954,6 +3984,7 @@ static inline void netif_tx_disable(struct net_device *dev)
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
+ spin_unlock(&dev->tx_global_lock);
local_bh_enable();
}
@@ -4314,6 +4345,11 @@ static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
return ops->ndo_start_xmit(skb, dev);
}
+static inline bool netdev_xmit_more(void)
+{
+ return __this_cpu_read(softnet_data.xmit.more);
+}
+
static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, bool more)
{
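The per-softnet counter replaces the old xmit_recursion per-cpu variable. A software device that re-enters the transmit path can guard itself roughly as follows (mytundev_* is hypothetical):

static netdev_tx_t mytundev_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	if (dev_xmit_recursion()) {
		/* too many nested transmissions: drop instead of recursing */
		net_crit_ratelimited("%s: xmit recursion limit reached\n",
				     dev->name);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	dev_xmit_recursion_inc();
	dev_queue_xmit(skb);		/* may re-enter another device's xmit */
	dev_xmit_recursion_dec();

	return NETDEV_TX_OK;
}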
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 72cb19c3db6a..9460a5635c90 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -300,7 +300,7 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
- list_del(&skb->list);
+ skb_list_del_init(skb);
if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
list_add_tail(&skb->list, &sublist);
}
diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h
index 833a5b2255ea..ade993809ebc 100644
--- a/include/linux/netfilter/nf_conntrack_pptp.h
+++ b/include/linux/netfilter/nf_conntrack_pptp.h
@@ -5,7 +5,7 @@
#include <linux/netfilter/nf_conntrack_common.h>
-extern const char *const pptp_msg_name[];
+const char *pptp_msg_name(u_int16_t msg);
/* state of the control session */
enum pptp_ctrlsess_state {
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
index 9a33f171aa82..625f491b95de 100644
--- a/include/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -9,6 +9,8 @@ struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
+ u8 last_dir;
+ u8 flags;
};
#endif /* _NF_CONNTRACK_SCTP_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index cf09ab37b45b..e713476ff29d 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -43,8 +43,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group);
int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
unsigned int group, int echo, gfp_t flags);
int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
- int flags);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid);
static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
{
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 9077b3ebea08..0ade4d1e4dd9 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -377,7 +377,7 @@ static inline unsigned int xt_write_recseq_begin(void)
* since addend is most likely 1
*/
__this_cpu_add(xt_recseq.sequence, addend);
- smp_wmb();
+ smp_mb();
return addend;
}
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 95ab5cc64422..45ff1330b339 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -16,7 +16,7 @@ struct ip_rt_info {
u_int32_t mark;
};
-int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
+int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type);
struct nf_queue_entry;
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index c0dc4dd78887..47a2de582f57 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -36,7 +36,7 @@ struct nf_ipv6_ops {
};
#ifdef CONFIG_NETFILTER
-int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
+int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb);
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index ad69430fd0eb..5162fc1533c2 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -142,6 +142,8 @@ extern void nfs_unlock_and_release_request(struct nfs_page *);
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+extern int nfs_page_set_headlock(struct nfs_page *req);
+extern void nfs_page_clear_headlock(struct nfs_page *req);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
/*
diff --git a/include/linux/node.h b/include/linux/node.h
index 708939bae9aa..a79ec4492650 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -32,11 +32,13 @@ extern struct node *node_devices[];
typedef void (*node_registration_func_t)(struct node *);
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
-extern int link_mem_sections(int nid, unsigned long start_pfn,
- unsigned long end_pfn);
+int link_mem_sections(int nid, unsigned long start_pfn,
+ unsigned long end_pfn,
+ enum meminit_context context);
#else
static inline int link_mem_sections(int nid, unsigned long start_pfn,
- unsigned long end_pfn)
+ unsigned long end_pfn,
+ enum meminit_context context)
{
return 0;
}
@@ -61,7 +63,8 @@ static inline int register_one_node(int nid)
if (error)
return error;
/* link memory sections under this node */
- error = link_mem_sections(nid, start_pfn, end_pfn);
+ error = link_mem_sections(nid, start_pfn, end_pfn,
+ MEMINIT_EARLY);
}
return error;
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 2f3ae41c212d..496ff759f84c 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -282,8 +282,6 @@ struct nvme_fc_remote_port {
*
* Host/Initiator Transport Entrypoints/Parameters:
*
- * @module: The LLDD module using the interface
- *
* @localport_delete: The LLDD initiates deletion of a localport via
* nvme_fc_deregister_localport(). However, the teardown is
* asynchronous. This routine is called upon the completion of the
@@ -397,8 +395,6 @@ struct nvme_fc_remote_port {
* Value is Mandatory. Allowed to be zero.
*/
struct nvme_fc_port_template {
- struct module *module;
-
/* initiator-based functions */
void (*localport_delete)(struct nvme_fc_local_port *);
void (*remoteport_delete)(struct nvme_fc_remote_port *);
diff --git a/include/linux/of.h b/include/linux/of.h
index d4f14b0302b6..6429f00341d1 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1258,6 +1258,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
__used __section(__##table##_of_table) \
+ __aligned(__alignof__(struct of_device_id)) \
= { .compatible = compat, \
.data = (fn == (fn_type)NULL) ? fn : fn }
#else
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 69864a547663..3f649be179da 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -45,6 +45,7 @@ struct oom_control {
};
extern struct mutex oom_lock;
+extern struct mutex oom_adj_mutex;
static inline void set_current_oom_origin(void)
{
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 15eb85de9226..4564a175e681 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -3,6 +3,7 @@
#define __LINUX_OVERFLOW_H
#include <linux/compiler.h>
+#include <linux/limits.h>
/*
* In the fallback code below, we need to compute the minimum and
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 5d13d25da2c8..d803397a28f7 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -24,7 +24,6 @@
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
@@ -85,18 +84,14 @@ struct padata_serial_queue {
* @serial: List to wait for serialization after reordering.
* @pwork: work struct for parallelization.
* @swork: work struct for serialization.
- * @pd: Backpointer to the internal control structure.
* @work: work struct for parallelization.
- * @reorder_work: work struct for reordering.
* @num_obj: Number of objects that are processed by this cpu.
* @cpu_index: Index of the cpu.
*/
struct padata_parallel_queue {
struct padata_list parallel;
struct padata_list reorder;
- struct parallel_data *pd;
struct work_struct work;
- struct work_struct reorder_work;
atomic_t num_obj;
int cpu_index;
};
@@ -122,10 +117,10 @@ struct padata_cpumask {
* @reorder_objects: Number of objects waiting in the reorder queues.
* @refcnt: Number of objects holding a reference on this parallel_data.
* @max_seq_nr: Maximal used sequence number.
+ * @cpu: Next CPU to be processed.
* @cpumask: The cpumasks in use for parallel and serial workers.
+ * @reorder_work: work struct for reordering.
* @lock: Reorder lock.
- * @processed: Number of already processed objects.
- * @timer: Reorder timer.
*/
struct parallel_data {
struct padata_instance *pinst;
@@ -134,10 +129,10 @@ struct parallel_data {
atomic_t reorder_objects;
atomic_t refcnt;
atomic_t seq_nr;
+ int cpu;
struct padata_cpumask cpumask;
+ struct work_struct reorder_work;
spinlock_t lock ____cacheline_aligned;
- unsigned int processed;
- struct timer_list timer;
};
/**
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 37dab8116901..931fda3e5e0d 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -69,6 +69,7 @@ struct pci_epc_ops {
* @bitmap: bitmap to manage the PCI address space
* @pages: number of bits representing the address region
* @page_size: size of each page
+ * @lock: mutex to protect bitmap
*/
struct pci_epc_mem {
phys_addr_t phys_base;
@@ -76,6 +77,8 @@ struct pci_epc_mem {
unsigned long *bitmap;
size_t page_size;
int pages;
+ /* mutex to protect against concurrent access for memory allocation */
+ struct mutex lock;
};
/**
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b1f297f4b7b0..2fda9893962d 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -373,6 +373,9 @@ struct pci_dev {
bool match_driver; /* Skip attaching driver */
unsigned int transparent:1; /* Subtractive decode bridge */
+ unsigned int io_window:1; /* Bridge has I/O window */
+ unsigned int pref_window:1; /* Bridge has pref mem window */
+ unsigned int pref_64_window:1; /* Pref mem window is 64-bit */
unsigned int multifunction:1; /* Multi-function device */
unsigned int is_busmaster:1; /* Is busmaster */
@@ -1141,7 +1144,6 @@ int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
-void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
/* Power management related routines */
int pci_save_state(struct pci_dev *dev);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d157983b84cf..c0dd2f749d3f 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -117,6 +117,10 @@
#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe
#define PCI_CLASS_SERIAL_FIBER 0x0c04
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
+#define PCI_CLASS_SERIAL_IPMI 0x0c07
+#define PCI_CLASS_SERIAL_IPMI_SMIC 0x0c0700
+#define PCI_CLASS_SERIAL_IPMI_KCS 0x0c0701
+#define PCI_CLASS_SERIAL_IPMI_BT 0x0c0702
#define PCI_BASE_CLASS_WIRELESS 0x0d
#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10
@@ -144,6 +148,8 @@
/* Vendors and devices. Sort key: vendor first, device next. */
+#define PCI_VENDOR_ID_LOONGSON 0x0014
+
#define PCI_VENDOR_ID_TTTECH 0x0357
#define PCI_DEVICE_ID_TTTECH_MC322 0x000a
@@ -541,6 +547,11 @@
#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584
+#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
+#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -1133,6 +1144,8 @@
#define PCI_VENDOR_ID_TCONRAD 0x10da
#define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508
+#define PCI_VENDOR_ID_ROHM 0x10db
+
#define PCI_VENDOR_ID_NVIDIA 0x10de
#define PCI_DEVICE_ID_NVIDIA_TNT 0x0020
#define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028
@@ -1327,6 +1340,7 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_320M 0x08A0
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85
@@ -1819,6 +1833,12 @@
#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+#define PCI_VENDOR_ID_PERICOM 0x12D8
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
+
#define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0
#define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031
#define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021
@@ -1941,6 +1961,8 @@
#define PCI_VENDOR_ID_DIGIGRAM 0x1369
#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001
#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002
+#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_SERIAL_SUBSYSTEM 0xc021
+#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_CAE_SERIAL_SUBSYSTEM 0xc022
#define PCI_VENDOR_ID_KAWASAKI 0x136b
#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01
@@ -2122,6 +2144,7 @@
#define PCI_VENDOR_ID_MYRICOM 0x14c1
#define PCI_VENDOR_ID_MEDIATEK 0x14c3
+#define PCI_DEVICE_ID_MEDIATEK_7629 0x7629
#define PCI_VENDOR_ID_TITAN 0x14D2
#define PCI_DEVICE_ID_TITAN_010L 0x8001
@@ -2354,6 +2377,12 @@
#define PCI_DEVICE_ID_CENATEK_IDE 0x0001
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf
+#define PCI_DEVICE_ID_SYNOPSYS_EDDA 0xedda
+
+#define PCI_VENDOR_ID_USR 0x16ec
#define PCI_VENDOR_ID_VITESSE 0x1725
#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174
@@ -2389,6 +2418,8 @@
#define PCI_DEVICE_ID_RDC_R6061 0x6061
#define PCI_DEVICE_ID_RDC_D1010 0x1010
+#define PCI_VENDOR_ID_GLI 0x17a0
+
#define PCI_VENDOR_ID_LENOVO 0x17aa
#define PCI_VENDOR_ID_QCOM 0x17cb
@@ -2539,8 +2570,6 @@
#define PCI_VENDOR_ID_HUAWEI 0x19e5
#define PCI_VENDOR_ID_NETRONOME 0x19ee
-#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200
-#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240
#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000
#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000
#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000
@@ -2556,6 +2585,8 @@
#define PCI_VENDOR_ID_ASMEDIA 0x1b21
+#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
+
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
#define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001
@@ -2981,6 +3012,7 @@
#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea
#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500
#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
+#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
#define PCI_VENDOR_ID_SCALEMP 0x8686
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 4f052496cdfd..0a4f54dd4737 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -78,9 +78,9 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc)
*/
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
- s64 ret = fbc->count;
+ /* Prevent reloads of fbc->count */
+ s64 ret = READ_ONCE(fbc->count);
- barrier(); /* Prevent reloads of fbc->count */
if (ret >= 0)
return ret;
return 0;
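READ_ONCE() expresses directly the guarantee the old barrier() provided indirectly: fbc->count is loaded exactly once, so the value that passes the sign test is the value returned even while another CPU is updating the counter. A minimal sketch of the snapshot-then-test pattern, on a hypothetical shared s64 rather than the real percpu_counter:

static inline s64 read_positive_snapshot(const s64 *counter)
{
        s64 snap = READ_ONCE(*counter); /* one load, never re-read */

        return snap >= 0 ? snap : 0;
}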
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 3b12fd28af78..fc4df3ccefc9 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -220,10 +220,8 @@ struct pnp_card {
#define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list)
#define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list)
#define to_pnp_card(n) container_of(n, struct pnp_card, dev)
-#define pnp_for_each_card(card) \
- for((card) = global_to_pnp_card(pnp_cards.next); \
- (card) != global_to_pnp_card(&pnp_cards); \
- (card) = global_to_pnp_card((card)->global_list.next))
+#define pnp_for_each_card(card) \
+ list_for_each_entry(card, &pnp_cards, global_list)
struct pnp_card_link {
struct pnp_card *card;
@@ -276,14 +274,9 @@ struct pnp_dev {
#define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list)
#define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list)
#define to_pnp_dev(n) container_of(n, struct pnp_dev, dev)
-#define pnp_for_each_dev(dev) \
- for((dev) = global_to_pnp_dev(pnp_global.next); \
- (dev) != global_to_pnp_dev(&pnp_global); \
- (dev) = global_to_pnp_dev((dev)->global_list.next))
-#define card_for_each_dev(card,dev) \
- for((dev) = card_to_pnp_dev((card)->devices.next); \
- (dev) != card_to_pnp_dev(&(card)->devices); \
- (dev) = card_to_pnp_dev((dev)->card_list.next))
+#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, global_list)
+#define card_for_each_dev(card, dev) \
+ list_for_each_entry(dev, &(card)->devices, card_list)
#define pnp_dev_name(dev) (dev)->name
static inline void *pnp_get_drvdata(struct pnp_dev *pdev)
@@ -437,14 +430,10 @@ struct pnp_protocol {
};
#define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list)
-#define protocol_for_each_card(protocol,card) \
- for((card) = protocol_to_pnp_card((protocol)->cards.next); \
- (card) != protocol_to_pnp_card(&(protocol)->cards); \
- (card) = protocol_to_pnp_card((card)->protocol_list.next))
-#define protocol_for_each_dev(protocol,dev) \
- for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \
- (dev) != protocol_to_pnp_dev(&(protocol)->devices); \
- (dev) = protocol_to_pnp_dev((dev)->protocol_list.next))
+#define protocol_for_each_card(protocol, card) \
+ list_for_each_entry(card, &(protocol)->cards, protocol_list)
+#define protocol_for_each_dev(protocol, dev) \
+ list_for_each_entry(dev, &(protocol)->devices, protocol_list)
extern struct bus_type pnp_bus_type;
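Replacing the open-coded cursors with list_for_each_entry() keeps the iteration order and termination condition but leaves the container_of() arithmetic to the generic list helper. A sketch of a caller walking the global device list through the converted macro; the loop body is only illustrative:

struct pnp_dev *dev;

pnp_for_each_dev(dev) {
        /* expands to list_for_each_entry(dev, &pnp_global, global_list) */
        dev_info(&dev->dev, "PnP device %s\n", pnp_dev_name(dev));
}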
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index d6355f49fbae..13d5dd4eb40b 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -49,7 +49,6 @@ struct bq27xxx_reg_cache {
int capacity;
int energy;
int flags;
- int power_avg;
int health;
};
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
new file mode 100644
index 000000000000..cc1e71334e53
--- /dev/null
+++ b/include/linux/prandom.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/linux/prandom.h
+ *
+ * Include file for the fast pseudo-random 32-bit
+ * generation.
+ */
+#ifndef _LINUX_PRANDOM_H
+#define _LINUX_PRANDOM_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+
+u32 prandom_u32(void);
+void prandom_bytes(void *buf, size_t nbytes);
+void prandom_seed(u32 seed);
+void prandom_reseed_late(void);
+
+#if BITS_PER_LONG == 64
+/*
+ * The core SipHash round function. Each line can be executed in
+ * parallel given enough CPU resources.
+ */
+#define PRND_SIPROUND(v0, v1, v2, v3) ( \
+ v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
+ v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
+ v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
+ v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
+)
+
+#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
+#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
+
+#elif BITS_PER_LONG == 32
+/*
+ * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash.
+ * This is weaker, but 32-bit machines are not used for high-traffic
+ * applications, so there is less output for an attacker to analyze.
+ */
+#define PRND_SIPROUND(v0, v1, v2, v3) ( \
+ v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \
+ v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \
+ v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \
+ v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \
+)
+#define PRND_K0 0x6c796765
+#define PRND_K1 0x74656462
+
+#else
+#error Unsupported BITS_PER_LONG
+#endif
+
+struct rnd_state {
+ __u32 s1, s2, s3, s4;
+};
+
+u32 prandom_u32_state(struct rnd_state *state);
+void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
+
+#define prandom_init_once(pcpu_state) \
+ DO_ONCE(prandom_seed_full_state, (pcpu_state))
+
+/**
+ * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
+ * @ep_ro: right open interval endpoint
+ *
+ * Returns a pseudo-random number that is in interval [0, ep_ro). Note
+ * that the result depends on PRNG being well distributed in [0, ~0U]
+ * u32 space. Here we use maximally equidistributed combined Tausworthe
+ * generator, that is, prandom_u32(). This is useful when requesting a
+ * random index of an array containing ep_ro elements, for example.
+ *
+ * Returns: pseudo-random number in interval [0, ep_ro)
+ */
+static inline u32 prandom_u32_max(u32 ep_ro)
+{
+ return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
+}
+
+/*
+ * Handle minimum values for seeds
+ */
+static inline u32 __seed(u32 x, u32 m)
+{
+ return (x < m) ? x + m : x;
+}
+
+/**
+ * prandom_seed_state - set seed for prandom_u32_state().
+ * @state: pointer to state structure to receive the seed.
+ * @seed: arbitrary 64-bit value to use as a seed.
+ */
+static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
+{
+ u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+
+ state->s1 = __seed(i, 2U);
+ state->s2 = __seed(i, 8U);
+ state->s3 = __seed(i, 16U);
+ state->s4 = __seed(i, 128U);
+}
+
+/* Pseudo random number generator from numerical recipes. */
+static inline u32 next_pseudo_random32(u32 seed)
+{
+ return seed * 1664525 + 1013904223;
+}
+
+#endif
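The new header gathers the existing non-cryptographic PRNG API in one place; typical uses are a bias-free bounded index via prandom_u32_max() or a reproducible stream seeded for self-tests. A small sketch under those assumptions (the seed value and bound are made up):

struct rnd_state st;
u32 i, pick;

prandom_seed_state(&st, 0x100df00dULL);         /* deterministic stream */
for (i = 0; i < 4; i++)
        pr_info("sample %u: %u\n", i, prandom_u32_state(&st));

pick = prandom_u32_max(16);                     /* uniform index in [0, 16) */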
diff --git a/include/linux/printk.h b/include/linux/printk.h
index cf3eccfe1543..6dd867e39365 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -206,7 +206,6 @@ __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
extern asmlinkage void dump_stack(void) __cold;
-extern void printk_safe_init(void);
extern void printk_safe_flush(void);
extern void printk_safe_flush_on_panic(void);
#else
@@ -273,10 +272,6 @@ static inline asmlinkage void dump_stack(void)
{
}
-static inline void printk_safe_init(void)
-{
-}
-
static inline void printk_safe_flush(void)
{
}
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index d0e1f1522a78..5141657a0f7f 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -21,6 +21,7 @@ extern void proc_flush_task(struct task_struct *);
extern struct proc_dir_entry *proc_symlink(const char *,
struct proc_dir_entry *, const char *);
+struct proc_dir_entry *_proc_mkdir(const char *, umode_t, struct proc_dir_entry *, void *, bool);
extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
struct proc_dir_entry *, void *);
@@ -89,6 +90,11 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
static inline struct proc_dir_entry *proc_mkdir(const char *name,
struct proc_dir_entry *parent) {return NULL;}
static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; }
+static inline struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode,
+ struct proc_dir_entry *parent, void *data, bool force_lookup)
+{
+ return NULL;
+}
static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
@@ -121,7 +127,7 @@ struct net;
static inline struct proc_dir_entry *proc_net_mkdir(
struct net *net, const char *name, struct proc_dir_entry *parent)
{
- return proc_mkdir_data(name, 0, parent, net);
+ return _proc_mkdir(name, 0, parent, net, true);
}
struct ns_common;
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 2dd0a9ed5b36..6d15040c642c 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -97,6 +97,11 @@ struct qed_chain_u32 {
u32 cons_idx;
};
+struct addr_tbl_entry {
+ void *virt_addr;
+ dma_addr_t dma_map;
+};
+
struct qed_chain {
/* fastpath portion of the chain - required for commands such
* as produce / consume.
@@ -107,10 +112,11 @@ struct qed_chain {
/* Fastpath portions of the PBL [if exists] */
struct {
- /* Table for keeping the virtual addresses of the chain pages,
- * respectively to the physical addresses in the pbl table.
+ /* Table for keeping the virtual and physical addresses of the
+ * chain pages, respectively to the physical addresses
+ * in the pbl table.
*/
- void **pp_virt_addr_tbl;
+ struct addr_tbl_entry *pp_addr_tbl;
union {
struct qed_chain_pbl_u16 u16;
@@ -201,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
+ u16 elem_per_page = p_chain->elem_per_page;
+ u32 prod = p_chain->u.chain16.prod_idx;
+ u32 cons = p_chain->u.chain16.cons_idx;
u16 used;
- used = (u16) (((u32)0x10000 +
- (u32)p_chain->u.chain16.prod_idx) -
- (u32)p_chain->u.chain16.cons_idx);
+ if (prod < cons)
+ prod += (u32)U16_MAX + 1;
+
+ used = (u16)(prod - cons);
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+ used -= prod / elem_per_page - cons / elem_per_page;
return (u16)(p_chain->capacity - used);
}
static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
+ u16 elem_per_page = p_chain->elem_per_page;
+ u64 prod = p_chain->u.chain32.prod_idx;
+ u64 cons = p_chain->u.chain32.cons_idx;
u32 used;
- used = (u32) (((u64)0x100000000ULL +
- (u64)p_chain->u.chain32.prod_idx) -
- (u64)p_chain->u.chain32.cons_idx);
+ if (prod < cons)
+ prod += (u64)U32_MAX + 1;
+
+ used = (u32)(prod - cons);
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+ used -= (u32)(prod / elem_per_page - cons / elem_per_page);
return p_chain->capacity - used;
}
@@ -287,7 +299,7 @@ qed_chain_advance_page(struct qed_chain *p_chain,
*(u32 *)page_to_inc = 0;
page_index = *(u32 *)page_to_inc;
}
- *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
+ *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
}
}
@@ -537,7 +549,7 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
p_chain->pbl_sp.p_phys_table = 0;
p_chain->pbl_sp.p_virt_table = NULL;
- p_chain->pbl.pp_virt_addr_tbl = NULL;
+ p_chain->pbl.pp_addr_tbl = NULL;
}
/**
@@ -575,11 +587,11 @@ static inline void qed_chain_init_mem(struct qed_chain *p_chain,
static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
void *p_virt_pbl,
dma_addr_t p_phys_pbl,
- void **pp_virt_addr_tbl)
+ struct addr_tbl_entry *pp_addr_tbl)
{
p_chain->pbl_sp.p_phys_table = p_phys_pbl;
p_chain->pbl_sp.p_virt_table = p_virt_pbl;
- p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
+ p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
}
/**
@@ -644,7 +656,7 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
break;
case QED_CHAIN_MODE_PBL:
last_page_idx = p_chain->page_cnt - 1;
- p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+ p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
break;
}
/* p_virt_addr points at this stage to the last page of the chain */
@@ -716,7 +728,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
page_cnt = qed_chain_get_page_cnt(p_chain);
for (i = 0; i < page_cnt; i++)
- memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
+ memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
QED_CHAIN_PAGE_SIZE);
}
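The element-count helpers now normalise the producer index before subtracting, so neither the outstanding-element count nor the per-page divisions in NEXT_PTR mode ever see the producer apparently behind the consumer after the 16-bit (or 32-bit) index has wrapped. A worked sketch of the 16-bit case, with values picked purely for illustration:

u32 prod = 2, cons = 65530;             /* producer has wrapped past the consumer */
u16 used;

if (prod < cons)
        prod += (u32)U16_MAX + 1;       /* prod becomes 65538 */

used = (u16)(prod - cons);              /* 8 outstanding elements, as expected */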
diff --git a/include/linux/random.h b/include/linux/random.h
index 445a0ea4ff49..37209b3b22ae 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -106,61 +106,12 @@ declare_get_random_var_wait(long)
unsigned long randomize_page(unsigned long start, unsigned long range);
-u32 prandom_u32(void);
-void prandom_bytes(void *buf, size_t nbytes);
-void prandom_seed(u32 seed);
-void prandom_reseed_late(void);
-
-struct rnd_state {
- __u32 s1, s2, s3, s4;
-};
-
-u32 prandom_u32_state(struct rnd_state *state);
-void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
-void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
-
-#define prandom_init_once(pcpu_state) \
- DO_ONCE(prandom_seed_full_state, (pcpu_state))
-
-/**
- * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
- * @ep_ro: right open interval endpoint
- *
- * Returns a pseudo-random number that is in interval [0, ep_ro). Note
- * that the result depends on PRNG being well distributed in [0, ~0U]
- * u32 space. Here we use maximally equidistributed combined Tausworthe
- * generator, that is, prandom_u32(). This is useful when requesting a
- * random index of an array containing ep_ro elements, for example.
- *
- * Returns: pseudo-random number in interval [0, ep_ro)
- */
-static inline u32 prandom_u32_max(u32 ep_ro)
-{
- return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
-}
-
/*
- * Handle minimum values for seeds
+ * This is designed to be standalone for just prandom
+ * users, but for now we include it from <linux/random.h>
+ * for legacy reasons.
*/
-static inline u32 __seed(u32 x, u32 m)
-{
- return (x < m) ? x + m : x;
-}
-
-/**
- * prandom_seed_state - set seed for prandom_u32_state().
- * @state: pointer to state structure to receive the seed.
- * @seed: arbitrary 64-bit value to use as a seed.
- */
-static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
-{
- u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
-
- state->s1 = __seed(i, 2U);
- state->s2 = __seed(i, 8U);
- state->s3 = __seed(i, 16U);
- state->s4 = __seed(i, 128U);
-}
+#include <linux/prandom.h>
#ifdef CONFIG_ARCH_RANDOM
# include <asm/archrandom.h>
@@ -191,10 +142,4 @@ static inline bool arch_has_random_seed(void)
}
#endif
-/* Pseudo random number generator from numerical recipes. */
-static inline u32 next_pseudo_random32(u32 seed)
-{
- return seed * 1664525 + 1013904223;
-}
-
#endif /* _LINUX_RANDOM_H */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 988d176472df..d7d6d4eb1794 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -214,7 +214,8 @@ struct page_vma_mapped_walk {
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
- if (pvmw->pte)
+ /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
+ if (pvmw->pte && !PageHuge(pvmw->page))
pte_unmap(pvmw->pte);
if (pvmw->ptl)
spin_unlock(pvmw->ptl);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0530de9a4efc..5524cd5c6abe 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -887,8 +887,8 @@ struct task_struct {
struct seccomp seccomp;
/* Thread group tracking: */
- u32 parent_exec_id;
- u32 self_exec_id;
+ u64 parent_exec_id;
+ u64 self_exec_id;
/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
spinlock_t alloc_lock;
@@ -996,6 +996,8 @@ struct task_struct {
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
+ struct mutex futex_exit_mutex;
+ unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
@@ -1377,7 +1379,6 @@ extern struct pid *cad_pid;
*/
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
-#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index ecdc6542070f..dfd82eab2902 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -72,6 +72,7 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
+#define MMF_MULTIPROCESS 27 /* mm is shared between processes */
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index e9d4e389aed9..ef54f4b3f1e4 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -49,6 +49,8 @@ static inline void mmdrop(struct mm_struct *mm)
__mmdrop(mm);
}
+void mmdrop(struct mm_struct *mm);
+
/*
* This has to be called after a get_task_mm()/mmget_not_zero()
* followed by taking the mmap_sem for writing before modifying the
@@ -117,8 +119,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task);
* succeeds.
*/
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
-/* Remove the current tasks stale references to the old mm_struct */
-extern void mm_release(struct task_struct *, struct mm_struct *);
+/* Remove the current tasks stale references to the old mm_struct on exit() */
+extern void exit_mm_release(struct task_struct *, struct mm_struct *);
+/* Remove the current tasks stale references to the old mm_struct on exec() */
+extern void exec_mm_release(struct task_struct *, struct mm_struct *);
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
@@ -163,7 +167,8 @@ static inline bool in_vfork(struct task_struct *tsk)
* another oom-unkillable task does this it should blame itself.
*/
rcu_read_lock();
- ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
+ ret = tsk->vfork_done &&
+ rcu_dereference(tsk->real_parent)->mm == tsk->mm;
rcu_read_unlock();
return ret;
diff --git a/include/linux/security.h b/include/linux/security.h
index d2240605edc4..454cc963d145 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -787,7 +787,7 @@ static inline int security_inode_killpriv(struct dentry *dentry)
static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
{
- return -EOPNOTSUPP;
+ return cap_inode_getsecurity(inode, name, buffer, alloc);
}
static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index aa5deb041c25..7cc952282e8b 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -30,7 +30,7 @@ static inline void seq_buf_clear(struct seq_buf *s)
}
static inline void
-seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size)
+seq_buf_init(struct seq_buf *s, char *buf, unsigned int size)
{
s->buffer = buf;
s->size = size;
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index bcf4cf26b8c8..a42a29952889 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -243,6 +243,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
* usual consistency guarantee. It is one wmb cheaper, because we can
* collapse the two back-to-back wmb()s.
*
+ * Note that, writes surrounding the barrier should be declared atomic (e.g.
+ * via WRITE_ONCE): a) to ensure the writes become visible to other threads
+ * atomically, avoiding compiler optimizations; b) to document which writes are
+ * meant to propagate to the reader critical section. This is necessary because
+ * neither writes before and after the barrier are enclosed in a seq-writer
+ * critical section that would ensure readers are aware of ongoing writes.
+ *
* seqcount_t seq;
* bool X = true, Y = false;
*
@@ -262,11 +269,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
*
* void write(void)
* {
- * Y = true;
+ * WRITE_ONCE(Y, true);
*
* raw_write_seqcount_barrier(seq);
*
- * X = false;
+ * WRITE_ONCE(X, false);
* }
*/
static inline void raw_write_seqcount_barrier(seqcount_t *s)
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 2a986d282a97..a0e15e7b0b99 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -18,7 +18,7 @@ static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
#ifndef set_mce_nospec
-static inline int set_mce_nospec(unsigned long pfn)
+static inline int set_mce_nospec(unsigned long pfn, bool unmap)
{
return 0;
}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 25407c206e73..06176ef2a842 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1363,6 +1363,11 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
skb->next = NULL;
}
+/* Iterate through singly-linked GSO fragments of an skb. */
+#define skb_list_walk_safe(first, skb, next_skb) \
+ for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
+ (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
+
static inline void skb_list_del_init(struct sk_buff *skb)
{
__list_del_entry(&skb->list);
@@ -1689,6 +1694,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
}
/**
+ * skb_queue_len_lockless - get queue length
+ * @list_: list to measure
+ *
+ * Return the length of an &sk_buff queue.
+ * This variant can be used in lockless contexts.
+ */
+static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
+{
+ return READ_ONCE(list_->qlen);
+}
+
+/**
* __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
* @list: queue to initialize
*
@@ -1895,7 +1912,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
struct sk_buff *next, *prev;
- list->qlen--;
+ WRITE_ONCE(list->qlen, list->qlen - 1);
next = skb->next;
prev = skb->prev;
skb->next = skb->prev = NULL;
@@ -3014,8 +3031,9 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error if @free_on_error is true.
*/
-static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
- bool free_on_error)
+static inline int __must_check __skb_put_padto(struct sk_buff *skb,
+ unsigned int len,
+ bool free_on_error)
{
unsigned int size = skb->len;
@@ -3038,7 +3056,7 @@ static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error.
*/
-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
{
return __skb_put_padto(skb, len, true);
}
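skb_list_walk_safe() walks the singly linked skb->next chain of GSO segments while sampling the successor before the body runs, so the current segment may be unlinked or freed inside the loop. A sketch of the usual transmit pattern, assuming segs is a segment list such as the one returned by skb_gso_segment(); the per-segment consumer is hypothetical:

struct sk_buff *skb, *next;

skb_list_walk_safe(segs, skb, next) {
        skb_mark_not_on_list(skb);      /* detach before handing it off */
        foo_xmit_one(skb);              /* hypothetical helper; may free skb */
}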
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9fb239e12b82..6bb7f07bc1dd 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -53,7 +53,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
smp_call_func_t func, void *info, bool wait,
gfp_t gfp_flags);
-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
#ifdef CONFIG_SMP
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a64235e05321..16158fe097a8 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -450,6 +450,9 @@ struct spi_controller {
#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
+ /* flag indicating this is a non-devres managed controller */
+ bool devm_allocated;
+
/* flag indicating this is an SPI slave controller */
bool slave;
@@ -634,6 +637,25 @@ static inline struct spi_controller *spi_alloc_slave(struct device *host,
return __spi_alloc_controller(host, size, true);
}
+struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+ unsigned int size,
+ bool slave);
+
+static inline struct spi_controller *devm_spi_alloc_master(struct device *dev,
+ unsigned int size)
+{
+ return __devm_spi_alloc_controller(dev, size, false);
+}
+
+static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev,
+ unsigned int size)
+{
+ if (!IS_ENABLED(CONFIG_SPI_SLAVE))
+ return NULL;
+
+ return __devm_spi_alloc_controller(dev, size, true);
+}
+
extern int spi_register_controller(struct spi_controller *ctlr);
extern int devm_spi_register_controller(struct device *dev,
struct spi_controller *ctlr);
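The devm_ allocation variants tie the controller's lifetime to the probing device, so early error returns no longer need a matching spi_controller_put(). A minimal probe sketch; the foo names and private struct are hypothetical:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct foo_priv { void __iomem *base; };        /* hypothetical driver state */

static int foo_spi_probe(struct platform_device *pdev)
{
        struct spi_controller *ctlr;

        ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
        if (!ctlr)
                return -ENOMEM;

        /* fill in ctlr->setup, ctlr->transfer_one, bus numbering, ... */

        return devm_spi_register_controller(&pdev->dev, ctlr);
}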
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 6d3635c86dbe..ccdaa8fd5657 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -138,7 +138,7 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus);
#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
-static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
+static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
{
unsigned long flags;
@@ -149,14 +149,15 @@ static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
return ret;
}
-static inline int stop_machine(cpu_stop_fn_t fn, void *data,
- const struct cpumask *cpus)
+static __always_inline int
+stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
return stop_machine_cpuslocked(fn, data, cpus);
}
-static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
- const struct cpumask *cpus)
+static __always_inline int
+stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
+ const struct cpumask *cpus)
{
return stop_machine(fn, data, cpus);
}
diff --git a/include/linux/string.h b/include/linux/string.h
index f58e1ef76572..1e0c442b941e 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -31,6 +31,10 @@ size_t strlcpy(char *, const char *, size_t);
#ifndef __HAVE_ARCH_STRSCPY
ssize_t strscpy(char *, const char *, size_t);
#endif
+
+/* Wraps calls to strscpy()/memset(), no arch specific code required */
+ssize_t strscpy_pad(char *dest, const char *src, size_t count);
+
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
#endif
@@ -239,6 +243,31 @@ void __read_overflow3(void) __compiletime_error("detected read beyond size of ob
void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
+
+#ifdef CONFIG_KASAN
+extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
+extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
+extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
+extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
+extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
+extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
+extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
+#else
+#define __underlying_memchr __builtin_memchr
+#define __underlying_memcmp __builtin_memcmp
+#define __underlying_memcpy __builtin_memcpy
+#define __underlying_memmove __builtin_memmove
+#define __underlying_memset __builtin_memset
+#define __underlying_strcat __builtin_strcat
+#define __underlying_strcpy __builtin_strcpy
+#define __underlying_strlen __builtin_strlen
+#define __underlying_strncat __builtin_strncat
+#define __underlying_strncpy __builtin_strncpy
+#endif
+
__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
{
size_t p_size = __builtin_object_size(p, 0);
@@ -246,14 +275,14 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
__write_overflow();
if (p_size < size)
fortify_panic(__func__);
- return __builtin_strncpy(p, q, size);
+ return __underlying_strncpy(p, q, size);
}
__FORTIFY_INLINE char *strcat(char *p, const char *q)
{
size_t p_size = __builtin_object_size(p, 0);
if (p_size == (size_t)-1)
- return __builtin_strcat(p, q);
+ return __underlying_strcat(p, q);
if (strlcat(p, q, p_size) >= p_size)
fortify_panic(__func__);
return p;
@@ -267,7 +296,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
/* Work around gcc excess stack consumption issue */
if (p_size == (size_t)-1 ||
(__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
- return __builtin_strlen(p);
+ return __underlying_strlen(p);
ret = strnlen(p, p_size);
if (p_size <= ret)
fortify_panic(__func__);
@@ -300,7 +329,7 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
__write_overflow();
if (len >= p_size)
fortify_panic(__func__);
- __builtin_memcpy(p, q, len);
+ __underlying_memcpy(p, q, len);
p[len] = '\0';
}
return ret;
@@ -313,12 +342,12 @@ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
size_t p_size = __builtin_object_size(p, 0);
size_t q_size = __builtin_object_size(q, 0);
if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __builtin_strncat(p, q, count);
+ return __underlying_strncat(p, q, count);
p_len = strlen(p);
copy_len = strnlen(q, count);
if (p_size < p_len + copy_len + 1)
fortify_panic(__func__);
- __builtin_memcpy(p + p_len, q, copy_len);
+ __underlying_memcpy(p + p_len, q, copy_len);
p[p_len + copy_len] = '\0';
return p;
}
@@ -330,7 +359,7 @@ __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
__write_overflow();
if (p_size < size)
fortify_panic(__func__);
- return __builtin_memset(p, c, size);
+ return __underlying_memset(p, c, size);
}
__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
@@ -345,7 +374,7 @@ __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
}
if (p_size < size || q_size < size)
fortify_panic(__func__);
- return __builtin_memcpy(p, q, size);
+ return __underlying_memcpy(p, q, size);
}
__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
@@ -360,7 +389,7 @@ __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
}
if (p_size < size || q_size < size)
fortify_panic(__func__);
- return __builtin_memmove(p, q, size);
+ return __underlying_memmove(p, q, size);
}
extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
@@ -386,7 +415,7 @@ __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
}
if (p_size < size || q_size < size)
fortify_panic(__func__);
- return __builtin_memcmp(p, q, size);
+ return __underlying_memcmp(p, q, size);
}
__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
@@ -396,7 +425,7 @@ __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
- return __builtin_memchr(p, c, size);
+ return __underlying_memchr(p, c, size);
}
void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
@@ -427,11 +456,22 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q)
size_t p_size = __builtin_object_size(p, 0);
size_t q_size = __builtin_object_size(q, 0);
if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __builtin_strcpy(p, q);
+ return __underlying_strcpy(p, q);
memcpy(p, q, strlen(q) + 1);
return p;
}
+/* Don't use these outside the FORTIFY_SOURCE implementation */
+#undef __underlying_memchr
+#undef __underlying_memcmp
+#undef __underlying_memcpy
+#undef __underlying_memmove
+#undef __underlying_memset
+#undef __underlying_strcat
+#undef __underlying_strcpy
+#undef __underlying_strlen
+#undef __underlying_strncat
+#undef __underlying_strncpy
#endif
/**
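Routing the fortified wrappers through __underlying_*() gives the calls a name the fortify layer itself never redefines: under KASAN the declarations use __RENAME() so each call binds to the real, instrumented out-of-line implementation, while without KASAN they simply fall back to the __builtin_* forms as before. Roughly what one such declaration amounts to, assuming __RENAME(x) is the usual __asm__(#x) symbol aliasing from <linux/compiler.h>:

extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size)
        __asm__("memcpy");      /* distinct C identifier, same underlying symbol */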
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 5ac5db4d295f..566d5f547567 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -83,6 +83,7 @@ struct pf_desc {
u32 service;
char *name;
char *auth_domain_name;
+ struct auth_domain *domain;
bool datatouch;
};
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index fd78f78df5c6..3e3214cca447 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -159,6 +159,7 @@ extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
struct svc_rdma_recv_ctxt *ctxt);
extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
+extern void svc_rdma_release_rqst(struct svc_rqst *rqstp);
extern int svc_rdma_recvfrom(struct svc_rqst *);
/* svc_rdma_rw.c */
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index a4528b26c8aa..d229d27ab19e 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -21,7 +21,8 @@ int gss_svc_init(void);
void gss_svc_shutdown(void);
int gss_svc_init_net(struct net *net);
void gss_svc_shutdown_net(struct net *net);
-int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
+struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor,
+ char *name);
u32 svcauth_gss_flavor(struct auth_domain *dom);
#endif /* __KERNEL__ */
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 2bd68177a442..33580cc72a43 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -26,8 +26,7 @@ struct rpc_rqst;
#define XDR_QUADLEN(l) (((l) + 3) >> 2)
/*
- * Generic opaque `network object.' At the kernel level, this type
- * is used only by lockd.
+ * Generic opaque `network object.'
*/
#define XDR_MAX_NETOBJ 1024
struct xdr_netobj {
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index e7bbd82908b1..69fed13e633b 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -317,6 +317,7 @@ struct xprt_class {
struct rpc_xprt * (*setup)(struct xprt_create *);
struct module *owner;
char name[32];
+ const char * netid[];
};
/*
diff --git a/include/linux/swab.h b/include/linux/swab.h
index e466fd159c85..bcff5149861a 100644
--- a/include/linux/swab.h
+++ b/include/linux/swab.h
@@ -7,6 +7,7 @@
# define swab16 __swab16
# define swab32 __swab32
# define swab64 __swab64
+# define swab __swab
# define swahw32 __swahw32
# define swahb32 __swahb32
# define swab16p __swab16p
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 22af9d8a84ae..28d572b7ea73 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -368,7 +368,8 @@ static inline void num_poisoned_pages_inc(void)
}
#endif
-#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
+#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \
+ defined(CONFIG_DEVICE_PRIVATE)
static inline int non_swap_entry(swp_entry_t entry)
{
return swp_type(entry) >= MAX_SWAPFILES;
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 987cefa337de..1cd7bad56075 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -299,6 +299,11 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn)
return kernfs_enable_ns(kn);
}
+__printf(2, 3)
+int sysfs_emit(char *buf, const char *fmt, ...);
+__printf(3, 4)
+int sysfs_emit_at(char *buf, int at, const char *fmt, ...);
+
#else /* CONFIG_SYSFS */
static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
@@ -505,6 +510,17 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn)
{
}
+__printf(2, 3)
+static inline int sysfs_emit(char *buf, const char *fmt, ...)
+{
+ return 0;
+}
+
+__printf(3, 4)
+static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...)
+{
+ return 0;
+}
#endif /* CONFIG_SYSFS */
static inline int __must_check sysfs_create_file(struct kobject *kobj,
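sysfs_emit() and sysfs_emit_at() are PAGE_SIZE-aware replacements for sprintf()/scnprintf() in ->show() callbacks: they rely on the buffer the sysfs core passes in being exactly one page and refuse to run past it. A sketch of a show routine built on it; the attribute name and private struct are hypothetical:

static ssize_t foo_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct foo_priv *priv = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", priv->value);
}
static DEVICE_ATTR_RO(foo);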
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 4374196b98ea..1192f1e76015 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -225,6 +225,8 @@ struct tcp_sock {
} rack;
u16 advmss; /* Advertised MSS */
u8 compressed_ack;
+ u8 tlp_retrans:1, /* TLP is a retransmission */
+ unused_1:7;
u32 chrono_start; /* Start time in jiffies of a TCP chrono */
u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
u8 chrono_type:2, /* current chronograph type */
@@ -247,7 +249,7 @@ struct tcp_sock {
save_syn:1, /* Save headers of SYN packet */
is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
syn_smc:1; /* SYN includes SMC */
- u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
+ u32 tlp_high_seq; /* snd_nxt at the time of TLP */
/* RTT measurement */
u64 tcp_mstamp; /* most recent packet received/sent */
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 8d8821b3689a..62dbecfe9132 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/restart_block.h>
+#include <linux/errno.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
@@ -39,6 +40,18 @@ enum {
#ifdef __KERNEL__
+#ifndef arch_set_restart_data
+#define arch_set_restart_data(restart) do { } while (0)
+#endif
+
+static inline long set_restart_fn(struct restart_block *restart,
+ long (*fn)(struct restart_block *))
+{
+ restart->fn = fn;
+ arch_set_restart_data(restart);
+ return -ERESTART_RESTARTBLOCK;
+}
+
#ifndef THREAD_ALIGN
#define THREAD_ALIGN THREAD_SIZE
#endif
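set_restart_fn() folds three steps into one: it stores the restart callback, lets an architecture hook (arch_set_restart_data(), a no-op by default) stamp any extra state it needs, and hands back -ERESTART_RESTARTBLOCK for the caller to propagate. A sketch of the calling convention from an interruptible syscall; the handler name is hypothetical:

struct restart_block *restart = &current->restart_block;

/* record whatever state the handler will need (e.g. in restart->nanosleep),
 * then arm the callback and pass the restart request up */
return set_restart_fn(restart, foo_wait_restart);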
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 4a45aea0f96e..8dbdf6cae3e8 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -138,6 +138,10 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts)
*/
static inline s64 timespec64_to_ns(const struct timespec64 *ts)
{
+ /* Prevent multiplication overflow */
+ if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
+ return KTIME_MAX;
+
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
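KTIME_SEC_MAX is the largest tv_sec whose nanosecond equivalent still fits in an s64 (KTIME_MAX / NSEC_PER_SEC), so anything at or above it would overflow the multiplication; the cast to unsigned long long also sweeps negative tv_sec values into the clamp. A small illustration with a far-future timestamp:

struct timespec64 ts = { .tv_sec = KTIME_SEC_MAX + 1, .tv_nsec = 0 };
s64 ns = timespec64_to_ns(&ts);         /* saturates to KTIME_MAX instead of wrapping */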
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 6609b39a7232..6db257466af6 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -12,7 +12,7 @@
*/
struct trace_seq {
- unsigned char buffer[PAGE_SIZE];
+ char buffer[PAGE_SIZE];
struct seq_buf seq;
int full;
};
@@ -51,7 +51,7 @@ static inline int trace_seq_used(struct trace_seq *s)
* that is about to be written to and then return the result
* of that write.
*/
-static inline unsigned char *
+static inline char *
trace_seq_buffer_ptr(struct trace_seq *s)
{
return s->buffer + seq_buf_used(&s->seq);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index e9de8ad0bad7..444aa73037f1 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -364,7 +364,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
static const char *___tp_str __tracepoint_string = str; \
___tp_str; \
})
-#define __tracepoint_string __attribute__((section("__tracepoint_str")))
+#define __tracepoint_string __attribute__((section("__tracepoint_str"), used))
#else
/*
* tracepoint_string() is used to save the string address for userspace
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 248a137112e8..d808ab9c9aff 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -66,7 +66,7 @@ struct tty_buffer {
int read;
int flags;
/* Data points here */
- unsigned long data[0];
+ unsigned long data[];
};
/* Values for .flags field of tty_buffer */
@@ -306,6 +306,10 @@ struct tty_struct {
struct termiox *termiox; /* May be NULL for unsupported */
char name[64];
struct pid *pgrp; /* Protected by ctrl lock */
+ /*
+ * Writes protected by both ctrl lock and legacy mutex, readers must use
+ * at least one of them.
+ */
struct pid *session;
unsigned long flags;
int count;
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 71dbc891851a..e10b09672345 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -236,7 +236,7 @@
*
* Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
* structure to complete. This method is optional and will only be called
- * if provided (otherwise EINVAL will be returned).
+ * if provided (otherwise ENOTTY will be returned).
*/
#include <linux/export.h>
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index a27604f99ed0..11096b561dab 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -69,12 +69,13 @@ struct u64_stats_sync {
};
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq)
+#else
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
- seqcount_init(&syncp->seq);
-#endif
}
+#endif
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index efe79c1cdd47..db9b0dd0a7a3 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -243,6 +243,17 @@ extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
/*
+ * probe_user_read(): safely attempt to read from a location in user space
+ * @dst: pointer to the buffer that shall take the data
+ * @src: address to read from
+ * @size: size of the data chunk
+ *
+ * Safely read from address @src to the buffer at @dst. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+extern long probe_user_read(void *dst, const void __user *src, size_t size);
+
+/*
* probe_kernel_write(): safely attempt to write to a location
* @dst: address to write to
* @src: pointer to the data that shall be written
@@ -254,7 +265,22 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+/*
+ * probe_user_write(): safely attempt to write to a location in user space
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
+extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
+
extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
+ long count);
+extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
/**
* probe_kernel_address(): safely attempt to read from a location
@@ -267,7 +293,7 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
probe_kernel_read(&retval, addr, sizeof(retval))
#ifndef user_access_begin
-#define user_access_begin() do { } while (0)
+#define user_access_begin(type, ptr, len) access_ok(type, ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
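probe_user_read() and probe_user_write() are the user-space counterparts of probe_kernel_read(): they take __user pointers, perform the copy with page faults disabled so they are safe from atomic context, and return -EFAULT when the access cannot be completed. A sketch of pulling a small structure through an untrusted pointer; the struct and variable names are illustrative:

struct foo_args args;

if (probe_user_read(&args, (const void __user *)uaddr, sizeof(args)))
        return -EFAULT;         /* user pointer did not resolve */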
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 8675e145ea8b..2040696d75b6 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -249,6 +249,9 @@ int usb_function_activate(struct usb_function *);
int usb_interface_id(struct usb_configuration *, struct usb_function *);
+int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f,
+ struct usb_ep *_ep, u8 alt);
+
int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
struct usb_ep *_ep);
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index a15ce99dfc2d..78e006355557 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -151,7 +151,7 @@ struct ehci_regs {
#define PORT_OWNER (1<<13) /* true: companion hc owns this port */
#define PORT_POWER (1<<12) /* true: has power (see PPC) */
#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
-/* 11:10 for detecting lowspeed devices (reset vs release ownership) */
+#define PORT_LS_MASK (3<<10) /* Link status (SE0, K or J) */
/* 9 reserved */
#define PORT_LPM (1<<9) /* LPM transaction */
#define PORT_RESET (1<<8) /* reset port */
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index f2162e0fe531..bdf4c88d2aa0 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -451,6 +451,7 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define PD_T_ERROR_RECOVERY 100 /* minimum 25 is insufficient */
#define PD_T_SRCSWAPSTDBY 625 /* Maximum of 650ms */
#define PD_T_NEWSRC 250 /* Maximum of 275ms */
+#define PD_T_SWAP_SRC_START 20 /* Minimum of 20ms */
#define PD_T_DRP_TRY 100 /* 75 - 150 ms */
#define PD_T_DRP_TRYWAIT 600 /* 400 - 800 ms */
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 000a5954b2e8..0a958c794832 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -84,6 +84,10 @@
/* Cannot handle REPORT_LUNS */ \
US_FLAG(ALWAYS_SYNC, 0x20000000) \
/* lies about caching, so always sync */ \
+ US_FLAG(NO_SAME, 0x40000000) \
+ /* Cannot handle WRITE_SAME */ \
+ US_FLAG(SENSE_AFTER_SYNC, 0x80000000) \
+ /* Do REQUEST_SENSE after SYNCHRONIZE_CACHE */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index e0348cb0a1dd..a1829139ff4a 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -3,6 +3,8 @@
#define _LINUX_VIRTIO_NET_H
#include <linux/if_vlan.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/udp.h>
#include <uapi/linux/virtio_net.h>
static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
@@ -28,17 +30,26 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
bool little_endian)
{
unsigned int gso_type = 0;
+ unsigned int thlen = 0;
+ unsigned int p_off = 0;
+ unsigned int ip_proto;
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
gso_type = SKB_GSO_TCPV4;
+ ip_proto = IPPROTO_TCP;
+ thlen = sizeof(struct tcphdr);
break;
case VIRTIO_NET_HDR_GSO_TCPV6:
gso_type = SKB_GSO_TCPV6;
+ ip_proto = IPPROTO_TCP;
+ thlen = sizeof(struct tcphdr);
break;
case VIRTIO_NET_HDR_GSO_UDP:
gso_type = SKB_GSO_UDP;
+ ip_proto = IPPROTO_UDP;
+ thlen = sizeof(struct udphdr);
break;
default:
return -EINVAL;
@@ -51,22 +62,36 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
return -EINVAL;
}
+ skb_reset_mac_header(skb);
+
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
if (!skb_partial_csum_set(skb, start, off))
return -EINVAL;
+
+ p_off = skb_transport_offset(skb) + thlen;
+ if (p_off > skb_headlen(skb))
+ return -EINVAL;
} else {
/* gso packets without NEEDS_CSUM do not set transport_offset.
* probe and drop if does not match one of the above types.
*/
if (gso_type && skb->network_header) {
- if (!skb->protocol)
+ struct flow_keys_basic keys;
+
+ if (!skb->protocol) {
+ __be16 protocol = dev_parse_header_protocol(skb);
+
virtio_net_hdr_set_proto(skb, hdr);
+ if (protocol && protocol != skb->protocol)
+ return -EINVAL;
+ }
retry:
- skb_probe_transport_header(skb, -1);
- if (!skb_transport_header_was_set(skb)) {
+ if (!skb_flow_dissect_flow_keys_basic(skb, &keys,
+ NULL, 0, 0, 0,
+ 0)) {
/* UFO does not specify ipv4 or 6: try both */
if (gso_type & SKB_GSO_UDP &&
skb->protocol == htons(ETH_P_IP)) {
@@ -75,18 +100,33 @@ retry:
}
return -EINVAL;
}
+
+ p_off = keys.control.thoff + thlen;
+ if (p_off > skb_headlen(skb) ||
+ keys.basic.ip_proto != ip_proto)
+ return -EINVAL;
+
+ skb_set_transport_header(skb, keys.control.thoff);
+ } else if (gso_type) {
+ p_off = thlen;
+ if (p_off > skb_headlen(skb))
+ return -EINVAL;
}
}
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
- skb_shinfo(skb)->gso_size = gso_size;
- skb_shinfo(skb)->gso_type = gso_type;
+ /* Too small packets are not really GSO ones. */
+ if (skb->len - p_off > gso_size) {
+ shinfo->gso_size = gso_size;
+ shinfo->gso_type = gso_type;
- /* Header must be checked, and gso_segs computed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
+ /* Header must be checked, and gso_segs computed. */
+ shinfo->gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_segs = 0;
+ }
}
return 0;
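
The virtio_net_hdr_to_skb() changes above derive a per-GSO-type transport header length, reject packets whose transport header does not sit entirely in the linear head, and refuse to mark undersized packets as GSO. A minimal userspace sketch of those sanity checks; struct pkt and its field names are illustrative stand-ins, not kernel types:

    #include <stdbool.h>
    #include <stddef.h>

    struct pkt {                      /* illustrative stand-in for an skb */
            size_t len;               /* total packet length */
            size_t headlen;           /* bytes available in the linear head */
            size_t thoff;             /* transport header offset */
    };

    /* True only if the transport header fits in the linear head and the
     * payload is larger than one segment; otherwise the packet is not
     * really GSO and should not get GSO metadata. */
    static bool gso_meta_is_sane(const struct pkt *p, size_t thlen, size_t gso_size)
    {
            size_t p_off = p->thoff + thlen;   /* first payload byte */

            if (p_off > p->headlen)
                    return false;
            return p->len - p_off > gso_size;  /* p_off <= headlen <= len here */
    }
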
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index e223e2632edd..8b8d13f01cae 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -149,7 +149,8 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
void virtio_transport_destruct(struct vsock_sock *vsk);
-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt);
+void virtio_transport_recv_pkt(struct virtio_transport *t,
+ struct virtio_vsock_pkt *pkt);
void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 6ae8dd1d784f..206957b1b54d 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -103,7 +103,7 @@ extern void vunmap(const void *addr);
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
unsigned long uaddr, void *kaddr,
- unsigned long size);
+ unsigned long pgoff, unsigned long size);
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff);
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 6dad031be3c2..3a71ad716da5 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -51,8 +51,10 @@ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int);
int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
+int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **);
int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
int __vfs_removexattr(struct dentry *, const char *);
+int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **);
int vfs_removexattr(struct dentry *, const char *);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 2219cce81ca4..4638dddc040b 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -36,7 +36,7 @@ enum zs_mapmode {
struct zs_pool_stats {
/* How many pages were migrated (freed) */
- unsigned long pages_compacted;
+ atomic_long_t pages_compacted;
};
struct zs_pool;
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 0c82d7ea6ee1..c48b750de2fc 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -67,7 +67,8 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
{
dtm->install = jiffies_to_clock_t(jiffies - stm->install);
dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse);
- dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse);
+ dtm->firstuse = stm->firstuse ?
+ jiffies_to_clock_t(jiffies - stm->firstuse) : 0;
dtm->expires = jiffies_to_clock_t(stm->expires);
}
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 6def0351bcc3..db2a87981dd4 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -235,8 +235,10 @@ struct ipv6_stub {
const struct in6_addr *addr);
int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
const struct in6_addr *addr);
- int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
- struct dst_entry **dst, struct flowi6 *fl6);
+ struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
+ const struct sock *sk,
+ struct flowi6 *fl6,
+ const struct in6_addr *final_dst);
struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
struct fib6_info *(*fib6_lookup)(struct net *net, int oif,
@@ -303,6 +305,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
+void __ipv6_sock_ac_close(struct sock *sk);
void ipv6_sock_ac_close(struct sock *sk);
int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr);
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index cc2d0c3b475b..6a61faf0cc79 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -526,6 +526,7 @@ struct hci_chan {
struct sk_buff_head data_q;
unsigned int sent;
__u8 state;
+ bool amp;
};
struct hci_conn_params {
@@ -1287,16 +1288,34 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
conn->security_cfm_cb(conn, status);
}
-static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
- __u8 encrypt)
+static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
struct hci_cb *cb;
+ __u8 encrypt;
+
+ if (conn->state == BT_CONFIG) {
+ if (!status)
+ conn->state = BT_CONNECTED;
- if (conn->sec_level == BT_SECURITY_SDP)
- conn->sec_level = BT_SECURITY_LOW;
+ hci_connect_cfm(conn, status);
+ hci_conn_drop(conn);
+ return;
+ }
- if (conn->pending_sec_level > conn->sec_level)
- conn->sec_level = conn->pending_sec_level;
+ if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+ encrypt = 0x00;
+ else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
+ encrypt = 0x02;
+ else
+ encrypt = 0x01;
+
+ if (!status) {
+ if (conn->sec_level == BT_SECURITY_SDP)
+ conn->sec_level = BT_SECURITY_LOW;
+
+ if (conn->pending_sec_level > conn->sec_level)
+ conn->sec_level = conn->pending_sec_level;
+ }
mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 0697fd413087..21dbd38f724d 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -619,6 +619,8 @@ struct l2cap_ops {
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
unsigned long hdr_len,
unsigned long len, int nb);
+ int (*filter) (struct l2cap_chan * chan,
+ struct sk_buff *skb);
};
struct l2cap_conn {
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 8116648873c3..c458f084f7bb 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -170,6 +170,11 @@ struct slave {
struct rtnl_link_stats64 slave_stats;
};
+static inline struct slave *to_slave(struct kobject *kobj)
+{
+ return container_of(kobj, struct slave, kobj);
+}
+
struct bond_up_slave {
unsigned int count;
struct rcu_head rcu;
@@ -733,6 +738,9 @@ extern struct bond_parm_tbl ad_select_tbl[];
/* exported from bond_netlink.c */
extern struct rtnl_link_ops bond_link_ops;
+/* exported from bond_sysfs_slave.c */
+extern const struct sysfs_ops slave_sysfs_ops;
+
static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
{
atomic_long_inc(&dev->tx_dropped);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index ae936cd5567e..b96debd18e14 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4605,7 +4605,7 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
*/
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype,
- u8 data_offset);
+ u8 data_offset, bool is_amsdu);
/**
* ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
@@ -4617,7 +4617,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype)
{
- return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0);
+ return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false);
}
/**
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 461e8a7661b7..d28df7adf948 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -196,6 +196,7 @@ struct dsa_port {
unsigned int index;
const char *name;
const struct dsa_port *cpu_dp;
+ const char *mac;
struct device_node *dn;
unsigned int ageing_time;
u8 stp_state;
diff --git a/include/net/dst.h b/include/net/dst.h
index 35ae45fa0758..d2728525df5a 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -412,7 +412,15 @@ static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, co
static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
struct sk_buff *skb)
{
- struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);
+ struct neighbour *n = NULL;
+
+ /* Packets from tunnel devices (e.g. bareudp) may carry only metadata
+ * in skb's dst pointer, so check that the neigh_lookup op is
+ * implemented before calling it.
+ */
+ if (dst->ops->neigh_lookup)
+ n = dst->ops->neigh_lookup(dst, skb, NULL);
+
return IS_ERR(n) ? NULL : n;
}
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index decf6012a401..3e3a1a38884a 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -34,12 +34,6 @@ struct genl_info;
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
- * @mcast_bind: a socket bound to the given multicast group (which
- * is given as the offset into the groups array)
- * @mcast_unbind: a socket was unbound from the given multicast group.
- * Note that unbind() will not be called symmetrically if the
- * generic netlink family is removed while there are still open
- * sockets.
* @attrbuf: buffer to store parsed attributes (private)
* @mcgrps: multicast groups used by this family
* @n_mcgrps: number of multicast groups
@@ -62,8 +56,6 @@ struct genl_family {
void (*post_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
- int (*mcast_bind)(struct net *net, int group);
- void (*mcast_unbind)(struct net *net, int group);
struct nlattr ** attrbuf; /* private */
const struct genl_ops * ops;
const struct genl_multicast_group *mcgrps;
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 8665bf24e3b7..ffe4a5d2bbe7 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -47,6 +47,16 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32
__icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
}
+#if IS_ENABLED(CONFIG_NF_NAT)
+void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+#else
+static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+ struct ip_options opts = { 0 };
+ __icmp_send(skb_in, type, code, info, &opts);
+}
+#endif
+
int icmp_rcv(struct sk_buff *skb);
void icmp_err(struct sk_buff *skb, u32 info);
int icmp_init(void);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 371b3b45fd5c..da8a582ab032 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -139,8 +139,8 @@ struct inet_connection_sock {
} icsk_mtup;
u32 icsk_user_timeout;
- u64 icsk_ca_priv[88 / sizeof(u64)];
-#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64))
+ u64 icsk_ca_priv[104 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE (13 * sizeof(u64))
};
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
@@ -288,7 +288,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
}
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
void inet_csk_destroy_sock(struct sock *sk);
@@ -313,5 +313,9 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
+/* update the fast reuse flag when adding a socket */
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+ struct sock *sk);
+
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
#endif /* _INET_CONNECTION_SOCK_H */
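
The icsk_ca_priv[] change above grows the per-socket congestion-control scratch area from 88 to 104 bytes (11 to 13 u64 slots). Any module that stores its state there has to fit inside ICSK_CA_PRIV_SIZE; a compile-time check in the spirit of the kernel's BUILD_BUG_ON, here with C11 _Static_assert and a hypothetical state struct:

    #include <stdint.h>

    #define ICSK_CA_PRIV_SIZE (13 * sizeof(uint64_t))   /* 104 bytes, as above */

    struct my_cc_state {                /* hypothetical per-socket CC state */
            uint64_t pacing_rate;
            uint64_t min_rtt_us;
            uint32_t mode;
            uint32_t round_count;
    };

    _Static_assert(sizeof(struct my_cc_state) <= ICSK_CA_PRIV_SIZE,
                   "congestion-control private state does not fit in icsk_ca_priv");
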
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 482a1b705362..f667db202a3e 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -4,6 +4,7 @@
#include <linux/ip.h>
#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
#include <net/inet_sock.h>
#include <net/dsfield.h>
@@ -142,7 +143,7 @@ static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
static inline int INET_ECN_set_ce(struct sk_buff *skb)
{
- switch (skb->protocol) {
+ switch (skb_protocol(skb, true)) {
case cpu_to_be16(ETH_P_IP):
if (skb_network_header(skb) + sizeof(struct iphdr) <=
skb_tail_pointer(skb))
@@ -209,12 +210,16 @@ static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
{
__u8 inner;
- if (skb->protocol == htons(ETH_P_IP))
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
inner = ip_hdr(skb)->tos;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ break;
+ case htons(ETH_P_IPV6):
inner = ipv6_get_dsfield(ipv6_hdr(skb));
- else
+ break;
+ default:
return 0;
+ }
return INET_ECN_decapsulate(skb, oiph->tos, inner);
}
@@ -224,12 +229,16 @@ static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
{
__u8 inner;
- if (skb->protocol == htons(ETH_P_IP))
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
inner = ip_hdr(skb)->tos;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ break;
+ case htons(ETH_P_IPV6):
inner = ipv6_get_dsfield(ipv6_hdr(skb));
- else
+ break;
+ default:
return 0;
+ }
return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
}
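
INET_ECN_set_ce() and the two decapsulate helpers now key off skb_protocol(skb, true) rather than the raw skb->protocol, so VLAN-tagged traffic resolves to the inner IPv4/IPv6 ethertype. A standalone sketch of that idea over a raw Ethernet frame; this is an illustration of skipping 802.1Q/802.1ad tags, not the kernel's skb_protocol() implementation:

    #include <arpa/inet.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static uint16_t l3_ethertype(const uint8_t *frame, size_t len)
    {
            size_t off = 12;                      /* ethertype after dst+src MAC */
            uint16_t proto = 0;

            while (off + 2 <= len) {
                    memcpy(&proto, frame + off, 2);
                    proto = ntohs(proto);
                    if (proto != 0x8100 && proto != 0x88a8)
                            break;                /* not a VLAN tag: done */
                    off += 4;                     /* skip TCI + move to inner ethertype */
            }
            return proto;                         /* e.g. 0x0800 for IPv4 */
    }
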
diff --git a/include/net/ip.h b/include/net/ip.h
index 5b29f357862d..aad003685c31 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -399,12 +399,18 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
bool forwarding)
{
struct net *net = dev_net(dst->dev);
+ unsigned int mtu;
if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
ip_mtu_locked(dst) ||
!forwarding)
return dst_mtu(dst);
+ /* 'forwarding = true' case should always honour route mtu */
+ mtu = dst_metric_raw(dst, RTAX_MTU);
+ if (mtu)
+ return mtu;
+
return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 7b9c82de11cc..5e26d61867b2 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -234,6 +234,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
return rt->rt6i_flags & RTF_ANYCAST ||
(rt->rt6i_dst.plen < 127 &&
+ !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) &&
ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
}
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index e11423530d64..f8873c4eb003 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -489,9 +489,11 @@ static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
const void *from, int len,
__be16 flags)
{
- memcpy(ip_tunnel_info_opts(info), from, len);
info->options_len = len;
- info->key.tun_flags |= flags;
+ if (len > 0) {
+ memcpy(ip_tunnel_info_opts(info), from, len);
+ info->key.tun_flags |= flags;
+ }
}
static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
@@ -537,7 +539,6 @@ static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
__be16 flags)
{
info->options_len = 0;
- info->key.tun_flags |= flags;
}
#endif /* CONFIG_INET */
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index af0ede9ad4d0..c31e54a41b5c 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1614,18 +1614,16 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
}
#endif /* CONFIG_IP_VS_NFCT */
-/* Really using conntrack? */
-static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
- struct sk_buff *skb)
+/* Using old conntrack that cannot be redirected to another real server? */
+static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp,
+ struct sk_buff *skb)
{
#ifdef CONFIG_IP_VS_NFCT
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
- if (!(cp->flags & IP_VS_CONN_F_NFCT))
- return false;
ct = nf_ct_get(skb, &ctinfo);
- if (ct)
+ if (ct && nf_ct_is_confirmed(ct))
return true;
#endif
return false;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index ff33f498c137..4c2e40882e88 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -959,7 +959,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
struct flowi6 *fl6);
-struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
+struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst,
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index f45141bdbb83..ac4d70aeee12 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -85,7 +85,7 @@ struct nf_conn {
struct hlist_node nat_bysource;
#endif
/* all members below initialized via memset */
- u8 __nfct_init_offset[0];
+ struct { } __nfct_init_offset;
/* If we were expected by an expectation, this will be it */
struct nf_conn *master;
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 0d3920896d50..716db4a0fed8 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -108,6 +108,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
unsigned int logflags);
void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
struct sock *sk);
+void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb);
void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 024636c31adc..93253ba1eeac 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -130,6 +130,8 @@ static inline u8 nft_reg_load8(u32 *sreg)
static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
unsigned int len)
{
+ if (len % NFT_REG32_SIZE)
+ dst[len / NFT_REG32_SIZE] = 0;
memcpy(dst, src, len);
}
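
nft_data_copy() above now zeroes the trailing 32-bit register before copying whenever the length is not register-aligned, so stale bytes in the destination cannot linger past the copied data. The same pattern in self-contained form, with the register size and names chosen for illustration:

    #include <stdint.h>
    #include <string.h>

    #define REG32_SIZE sizeof(uint32_t)

    /* Copy len bytes into an array of 32-bit registers; if len is not a
     * multiple of the register size, clear the last touched register
     * first so its unwritten tail bytes are deterministically zero. */
    static void regs_copy(uint32_t *dst, const void *src, size_t len)
    {
            if (len % REG32_SIZE)
                    dst[len / REG32_SIZE] = 0;
            memcpy(dst, src, len);
    }
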
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 9991e5ef52cc..fbfa59801454 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -70,7 +70,9 @@ struct netns_xfrm {
#if IS_ENABLED(CONFIG_IPV6)
struct dst_ops xfrm6_dst_ops;
#endif
- spinlock_t xfrm_state_lock;
+ spinlock_t xfrm_state_lock;
+ seqcount_t xfrm_state_hash_generation;
+
spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
};
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index df5c69db68af..e67564af6f93 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -310,6 +310,7 @@ int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len,
struct sk_buff **resp);
struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
+void nci_hci_deallocate(struct nci_dev *ndev);
int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
const u8 *param, size_t param_len);
int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate,
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 7dc769e5452b..5e99771a5dcc 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -122,17 +122,6 @@ static inline void qdisc_run(struct Qdisc *q)
}
}
-static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
-{
- /* We need to take extra care in case the skb came via
- * vlan accelerated path. In that case, use skb->vlan_proto
- * as the original vlan header was already stripped.
- */
- if (skb_vlan_tag_present(skb))
- return skb->vlan_proto;
- return skb->protocol;
-}
-
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
*/
diff --git a/include/net/red.h b/include/net/red.h
index 9665582c4687..ff07a7cedf68 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -168,14 +168,24 @@ static inline void red_set_vars(struct red_vars *v)
v->qcount = -1;
}
-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
+ u8 Scell_log, u8 *stab)
{
- if (fls(qth_min) + Wlog > 32)
+ if (fls(qth_min) + Wlog >= 32)
return false;
- if (fls(qth_max) + Wlog > 32)
+ if (fls(qth_max) + Wlog >= 32)
+ return false;
+ if (Scell_log >= 32)
return false;
if (qth_max < qth_min)
return false;
+ if (stab) {
+ int i;
+
+ for (i = 0; i < RED_STAB_SIZE; i++)
+ if (stab[i] >= 32)
+ return false;
+ }
return true;
}
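
red_check_params() above now rejects Wlog values that would push the scaled queue thresholds out of a signed 32-bit range (>= 32 instead of > 32), bounds Scell_log, and validates every entry of the optional stab[] table, since all of these are later used as shift counts. A userspace sketch of the same checks, with fls() emulated via __builtin_clz and the parameter names following the header above:

    #include <stdbool.h>
    #include <stdint.h>

    #define RED_STAB_SIZE 256

    static int fls32(uint32_t x)          /* 1-based index of the highest set bit */
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    static bool red_params_ok(uint32_t qth_min, uint32_t qth_max, uint8_t Wlog,
                              uint8_t Scell_log, const uint8_t *stab)
    {
            if (fls32(qth_min) + Wlog >= 32)  /* keep qth_min << Wlog in signed 32-bit range */
                    return false;
            if (fls32(qth_max) + Wlog >= 32)
                    return false;
            if (Scell_log >= 32)              /* Scell_log is a shift count */
                    return false;
            if (qth_max < qth_min)
                    return false;
            if (stab) {
                    for (int i = 0; i < RED_STAB_SIZE; i++)
                            if (stab[i] >= 32)   /* stab entries are shift counts too */
                                    return false;
            }
            return true;
    }
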
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 0bbaa5488423..3d961a3cf3c4 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -33,6 +33,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
*
* @list: Used internally
* @kind: Identifier
+ * @netns_refund: Physical device, move to init_net on netns exit
* @maxtype: Highest device specific netlink attribute number
* @policy: Netlink policy for device specific attribute validation
* @validate: Optional validation function for netlink/changelink parameters
@@ -64,6 +65,7 @@ struct rtnl_link_ops {
size_t priv_size;
void (*setup)(struct net_device *dev);
+ bool netns_refund;
unsigned int maxtype;
const struct nla_policy *policy;
int (*validate)(struct nlattr *tb[],
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 86f034b524d4..48d74674d5e9 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -361,11 +361,13 @@ enum {
ipv4_is_anycast_6to4(a))
/* Flags used for the bind address copy functions. */
-#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by
+#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by
local sock family */
-#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by
+#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by
+ local sock family */
+#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by
peer */
-#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by
+#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by
peer */
/* Reasons to retransmit. */
diff --git a/include/net/sock.h b/include/net/sock.h
index f359e5c94762..bc752237dff3 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -845,6 +845,8 @@ static inline int sk_memalloc_socks(void)
{
return static_branch_unlikely(&memalloc_socks_key);
}
+
+void __receive_sock(struct file *file);
#else
static inline int sk_memalloc_socks(void)
@@ -852,6 +854,8 @@ static inline int sk_memalloc_socks(void)
return 0;
}
+static inline void __receive_sock(struct file *file)
+{ }
#endif
static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
@@ -896,11 +900,11 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
skb_dst_force(skb);
if (!sk->sk_backlog.tail)
- sk->sk_backlog.head = skb;
+ WRITE_ONCE(sk->sk_backlog.head, skb);
else
sk->sk_backlog.tail->next = skb;
- sk->sk_backlog.tail = skb;
+ WRITE_ONCE(sk->sk_backlog.tail, skb);
skb->next = NULL;
}
@@ -1775,7 +1779,6 @@ static inline int sk_rx_queue_get(const struct sock *sk)
static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
- sk_tx_queue_clear(sk);
sk->sk_socket = sock;
}
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 918bfd0d7d1f..3f0d654984cf 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -53,7 +53,7 @@ extern struct inet_hashinfo tcp_hashinfo;
extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);
-#define MAX_TCP_HEADER (128 + MAX_HEADER)
+#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS 48
#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
@@ -1373,6 +1373,24 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk, sk->sk_rcvbuf);
}
+/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
+ * If 87.5% (7/8) of the space has been consumed, we want to override the
+ * SO_RCVLOWAT constraint, since we are receiving skbs with too small a
+ * len/truesize ratio.
+ */
+static inline bool tcp_rmem_pressure(const struct sock *sk)
+{
+ int rcvbuf, threshold;
+
+ if (tcp_under_memory_pressure(sk))
+ return true;
+
+ rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+ threshold = rcvbuf - (rcvbuf >> 3);
+
+ return atomic_read(&sk->sk_rmem_alloc) > threshold;
+}
+
extern void tcp_openreq_init_rwin(struct request_sock *req,
const struct sock *sk_listener,
const struct dst_entry *dst);
@@ -1862,7 +1880,7 @@ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
static inline bool tcp_stream_memory_free(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
- u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
+ u32 notsent_bytes = READ_ONCE(tp->write_seq) - tp->snd_nxt;
return notsent_bytes < tcp_notsent_lowat(tp);
}
@@ -1948,7 +1966,7 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
u32 reo_wnd);
-extern void tcp_rack_mark_lost(struct sock *sk);
+extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
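
The tcp_rmem_pressure() helper added above treats the receive queue as under pressure once 87.5% (7/8) of sk_rcvbuf is consumed, letting TCP override SO_RCVLOWAT when incoming skbs have a poor len/truesize ratio. The threshold arithmetic in isolation, with plain integers standing in for the socket fields:

    #include <stdbool.h>

    /* rcvbuf - rcvbuf/8 == 7/8 of the receive buffer. */
    static bool rmem_over_pressure_threshold(int rmem_alloc, int rcvbuf)
    {
            int threshold = rcvbuf - (rcvbuf >> 3);

            return rmem_alloc > threshold;
    }

    /* Example: with rcvbuf = 131072, pressure starts above 114688 bytes. */
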
diff --git a/include/net/tls.h b/include/net/tls.h
index 98f5ad0319a2..9caef9bad075 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -163,6 +163,12 @@ enum {
enum tls_context_flags {
TLS_RX_SYNC_RUNNING = 0,
+ /* tls_dev_del was called for the RX side, device state was released,
+ * but tls_ctx->netdev might still be kept, because TX-side driver
+ * resources might not be released yet. Used to prevent the second
+ * tls_dev_del call in tls_device_down if it happens simultaneously.
+ */
+ TLS_RX_DEV_CLOSED = 2,
};
struct cipher_context {
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 48dc1ce2170d..fe8bed557691 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1016,7 +1016,7 @@ struct xfrm_dst {
static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
{
#ifdef CONFIG_XFRM
- if (dst->xfrm) {
+ if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
return xdst->path;
@@ -1028,7 +1028,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
{
#ifdef CONFIG_XFRM
- if (dst->xfrm) {
+ if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
return xdst->child;
}
@@ -1083,6 +1083,7 @@ struct xfrm_offload {
#define XFRM_GRO 32
#define XFRM_ESP_NO_TRAILER 64
#define XFRM_DEV_RESUME 128
+#define XFRM_XMIT 256
__u32 status;
#define CRYPTO_SUCCESS 1
@@ -1872,21 +1873,17 @@ static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_es
static inline int xfrm_replay_clone(struct xfrm_state *x,
struct xfrm_state *orig)
{
- x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn),
+
+ x->replay_esn = kmemdup(orig->replay_esn,
+ xfrm_replay_state_esn_len(orig->replay_esn),
GFP_KERNEL);
if (!x->replay_esn)
return -ENOMEM;
-
- x->replay_esn->bmp_len = orig->replay_esn->bmp_len;
- x->replay_esn->replay_window = orig->replay_esn->replay_window;
-
- x->preplay_esn = kmemdup(x->replay_esn,
- xfrm_replay_state_esn_len(x->replay_esn),
+ x->preplay_esn = kmemdup(orig->preplay_esn,
+ xfrm_replay_state_esn_len(orig->preplay_esn),
GFP_KERNEL);
- if (!x->preplay_esn) {
- kfree(x->replay_esn);
+ if (!x->preplay_esn)
return -ENOMEM;
- }
return 0;
}
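
xfrm_replay_clone() above switches from kzalloc() plus manual field copies to kmemdup() of the whole variable-sized replay state, which both shortens the code and copies the bitmap contents that the old version left zeroed. A userspace stand-in for the kmemdup() pattern, with malloc/memcpy replacing the kernel allocator:

    #include <stdlib.h>
    #include <string.h>

    /* Duplicate a variable-sized blob in one step, like the kernel's
     * kmemdup(): allocate and copy, returning NULL on allocation failure. */
    static void *blob_dup(const void *src, size_t len)
    {
            void *p = malloc(len);

            if (p)
                    memcpy(p, src, len);
            return p;
    }
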
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 3b00231cc084..62f851569936 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -95,7 +95,7 @@ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
static inline void uobj_put_destroy(struct ib_uobject *uobj)
{
- rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
}
static inline void uobj_put_read(struct ib_uobject *uobj)
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 58507c7783cf..eafccb2d4d6f 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -261,7 +261,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
struct fc_frame *);
/* libfcoe funcs */
-u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
+u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int);
int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
const struct libfc_function_template *, int init_fcp);
u32 fcoe_fc_crc(struct fc_frame *fp);
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index c9bd935f4fd1..1ee0f30ae190 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -145,6 +145,9 @@ struct iscsi_task {
void *dd_data; /* driver/transport data */
};
+/* invalid scsi_task pointer */
+#define INVALID_SCSI_TASK (struct iscsi_task *)-1l
+
static inline int iscsi_task_has_unsol_data(struct iscsi_task *task)
{
return task->unsol_r2t.data_length > task->unsol_r2t.sent;
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
index 731ac09ed231..5b567b43e1b1 100644
--- a/include/scsi/scsi_common.h
+++ b/include/scsi/scsi_common.h
@@ -25,6 +25,13 @@ scsi_command_size(const unsigned char *cmnd)
scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
}
+static inline unsigned char
+scsi_command_control(const unsigned char *cmnd)
+{
+ return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
+ cmnd[1] : cmnd[COMMAND_SIZE(cmnd[0]) - 1];
+}
+
/* Returns a human-readable name for the device */
extern const char *scsi_device_type(unsigned type);
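
scsi_command_control() above centralizes the lookup of a CDB's CONTROL byte: byte 1 for variable-length CDBs, otherwise the last byte of the fixed-format CDB, whose size follows from the opcode's group code. A standalone sketch of that rule; the size table mirrors the usual SCSI group-code sizes, and 0x7f is the VARIABLE LENGTH CDB opcode:

    #include <stdint.h>

    #define VARIABLE_LENGTH_CMD 0x7f

    /* Fixed CDB length by opcode group (top three bits of the opcode). */
    static const uint8_t cdb_group_size[8] = { 6, 10, 10, 12, 16, 12, 10, 10 };

    static uint8_t cdb_control_byte(const uint8_t *cdb)
    {
            if (cdb[0] == VARIABLE_LENGTH_CMD)
                    return cdb[1];                               /* CONTROL is byte 1 */

            return cdb[cdb_group_size[(cdb[0] >> 5) & 7] - 1];   /* last byte of the CDB */
    }
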
diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h
index 9b1d43d671a3..8c18dc6d3fde 100644
--- a/include/soc/nps/common.h
+++ b/include/soc/nps/common.h
@@ -45,6 +45,12 @@
#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60
#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422
+#ifndef AUX_IENABLE
+#define AUX_IENABLE 0x40c
+#endif
+
+#define CTOP_AUX_IACK (0xFFFFF800 + 0x088)
+
#ifndef __ASSEMBLY__
/* In order to increase compilation test coverage */
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 127c2713b543..99855d1c7476 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -57,6 +57,7 @@ struct snd_compr_runtime {
* @direction: stream direction, playback/recording
* @metadata_set: metadata set flag, true when set
* @next_track: has userspace signal next track transition, true when set
+ * @partial_drain: undergoing partial_drain for stream, true when set
* @private_data: pointer to DSP private data
*/
struct snd_compr_stream {
@@ -68,6 +69,7 @@ struct snd_compr_stream {
enum snd_compr_direction direction;
bool metadata_set;
bool next_track;
+ bool partial_drain;
void *private_data;
};
@@ -171,7 +173,13 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
if (snd_BUG_ON(!stream))
return;
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ /* for partial_drain case we are back to running state on success */
+ if (stream->partial_drain) {
+ stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
+ stream->partial_drain = false; /* clear this flag as well */
+ } else {
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ }
wake_up(&stream->runtime->sleep);
}
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index c2a71fd8dfaf..1894af415b20 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -76,6 +76,7 @@ struct snd_rawmidi_runtime {
size_t avail_min; /* min avail for wakeup */
size_t avail; /* max used buffer for wakeup */
size_t xruns; /* over/underruns counter */
+ int buffer_ref; /* buffer reference count */
/* misc */
spinlock_t lock;
wait_queue_head_t sleep;
diff --git a/include/sound/rt5670.h b/include/sound/rt5670.h
index b7d60510819b..491c7a8fd2bb 100644
--- a/include/sound/rt5670.h
+++ b/include/sound/rt5670.h
@@ -15,6 +15,7 @@ struct rt5670_platform_data {
int jd_mode;
bool in2_diff;
bool dev_gpio;
+ bool gpio1_is_ext_spk_en;
bool dmic_en;
unsigned int dmic1_data_pin;
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index f2e6abea8490..70d18444d18d 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -674,7 +674,7 @@ struct iscsi_session {
atomic_t session_logout;
atomic_t session_reinstatement;
atomic_t session_stop_active;
- atomic_t sleep_on_sess_wait_comp;
+ atomic_t session_close;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 51b6f50eabee..0deeff9b4496 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -69,6 +69,7 @@ int transport_backend_register(const struct target_backend_ops *);
void target_backend_unregister(const struct target_backend_ops *);
void target_complete_cmd(struct se_cmd *, u8);
+void target_set_cmd_data_length(struct se_cmd *, int);
void target_complete_cmd_with_length(struct se_cmd *, u8, int);
void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *);
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 4c91cadd1871..4a0b18921e06 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -1322,17 +1322,15 @@ DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
TRACE_EVENT(svcrdma_post_send,
TP_PROTO(
- const struct ib_send_wr *wr,
- int status
+ const struct ib_send_wr *wr
),
- TP_ARGS(wr, status),
+ TP_ARGS(wr),
TP_STRUCT__entry(
__field(const void *, cqe)
__field(unsigned int, num_sge)
__field(u32, inv_rkey)
- __field(int, status)
),
TP_fast_assign(
@@ -1340,12 +1338,11 @@ TRACE_EVENT(svcrdma_post_send,
__entry->num_sge = wr->num_sge;
__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
wr->ex.invalidate_rkey : 0;
- __entry->status = status;
),
- TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
+ TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x",
__entry->cqe, __entry->num_sge,
- __entry->inv_rkey, __entry->status
+ __entry->inv_rkey
)
);
@@ -1410,26 +1407,23 @@ TRACE_EVENT(svcrdma_wc_receive,
TRACE_EVENT(svcrdma_post_rw,
TP_PROTO(
const void *cqe,
- int sqecount,
- int status
+ int sqecount
),
- TP_ARGS(cqe, sqecount, status),
+ TP_ARGS(cqe, sqecount),
TP_STRUCT__entry(
__field(const void *, cqe)
__field(int, sqecount)
- __field(int, status)
),
TP_fast_assign(
__entry->cqe = cqe;
__entry->sqecount = sqecount;
- __entry->status = status;
),
- TP_printk("cqe=%p sqecount=%d status=%d",
- __entry->cqe, __entry->sqecount, __entry->status
+ TP_printk("cqe=%p sqecount=%d",
+ __entry->cqe, __entry->sqecount
)
);
@@ -1525,6 +1519,34 @@ DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
+TRACE_EVENT(svcrdma_sq_post_err,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma,
+ int status
+ ),
+
+ TP_ARGS(rdma, status),
+
+ TP_STRUCT__entry(
+ __field(int, avail)
+ __field(int, depth)
+ __field(int, status)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->avail = atomic_read(&rdma->sc_sq_avail);
+ __entry->depth = rdma->sc_sq_depth;
+ __entry->status = status;
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
+ __get_str(addr), __entry->avail, __entry->depth,
+ __entry->status
+ )
+);
+
#endif /* _TRACE_RPCRDMA_H */
#include <trace/define_trace.h>
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 0924119bcfa4..ca29a5b8603c 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -408,7 +408,7 @@ enum rxrpc_tx_point {
EM(rxrpc_cong_begin_retransmission, " Retrans") \
EM(rxrpc_cong_cleared_nacks, " Cleared") \
EM(rxrpc_cong_new_low_nack, " NewLowN") \
- EM(rxrpc_cong_no_change, "") \
+ EM(rxrpc_cong_no_change, " -") \
EM(rxrpc_cong_progress, " Progres") \
EM(rxrpc_cong_retransmit_again, " ReTxAgn") \
EM(rxrpc_cong_rtt_window_end, " RttWinE") \
@@ -1549,6 +1549,41 @@ TRACE_EVENT(rxrpc_notify_socket,
__entry->serial)
);
+TRACE_EVENT(rxrpc_rx_discard_ack,
+ TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial,
+ rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first,
+ rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev),
+
+ TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first,
+ prev_pkt, call_ackr_prev),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, debug_id )
+ __field(rxrpc_serial_t, serial )
+ __field(rxrpc_seq_t, first_soft_ack)
+ __field(rxrpc_seq_t, call_ackr_first)
+ __field(rxrpc_seq_t, prev_pkt)
+ __field(rxrpc_seq_t, call_ackr_prev)
+ ),
+
+ TP_fast_assign(
+ __entry->debug_id = debug_id;
+ __entry->serial = serial;
+ __entry->first_soft_ack = first_soft_ack;
+ __entry->call_ackr_first = call_ackr_first;
+ __entry->prev_pkt = prev_pkt;
+ __entry->call_ackr_prev = call_ackr_prev;
+ ),
+
+ TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x",
+ __entry->debug_id,
+ __entry->serial,
+ __entry->first_soft_ack,
+ __entry->call_ackr_first,
+ __entry->prev_pkt,
+ __entry->call_ackr_prev)
+ );
+
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h
index 7475c7be165a..d4aac3436595 100644
--- a/include/trace/events/sctp.h
+++ b/include/trace/events/sctp.h
@@ -75,15 +75,6 @@ TRACE_EVENT(sctp_probe,
__entry->pathmtu = asoc->pathmtu;
__entry->rwnd = asoc->peer.rwnd;
__entry->unack_data = asoc->unack_data;
-
- if (trace_sctp_probe_path_enabled()) {
- struct sctp_transport *sp;
-
- list_for_each_entry(sp, &asoc->peer.transport_addr_list,
- transports) {
- trace_sctp_probe_path(sp, asoc);
- }
- }
),
TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d "
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index 914a872dd343..e87a3716b0ac 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -140,6 +140,7 @@ TRACE_EVENT(target_sequencer_start,
__field( unsigned int, opcode )
__field( unsigned int, data_length )
__field( unsigned int, task_attribute )
+ __field( unsigned char, control )
__array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
__string( initiator, cmd->se_sess->se_node_acl->initiatorname )
),
@@ -149,6 +150,7 @@ TRACE_EVENT(target_sequencer_start,
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
+ __entry->control = scsi_command_control(cmd->t_task_cdb);
memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
),
@@ -158,9 +160,7 @@ TRACE_EVENT(target_sequencer_start,
show_opcode_name(__entry->opcode),
__entry->data_length, __print_hex(__entry->cdb, 16),
show_task_attribute_name(__entry->task_attribute),
- scsi_command_size(__entry->cdb) <= 16 ?
- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
- __entry->cdb[1]
+ __entry->control
)
);
@@ -175,6 +175,7 @@ TRACE_EVENT(target_cmd_complete,
__field( unsigned int, opcode )
__field( unsigned int, data_length )
__field( unsigned int, task_attribute )
+ __field( unsigned char, control )
__field( unsigned char, scsi_status )
__field( unsigned char, sense_length )
__array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
@@ -187,6 +188,7 @@ TRACE_EVENT(target_cmd_complete,
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
+ __entry->control = scsi_command_control(cmd->t_task_cdb);
__entry->scsi_status = cmd->scsi_status;
__entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ?
min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0;
@@ -203,9 +205,7 @@ TRACE_EVENT(target_cmd_complete,
show_opcode_name(__entry->opcode),
__entry->data_length, __print_hex(__entry->cdb, 16),
show_task_attribute_name(__entry->task_attribute),
- scsi_command_size(__entry->cdb) <= 16 ?
- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
- __entry->cdb[1]
+ __entry->control
)
);
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 32db72c7c055..300afa559f46 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -20,7 +20,6 @@
{I_CLEAR, "I_CLEAR"}, \
{I_SYNC, "I_SYNC"}, \
{I_DIRTY_TIME, "I_DIRTY_TIME"}, \
- {I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \
{I_REFERENCED, "I_REFERENCED"} \
)
@@ -66,8 +65,9 @@ TRACE_EVENT(writeback_dirty_page,
),
TP_fast_assign(
- strncpy(__entry->name,
- mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name,
+ bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
+ NULL), 32);
__entry->ino = mapping ? mapping->host->i_ino : 0;
__entry->index = page->index;
),
@@ -96,8 +96,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
struct backing_dev_info *bdi = inode_to_bdi(inode);
/* may be called for files on pseudo FSes w/ unregistered bdi */
- strncpy(__entry->name,
- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->flags = flags;
@@ -176,8 +175,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
@@ -220,8 +219,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(unsigned int, cgroup_ino)
),
TP_fast_assign(
- strncpy(__entry->name,
- wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->nr_pages = work->nr_pages;
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
__entry->sync_mode = work->sync_mode;
@@ -274,7 +272,7 @@ DECLARE_EVENT_CLASS(writeback_class,
__field(unsigned int, cgroup_ino)
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: cgroup_ino=%u",
@@ -296,7 +294,7 @@ TRACE_EVENT(writeback_bdi_register,
__array(char, name, 32)
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
),
TP_printk("bdi %s",
__entry->name
@@ -321,7 +319,7 @@ DECLARE_EVENT_CLASS(wbc_class,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
__entry->sync_mode = wbc->sync_mode;
@@ -360,8 +358,9 @@ DEFINE_WBC_EVENT(wbc_writepage);
TRACE_EVENT(writeback_queue_io,
TP_PROTO(struct bdi_writeback *wb,
struct wb_writeback_work *work,
+ unsigned long dirtied_before,
int moved),
- TP_ARGS(wb, work, moved),
+ TP_ARGS(wb, work, dirtied_before, moved),
TP_STRUCT__entry(
__array(char, name, 32)
__field(unsigned long, older)
@@ -371,19 +370,17 @@ TRACE_EVENT(writeback_queue_io,
__field(unsigned int, cgroup_ino)
),
TP_fast_assign(
- unsigned long *older_than_this = work->older_than_this;
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
- __entry->older = older_than_this ? *older_than_this : 0;
- __entry->age = older_than_this ?
- (jiffies - *older_than_this) * 1000 / HZ : -1;
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
+ __entry->older = dirtied_before;
+ __entry->age = (jiffies - dirtied_before) * 1000 / HZ;
__entry->moved = moved;
__entry->reason = work->reason;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
__entry->name,
- __entry->older, /* older_than_this in jiffies */
- __entry->age, /* older_than_this in relative milliseconds */
+ __entry->older, /* dirtied_before in jiffies */
+ __entry->age, /* dirtied_before in relative milliseconds */
__entry->moved,
__print_symbolic(__entry->reason, WB_WORK_REASON),
__entry->cgroup_ino
@@ -458,7 +455,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
),
TP_fast_assign(
- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
__entry->write_bw = KBps(wb->write_bandwidth);
__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
__entry->dirty_rate = KBps(dirty_rate);
@@ -523,7 +520,7 @@ TRACE_EVENT(balance_dirty_pages,
TP_fast_assign(
unsigned long freerun = (thresh + bg_thresh) / 2;
- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
__entry->limit = global_wb_domain.dirty_limit;
__entry->setpoint = (global_wb_domain.dirty_limit +
@@ -583,8 +580,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
@@ -657,8 +654,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index d143e277cdaf..71ca8c4dc290 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1193,8 +1193,8 @@ union bpf_attr {
* Return
* The return value depends on the result of the test, and can be:
*
- * * 0, if the *skb* task belongs to the cgroup2.
- * * 1, if the *skb* task does not belong to the cgroup2.
+ * * 0, if current task belongs to the cgroup2.
+ * * 1, if current task does not belong to the cgroup2.
* * A negative error code, if an error occurred.
*
* int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
index 5ed721ad5b19..af2a44c08683 100644
--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h
@@ -28,4 +28,9 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
+#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
#endif /* _UAPI_LINUX_CONST_H */
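
Moving __ALIGN_KERNEL(), __ALIGN_KERNEL_MASK() and __KERNEL_DIV_ROUND_UP() into the UAPI const.h lets other UAPI headers include <linux/const.h> instead of the kernel-only <linux/kernel.h>, which is exactly the switch made in several headers below. Their behaviour in a self-contained check (the macros are copied here for demonstration and rely on GNU typeof, as in the header):

    #define __ALIGN_KERNEL(x, a)         __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
    #define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
    #define __KERNEL_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* Round up to a power-of-two boundary / divide rounding up. */
    _Static_assert(__ALIGN_KERNEL(10u, 8u) == 16u, "10 aligned to 8 is 16");
    _Static_assert(__ALIGN_KERNEL(16u, 8u) == 16u, "already aligned stays put");
    _Static_assert(__KERNEL_DIV_ROUND_UP(10u, 8u) == 2u, "ceil(10/8) is 2");
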
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index dc69391d2bba..fc21d3726b59 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -14,7 +14,7 @@
#ifndef _UAPI_LINUX_ETHTOOL_H
#define _UAPI_LINUX_ETHTOOL_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/if_ether.h>
diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
index bc2bcdec377b..769050771423 100644
--- a/include/uapi/linux/if_alg.h
+++ b/include/uapi/linux/if_alg.h
@@ -24,6 +24,22 @@ struct sockaddr_alg {
__u8 salg_name[64];
};
+/*
+ * Linux v4.12 and later removed the 64-byte limit on salg_name[]; it's now an
+ * arbitrary-length field. We had to keep the original struct above for source
+ * compatibility with existing userspace programs, though. Use the new struct
+ * below if support for very long algorithm names is needed. To do this,
+ * allocate 'sizeof(struct sockaddr_alg_new) + strlen(algname) + 1' bytes, and
+ * copy algname (including the null terminator) into salg_name.
+ */
+struct sockaddr_alg_new {
+ __u16 salg_family;
+ __u8 salg_type[14];
+ __u32 salg_feat;
+ __u32 salg_mask;
+ __u8 salg_name[];
+};
+
struct af_alg_iv {
__u32 ivlen;
__u8 iv[0];
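
The new struct sockaddr_alg_new ends in a flexible array so arbitrarily long algorithm names can be passed to AF_ALG. A hedged userspace sketch of the allocation pattern described in the comment above; it assumes installed UAPI headers that already export sockaddr_alg_new, and trims most error handling:

    #include <linux/if_alg.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Bind an AF_ALG transform whose name may exceed the legacy 64-byte
     * salg_name[] limit, sized per the comment above. */
    static int alg_bind_long_name(const char *type, const char *name)
    {
            size_t len = sizeof(struct sockaddr_alg_new) + strlen(name) + 1;
            struct sockaddr_alg_new *sa = calloc(1, len);
            int fd = socket(AF_ALG, SOCK_SEQPACKET, 0);

            if (fd < 0 || !sa)
                    goto fail;

            sa->salg_family = AF_ALG;
            strncpy((char *)sa->salg_type, type, sizeof(sa->salg_type) - 1);
            strcpy((char *)sa->salg_name, name);   /* NUL terminator included in len */

            if (bind(fd, (struct sockaddr *)sa, len) == 0) {
                    free(sa);
                    return fd;                     /* ready for accept() and crypto ops */
            }
    fail:
            if (fd >= 0)
                    close(fd);
            free(sa);
            return -1;
    }
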
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 61a5799b440b..c3e84f7c8261 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -795,7 +795,8 @@
#define SW_LINEIN_INSERT 0x0d /* set = inserted */
#define SW_MUTE_DEVICE 0x0e /* set = device disabled */
#define SW_PEN_INSERTED 0x0f /* set = pen inserted */
-#define SW_MAX 0x0f
+#define SW_MACHINE_COVER 0x10 /* set = cover closed */
+#define SW_MAX 0x10
#define SW_CNT (SW_MAX+1)
/*
diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h
index 0ff8f7477847..fadf2db71fe8 100644
--- a/include/uapi/linux/kernel.h
+++ b/include/uapi/linux/kernel.h
@@ -3,13 +3,6 @@
#define _UAPI_LINUX_KERNEL_H
#include <linux/sysinfo.h>
-
-/*
- * 'kernel.h' contains some often-used function prototypes etc
- */
-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
-#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
-
-#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#include <linux/const.h>
#endif /* _UAPI_LINUX_KERNEL_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 251be353f950..c297abc4e669 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -189,9 +189,11 @@ struct kvm_hyperv_exit {
#define KVM_EXIT_HYPERV_SYNIC 1
#define KVM_EXIT_HYPERV_HCALL 2
__u32 type;
+ __u32 pad1;
union {
struct {
__u32 msr;
+ __u32 pad2;
__u64 control;
__u64 evt_page;
__u64 msg_page;
@@ -744,9 +746,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_VM_PPC_HV 1
#define KVM_VM_PPC_PR 2
-/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
-#define KVM_VM_MIPS_TE 0
+/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */
+#define KVM_VM_MIPS_AUTO 0
#define KVM_VM_MIPS_VZ 1
+#define KVM_VM_MIPS_TE 2
#define KVM_S390_SIE_PAGE_OFFSET 1
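
The pad1/pad2 fields added to struct kvm_hyperv_exit keep the layout identical for 32-bit and 64-bit userspace: on i386, __u64 is only 4-byte aligned, so without explicit padding the union following the __u32 type field would start at offset 4 instead of 8. A small illustration with hypothetical structs and offsetof:

    #include <stddef.h>
    #include <stdint.h>

    struct exit_no_pad {                 /* hypothetical, for illustration */
            uint32_t type;
            union { uint64_t control; } u;
    };

    struct exit_padded {
            uint32_t type;
            uint32_t pad1;               /* explicit hole, as in the patch */
            union { uint64_t control; } u;
    };

    /* x86-64: offsetof(struct exit_no_pad, u) == 8 (implicit padding).
     * i386:   offsetof(struct exit_no_pad, u) == 4, so the two ABIs disagree.
     * With pad1, offsetof(struct exit_padded, u) == 8 on both. */
    _Static_assert(offsetof(struct exit_padded, u) == 8, "stable UAPI offset");
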
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
index f9a1be7fc696..ead2e72e5c88 100644
--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h
@@ -21,7 +21,7 @@
#define _UAPI_LINUX_LIGHTNVM_H
#ifdef __KERNEL__
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/ioctl.h>
#else /* __KERNEL__ */
#include <stdio.h>
diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h
index 45f369dc0a42..83a8c10fd104 100644
--- a/include/uapi/linux/mmc/ioctl.h
+++ b/include/uapi/linux/mmc/ioctl.h
@@ -3,6 +3,7 @@
#define LINUX_MMC_IOCTL_H
#include <linux/types.h>
+#include <linux/major.h>
struct mmc_ioc_cmd {
/* Implies direction of data. true = write, false = read */
diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
index 9999cc006390..1617eb9949a5 100644
--- a/include/uapi/linux/mroute6.h
+++ b/include/uapi/linux/mroute6.h
@@ -2,7 +2,7 @@
#ifndef _UAPI__LINUX_MROUTE6_H
#define _UAPI__LINUX_MROUTE6_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/in6.h> /* For struct sockaddr_in6. */
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 2f2c43d633c5..7b0189d6dfa9 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -247,6 +247,7 @@ struct nd_cmd_pkg {
#define NVDIMM_FAMILY_HPE1 1
#define NVDIMM_FAMILY_HPE2 2
#define NVDIMM_FAMILY_MSFT 3
+#define NVDIMM_FAMILY_HYPERV 4
#define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\
struct nd_cmd_pkg)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 5eac62e1b68d..cc00be102b9f 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -132,7 +132,7 @@ enum nf_tables_msg_types {
* @NFTA_LIST_ELEM: list element (NLA_NESTED)
*/
enum nft_list_attributes {
- NFTA_LIST_UNPEC,
+ NFTA_LIST_UNSPEC,
NFTA_LIST_ELEM,
__NFTA_LIST_MAX
};
diff --git a/include/uapi/linux/netfilter/nfnetlink_cthelper.h b/include/uapi/linux/netfilter/nfnetlink_cthelper.h
index a13137afc429..70af02092d16 100644
--- a/include/uapi/linux/netfilter/nfnetlink_cthelper.h
+++ b/include/uapi/linux/netfilter/nfnetlink_cthelper.h
@@ -5,7 +5,7 @@
#define NFCT_HELPER_STATUS_DISABLED 0
#define NFCT_HELPER_STATUS_ENABLED 1
-enum nfnl_acct_msg_types {
+enum nfnl_cthelper_msg_types {
NFNL_MSG_CTHELPER_NEW,
NFNL_MSG_CTHELPER_GET,
NFNL_MSG_CTHELPER_DEL,
diff --git a/include/uapi/linux/netfilter/x_tables.h b/include/uapi/linux/netfilter/x_tables.h
index a8283f7dbc51..b8c6bb233ac1 100644
--- a/include/uapi/linux/netfilter/x_tables.h
+++ b/include/uapi/linux/netfilter/x_tables.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_X_TABLES_H
#define _UAPI_X_TABLES_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#define XT_FUNCTION_MAXNAMELEN 30
diff --git a/include/uapi/linux/netfilter/xt_SECMARK.h b/include/uapi/linux/netfilter/xt_SECMARK.h
index 1f2a708413f5..beb2cadba8a9 100644
--- a/include/uapi/linux/netfilter/xt_SECMARK.h
+++ b/include/uapi/linux/netfilter/xt_SECMARK.h
@@ -20,4 +20,10 @@ struct xt_secmark_target_info {
char secctx[SECMARK_SECCTX_MAX];
};
+struct xt_secmark_target_info_v1 {
+ __u8 mode;
+ char secctx[SECMARK_SECCTX_MAX];
+ __u32 secid;
+};
+
#endif /*_XT_SECMARK_H_target */
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 776bc92e9118..3481cde43a84 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -2,7 +2,7 @@
#ifndef _UAPI__LINUX_NETLINK_H
#define _UAPI__LINUX_NETLINK_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/socket.h> /* for __kernel_sa_family_t */
#include <linux/types.h>
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index 8572930cf5b0..54a78529c8b3 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -136,6 +136,8 @@
#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000
#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000
+
+#define EXCHGID4_FLAG_SUPP_FENCE_OPS 0x00000004
/*
* Since the validity of these bits depends on whether
* they're set in the argument or response, have separate
@@ -143,6 +145,7 @@
*/
#define EXCHGID4_FLAG_MASK_A 0x40070103
#define EXCHGID4_FLAG_MASK_R 0x80070103
+#define EXCHGID4_2_FLAG_MASK_R 0x80070107
#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001
#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index f35eb72739c0..5fb4cdf37100 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -1079,7 +1079,7 @@ union perf_mem_data_src {
#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
/* 1 free */
-#define PERF_MEM_SNOOPX_SHIFT 37
+#define PERF_MEM_SNOOPX_SHIFT 38
/* locked instruction */
#define PERF_MEM_LOCK_NA 0x01 /* not available */
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index b0d15c73f6d7..1f2d8c81f0e0 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -329,6 +329,7 @@ struct mdp_superblock_1 {
#define MD_FEATURE_JOURNAL 512 /* support write cache */
#define MD_FEATURE_PPL 1024 /* support PPL */
#define MD_FEATURE_MULTIPLE_PPLS 2048 /* support for multiple PPLs */
+#define MD_FEATURE_RAID0_LAYOUT 4096 /* layout is meaningful for RAID0 */
#define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \
|MD_FEATURE_RECOVERY_OFFSET \
|MD_FEATURE_RESHAPE_ACTIVE \
@@ -341,6 +342,7 @@ struct mdp_superblock_1 {
|MD_FEATURE_JOURNAL \
|MD_FEATURE_PPL \
|MD_FEATURE_MULTIPLE_PPLS \
+ |MD_FEATURE_RAID0_LAYOUT \
)
struct r5l_payload_header {
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 23cd84868cc3..7272f85d6d6a 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <asm/bitsperlong.h>
#include <asm/swab.h>
/*
@@ -132,6 +133,15 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
__fswab64(x))
#endif
+static __always_inline unsigned long __swab(const unsigned long y)
+{
+#if __BITS_PER_LONG == 64
+ return __swab64(y);
+#else /* __BITS_PER_LONG == 32 */
+ return __swab32(y);
+#endif
+}
+
/**
* __swahw32 - return a word-swapped 32-bit value
* @x: value to wordswap
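The new __swab() helper just dispatches on the machine word size. A minimal userspace model of the same idea, using compiler builtins instead of the kernel's __swab32()/__swab64() macros:

#include <stdio.h>

static unsigned long swab_ulong(unsigned long x)
{
	/* pick the 32- or 64-bit byte swap to match sizeof(long) */
#if __SIZEOF_LONG__ == 8
	return __builtin_bswap64(x);
#else
	return __builtin_bswap32(x);
#endif
}

int main(void)
{
	printf("%#lx\n", swab_ulong(0x12345678UL)); /* 0x7856341200000000 on 64-bit */
	return 0;
}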
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index d71013fffaf6..d3393a6571ba 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -23,7 +23,7 @@
#ifndef _UAPI_LINUX_SYSCTL_H
#define _UAPI_LINUX_SYSCTL_H
-#include <linux/kernel.h>
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>
diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
index 900a32e63424..6a3ac496a56c 100644
--- a/include/uapi/linux/tty_flags.h
+++ b/include/uapi/linux/tty_flags.h
@@ -39,7 +39,7 @@
* WARNING: These flags are no longer used and have been superceded by the
* TTY_PORT_ flags in the iflags field (and not userspace-visible)
*/
-#ifndef _KERNEL_
+#ifndef __KERNEL__
#define ASYNCB_INITIALIZED 31 /* Serial port was initialized */
#define ASYNCB_SUSPENDED 30 /* Serial port is suspended */
#define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */
@@ -81,7 +81,7 @@
#define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI)
#define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
-#ifndef _KERNEL_
+#ifndef __KERNEL__
/* These flags are no longer used (and were always masked from userspace) */
#define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED)
#define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE)
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index d5a5caec8fbc..3f376688bd04 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -364,6 +364,9 @@ struct usb_config_descriptor {
/*-------------------------------------------------------------------------*/
+/* USB String descriptors can contain at most 126 characters. */
+#define USB_MAX_STRING_LEN 126
+
/* USB_DT_STRING: String descriptor */
struct usb_string_descriptor {
__u8 bLength;
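USB string descriptors carry UTF-16LE code units after a two-byte header, and bLength is a single byte, so 126 characters is the largest count that still fits. A quick sanity check of that bound:

#include <assert.h>

int main(void)
{
	assert(2 + 2 * 126 <= 255);	/* 126 characters fit in bLength     */
	assert(2 + 2 * 127 >  255);	/* 127 would overflow the 8-bit field */
	return 0;
}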
diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
index 612f0c7d3558..ded39da305d0 100644
--- a/include/uapi/linux/vboxguest.h
+++ b/include/uapi/linux/vboxguest.h
@@ -103,7 +103,7 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_driver_version_info, 24 + 20);
/* IOCTL to perform a VMM Device request larger then 1KB. */
-#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)
+#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IO('V', 3)
/** VBG_IOCTL_HGCM_CONNECT data structure. */
@@ -198,7 +198,7 @@ struct vbg_ioctl_log {
} u;
};
-#define VBG_IOCTL_LOG(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s)
+#define VBG_IOCTL_LOG(s) _IO('V', 9)
/** VBG_IOCTL_WAIT_FOR_EVENTS data structure. */
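Only the ioctl direction bits change here: the old number encoded _IOC_READ | _IOC_WRITE with a zero size, while _IO() encodes _IOC_NONE. A small, purely illustrative userspace comparison of the two request values (macro names taken from the hunk above):

#include <stdio.h>
#include <linux/ioctl.h>	/* _IO(), _IOC(), _IOC_READ, _IOC_WRITE */

int main(void)
{
	unsigned int old_req = _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0);
	unsigned int new_req = _IO('V', 3);	/* == _IOC(_IOC_NONE, 'V', 3, 0) */

	/* type, number and size are identical; only the direction differs */
	printf("old=%#010x new=%#010x\n", old_req, new_req);
	return 0;
}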
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 1aae2e4b8f10..b73f4423bc09 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -362,9 +362,9 @@ enum v4l2_hsv_encoding {
enum v4l2_quantization {
/*
- * The default for R'G'B' quantization is always full range, except
- * for the BT2020 colorspace. For Y'CbCr the quantization is always
- * limited range, except for COLORSPACE_JPEG: this is full range.
+ * The default for R'G'B' quantization is always full range.
+ * For Y'CbCr the quantization is always limited range, except
+ * for COLORSPACE_JPEG: this is full range.
*/
V4L2_QUANTIZATION_DEFAULT = 0,
V4L2_QUANTIZATION_FULL_RANGE = 1,
@@ -373,14 +373,13 @@ enum v4l2_quantization {
/*
* Determine how QUANTIZATION_DEFAULT should map to a proper quantization.
- * This depends on whether the image is RGB or not, the colorspace and the
- * Y'CbCr encoding.
+ * This depends on whether the image is RGB or not and on the colorspace.
+ * The Y'CbCr encoding is no longer used, but is kept as a parameter for
+ * backwards compatibility.
*/
#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \
- (((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
- V4L2_QUANTIZATION_LIM_RANGE : \
- (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
- V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
+ (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
+ V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE)
/*
* Deprecated names for opRGB colorspace (IEC 61966-2-5)
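The visible effect of the new mapping is that R'G'B'/HSV pixel formats now default to full range even for BT.2020. With uapi headers containing the updated macro, this small illustrative check prints "full":

#include <stdio.h>
#include <linux/videodev2.h>

int main(void)
{
	int q = V4L2_MAP_QUANTIZATION_DEFAULT(1 /* is_rgb_or_hsv */,
					      V4L2_COLORSPACE_BT2020,
					      V4L2_YCBCR_ENC_DEFAULT);

	printf("BT.2020 RGB default quantization: %s\n",
	       q == V4L2_QUANTIZATION_FULL_RANGE ? "full" : "limited");
	return 0;
}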
diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h
index 86eca3208b6b..24f3371ad826 100644
--- a/include/uapi/linux/wireless.h
+++ b/include/uapi/linux/wireless.h
@@ -74,6 +74,12 @@
#include <linux/socket.h> /* for "struct sockaddr" et al */
#include <linux/if.h> /* for IFNAMSIZ and co... */
+#ifdef __KERNEL__
+# include <linux/stddef.h> /* for offsetof */
+#else
+# include <stddef.h> /* for offsetof */
+#endif
+
/***************************** VERSION *****************************/
/*
* This constant is used to know the availability of the wireless
@@ -1090,8 +1096,7 @@ struct iw_event {
/* iw_point events are special. First, the payload (extra data) come at
* the end of the event, so they are bigger than IW_EV_POINT_LEN. Second,
* we omit the pointer, so start at an offset. */
-#define IW_EV_POINT_OFF (((char *) &(((struct iw_point *) NULL)->length)) - \
- (char *) NULL)
+#define IW_EV_POINT_OFF offsetof(struct iw_point, length)
#define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \
IW_EV_POINT_OFF)
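Both forms compute the same offset; offsetof() just does it without arithmetic on a null pointer, which is undefined behaviour in standard C. A stand-alone comparison, with iw_point_like standing in for struct iw_point:

#include <stdio.h>
#include <stddef.h>

struct iw_point_like {			/* stand-in for struct iw_point */
	void *pointer;
	unsigned short length;
	unsigned short flags;
};

/* old style: offset via a null-pointer cast (undefined behaviour) */
#define OFF_OLD (((char *)&(((struct iw_point_like *)NULL)->length)) - (char *)NULL)
/* new style: the standard, well-defined macro */
#define OFF_NEW offsetof(struct iw_point_like, length)

int main(void)
{
	printf("old=%zu new=%zu\n", (size_t)OFF_OLD, (size_t)OFF_NEW);
	return 0;
}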
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 5f3b9fec7b5f..ff7cfdc6cb44 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -304,7 +304,7 @@ enum xfrm_attr_type_t {
XFRMA_PROTO, /* __u8 */
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
XFRMA_PAD,
- XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
+ XFRMA_OFFLOAD_DEV, /* struct xfrm_user_offload */
XFRMA_SET_MARK, /* __u32 */
XFRMA_SET_MARK_MASK, /* __u32 */
XFRMA_IF_ID, /* __u32 */
diff --git a/include/xen/events.h b/include/xen/events.h
index 1650d39decae..d8255ed2052c 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -14,11 +14,16 @@
unsigned xen_evtchn_nr_channels(void);
-int bind_evtchn_to_irq(unsigned int evtchn);
-int bind_evtchn_to_irqhandler(unsigned int evtchn,
+int bind_evtchn_to_irq(evtchn_port_t evtchn);
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn);
+int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
irq_handler_t handler,
unsigned long irqflags, const char *devname,
void *dev_id);
+int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
+ irq_handler_t handler,
+ unsigned long irqflags, const char *devname,
+ void *dev_id);
int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
irq_handler_t handler,
@@ -31,13 +36,21 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
const char *devname,
void *dev_id);
int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
- unsigned int remote_port);
+ evtchn_port_t remote_port);
+int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
+ evtchn_port_t remote_port);
int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
- unsigned int remote_port,
+ evtchn_port_t remote_port,
irq_handler_t handler,
unsigned long irqflags,
const char *devname,
void *dev_id);
+int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
+ evtchn_port_t remote_port,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
/*
* Common unbind function for all event sources. Takes IRQ to unbind from.
@@ -46,6 +59,14 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
*/
void unbind_from_irqhandler(unsigned int irq, void *dev_id);
+/*
+ * Send late EOI for an IRQ bound to an event channel via one of the *_lateeoi
+ * functions above.
+ */
+void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags);
+/* Signal an event was spurious, i.e. there was no action resulting from it. */
+#define XEN_EOI_FLAG_SPURIOUS 0x00000001
+
#define XEN_IRQ_PRIORITY_MAX EVTCHN_FIFO_PRIORITY_MAX
#define XEN_IRQ_PRIORITY_DEFAULT EVTCHN_FIFO_PRIORITY_DEFAULT
#define XEN_IRQ_PRIORITY_MIN EVTCHN_FIFO_PRIORITY_MIN
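The *_lateeoi variants defer the event-channel EOI until the handler explicitly asks for it, so a backend can stop a misbehaving frontend from storming its event channel. A sketch of how a backend might use the new API; my_backend_process_ring() and the surrounding driver are hypothetical:

static irqreturn_t my_backend_interrupt(int irq, void *dev_id)
{
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	if (my_backend_process_ring(dev_id))	/* hypothetical helper */
		eoi_flags = 0;			/* real work was done */

	/* EOI is sent here, after the work, instead of automatically */
	xen_irq_lateeoi(irq, eoi_flags);
	return IRQ_HANDLED;
}

/* at setup time:
 *	irq = bind_interdomain_evtchn_to_irqhandler_lateeoi(domid, port,
 *			my_backend_interrupt, 0, "my-backend", dev);
 */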
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 9bc5bc07d4d3..a9978350b45b 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -157,6 +157,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
map->flags = flags;
map->ref = ref;
map->dom = domid;
+ map->status = 1; /* arbitrary positive value */
}
static inline void
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 869c816d5f8c..14d47ed4114f 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -59,6 +59,15 @@ struct xenbus_watch
/* Path being watched. */
const char *node;
+ unsigned int nr_pending;
+
+ /*
+	 * Called just before enqueuing a new event while a spinlock is held.
+ * The event will be discarded if this callback returns false.
+ */
+ bool (*will_handle)(struct xenbus_watch *,
+ const char *path, const char *token);
+
/* Callback (executed in a process context with no locks held). */
void (*callback)(struct xenbus_watch *,
const char *path, const char *token);
@@ -178,8 +187,6 @@ void xs_suspend_cancel(void);
struct work_struct;
-void xenbus_probe(struct work_struct *);
-
#define XENBUS_IS_ERR_READ(str) ({ \
if (!IS_ERR(str) && strlen(str) == 0) { \
kfree(str); \
@@ -192,10 +199,14 @@ void xenbus_probe(struct work_struct *);
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
struct xenbus_watch *watch,
+ bool (*will_handle)(struct xenbus_watch *,
+ const char *, const char *),
void (*callback)(struct xenbus_watch *,
const char *, const char *));
-__printf(4, 5)
+__printf(5, 6)
int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
+ bool (*will_handle)(struct xenbus_watch *,
+ const char *, const char *),
void (*callback)(struct xenbus_watch *,
const char *, const char *),
const char *pathfmt, ...);
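The new will_handle callback runs under a spinlock just before an event is queued; returning false drops the event. A sketch of registering a watch with it; my_pending_events(), MY_EVENT_LIMIT and my_callback are hypothetical:

static bool my_will_handle(struct xenbus_watch *watch,
			   const char *path, const char *token)
{
	/* false discards the event instead of queueing it, e.g. to
	 * bound the number of outstanding events per watch */
	return my_pending_events(watch) < MY_EVENT_LIMIT;
}

/* err = xenbus_watch_path(dev, path, &watch, my_will_handle, my_callback); */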
diff --git a/init/Kconfig b/init/Kconfig
index 47035b5a46f6..b56a125b5a76 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -66,8 +66,7 @@ config INIT_ENV_ARG_LIMIT
config COMPILE_TEST
bool "Compile also drivers which will not load"
- depends on !UML
- default n
+ depends on HAS_IOMEM
help
Some drivers can be compiled on a different platform than they are
intended to be run on. Despite they cannot be loaded there (or even
@@ -535,7 +534,8 @@ config IKCONFIG_PROC
config LOG_BUF_SHIFT
int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
- range 12 25
+ range 12 25 if !H8300
+ range 12 19 if H8300
default 17
depends on PRINTK
help
diff --git a/init/init_task.c b/init/init_task.c
index 5aebe3be4d7c..994ffe018120 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -168,7 +168,8 @@ struct task_struct init_task
.lockdep_recursion = 0,
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .ret_stack = NULL,
+ .ret_stack = NULL,
+ .tracing_graph_pause = ATOMIC_INIT(0),
#endif
#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT)
.trace_recursion = 0,
diff --git a/init/main.c b/init/main.c
index 38a603f62b7b..fdfef08da0c4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -641,7 +641,6 @@ asmlinkage __visible void __init start_kernel(void)
softirq_init();
timekeeping_init();
time_init();
- printk_safe_init();
perf_event_init();
profile_init();
call_function_init();
@@ -735,6 +734,8 @@ asmlinkage __visible void __init start_kernel(void)
/* Do the rest non-__init'ed, we're now alive */
rest_init();
+
+ prevent_tail_call_optimization();
}
/* Call all constructor functions linked into the kernel. */
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index de4070d5472f..46d0265423f5 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -76,6 +76,7 @@ struct mqueue_inode_info {
struct sigevent notify;
struct pid *notify_owner;
+ u32 notify_self_exec_id;
struct user_namespace *notify_user_ns;
struct user_struct *user; /* user who created, for accounting */
struct sock *notify_sock;
@@ -662,28 +663,44 @@ static void __do_notify(struct mqueue_inode_info *info)
* synchronously. */
if (info->notify_owner &&
info->attr.mq_curmsgs == 1) {
- struct siginfo sig_i;
switch (info->notify.sigev_notify) {
case SIGEV_NONE:
break;
- case SIGEV_SIGNAL:
- /* sends signal */
+ case SIGEV_SIGNAL: {
+ struct siginfo sig_i;
+ struct task_struct *task;
+
+ /* do_mq_notify() accepts sigev_signo == 0, why?? */
+ if (!info->notify.sigev_signo)
+ break;
clear_siginfo(&sig_i);
sig_i.si_signo = info->notify.sigev_signo;
sig_i.si_errno = 0;
sig_i.si_code = SI_MESGQ;
sig_i.si_value = info->notify.sigev_value;
- /* map current pid/uid into info->owner's namespaces */
rcu_read_lock();
+ /* map current pid/uid into info->owner's namespaces */
sig_i.si_pid = task_tgid_nr_ns(current,
ns_of_pid(info->notify_owner));
- sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
+ sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
+ current_uid());
+ /*
+ * We can't use kill_pid_info(), this signal should
+ * bypass check_kill_permission(). It is from kernel
+ * but si_fromuser() can't know this.
+ * We do check the self_exec_id, to avoid sending
+ * signals to programs that don't expect them.
+ */
+ task = pid_task(info->notify_owner, PIDTYPE_TGID);
+ if (task && task->self_exec_id ==
+ info->notify_self_exec_id) {
+ do_send_sig_info(info->notify.sigev_signo,
+ &sig_i, task, PIDTYPE_TGID);
+ }
rcu_read_unlock();
-
- kill_pid_info(info->notify.sigev_signo,
- &sig_i, info->notify_owner);
break;
+ }
case SIGEV_THREAD:
set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
netlink_sendskb(info->notify_sock, info->notify_cookie);
@@ -1273,6 +1290,7 @@ retry:
info->notify.sigev_signo = notification->sigev_signo;
info->notify.sigev_value = notification->sigev_value;
info->notify.sigev_notify = SIGEV_SIGNAL;
+ info->notify_self_exec_id = current->self_exec_id;
break;
}
diff --git a/ipc/util.c b/ipc/util.c
index 0af05752969f..af1b572effb1 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -735,21 +735,21 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
total++;
}
+ ipc = NULL;
if (total >= ids->in_use)
- return NULL;
+ goto out;
for (; pos < IPCMNI; pos++) {
ipc = idr_find(&ids->ipcs_idr, pos);
if (ipc != NULL) {
- *new_pos = pos + 1;
rcu_read_lock();
ipc_lock_object(ipc);
- return ipc;
+ break;
}
}
-
- /* Out of range - return NULL to terminate iteration */
- return NULL;
+out:
+ *new_pos = pos + 1;
+ return ipc;
}
static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
diff --git a/kernel/Makefile b/kernel/Makefile
index ad4b324d8906..01bc1d39b400 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -92,7 +92,6 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
obj-$(CONFIG_LATENCYTOP) += latencytop.o
-obj-$(CONFIG_ELFCORE) += elfcore.o
obj-$(CONFIG_FUNCTION_TRACER) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_TRACE_CLOCK) += trace/
diff --git a/kernel/audit.c b/kernel/audit.c
index 1f08c38e604a..45741c3c48a4 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -893,7 +893,7 @@ main_queue:
return 0;
}
-int audit_send_list(void *_dest)
+int audit_send_list_thread(void *_dest)
{
struct audit_netlink_list *dest = _dest;
struct sk_buff *skb;
@@ -937,19 +937,30 @@ out_kfree_skb:
return NULL;
}
+static void audit_free_reply(struct audit_reply *reply)
+{
+ if (!reply)
+ return;
+
+ if (reply->skb)
+ kfree_skb(reply->skb);
+ if (reply->net)
+ put_net(reply->net);
+ kfree(reply);
+}
+
static int audit_send_reply_thread(void *arg)
{
struct audit_reply *reply = (struct audit_reply *)arg;
- struct sock *sk = audit_get_sk(reply->net);
audit_ctl_lock();
audit_ctl_unlock();
/* Ignore failure. It'll only happen if the sender goes away,
because our timeout is set to infinite. */
- netlink_unicast(sk, reply->skb, reply->portid, 0);
- put_net(reply->net);
- kfree(reply);
+ netlink_unicast(audit_get_sk(reply->net), reply->skb, reply->portid, 0);
+ reply->skb = NULL;
+ audit_free_reply(reply);
return 0;
}
@@ -963,35 +974,32 @@ static int audit_send_reply_thread(void *arg)
* @payload: payload data
* @size: payload size
*
- * Allocates an skb, builds the netlink message, and sends it to the port id.
- * No failure notifications.
+ * Allocates a skb, builds the netlink message, and sends it to the port id.
*/
static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done,
int multi, const void *payload, int size)
{
- struct net *net = sock_net(NETLINK_CB(request_skb).sk);
- struct sk_buff *skb;
struct task_struct *tsk;
- struct audit_reply *reply = kmalloc(sizeof(struct audit_reply),
- GFP_KERNEL);
+ struct audit_reply *reply;
+ reply = kzalloc(sizeof(*reply), GFP_KERNEL);
if (!reply)
return;
- skb = audit_make_reply(seq, type, done, multi, payload, size);
- if (!skb)
- goto out;
-
- reply->net = get_net(net);
+ reply->skb = audit_make_reply(seq, type, done, multi, payload, size);
+ if (!reply->skb)
+ goto err;
+ reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk));
reply->portid = NETLINK_CB(request_skb).portid;
- reply->skb = skb;
tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
- if (!IS_ERR(tsk))
- return;
- kfree_skb(skb);
-out:
- kfree(reply);
+ if (IS_ERR(tsk))
+ goto err;
+
+ return;
+
+err:
+ audit_free_reply(reply);
}
/*
@@ -1331,6 +1339,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
if (!audit_enabled && msg_type != AUDIT_USER_AVC)
return 0;
+ /* exit early if there isn't at least one character to print */
+ if (data_len < 2)
+ return -EINVAL;
err = audit_filter(msg_type, AUDIT_FILTER_USER);
if (err == 1) { /* match or error */
diff --git a/kernel/audit.h b/kernel/audit.h
index 214e14948370..99badd7ba56f 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -248,7 +248,7 @@ struct audit_netlink_list {
struct sk_buff_head q;
};
-int audit_send_list(void *_dest);
+int audit_send_list_thread(void *_dest);
extern int selinux_audit_rule_update(void);
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 4f7262eba73d..50952d6d8120 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -317,8 +317,6 @@ static void audit_update_watch(struct audit_parent *parent,
if (oentry->rule.exe)
audit_remove_mark(oentry->rule.exe);
- audit_watch_log_rule_change(r, owatch, "updated_rules");
-
call_rcu(&oentry->rcu, audit_free_rule_rcu);
}
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 1c8a48abda80..b2cc63ca0068 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1157,11 +1157,8 @@ int audit_rule_change(int type, int seq, void *data, size_t datasz)
*/
int audit_list_rules_send(struct sk_buff *request_skb, int seq)
{
- u32 portid = NETLINK_CB(request_skb).portid;
- struct net *net = sock_net(NETLINK_CB(request_skb).sk);
struct task_struct *tsk;
struct audit_netlink_list *dest;
- int err = 0;
/* We can't just spew out the rules here because we might fill
* the available socket buffer space and deadlock waiting for
@@ -1169,25 +1166,26 @@ int audit_list_rules_send(struct sk_buff *request_skb, int seq)
* happen if we're actually running in the context of auditctl
* trying to _send_ the stuff */
- dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL);
+ dest = kmalloc(sizeof(*dest), GFP_KERNEL);
if (!dest)
return -ENOMEM;
- dest->net = get_net(net);
- dest->portid = portid;
+ dest->net = get_net(sock_net(NETLINK_CB(request_skb).sk));
+ dest->portid = NETLINK_CB(request_skb).portid;
skb_queue_head_init(&dest->q);
mutex_lock(&audit_filter_mutex);
audit_list_rules(seq, &dest->q);
mutex_unlock(&audit_filter_mutex);
- tsk = kthread_run(audit_send_list, dest, "audit_send_list");
+ tsk = kthread_run(audit_send_list_thread, dest, "audit_send_list");
if (IS_ERR(tsk)) {
skb_queue_purge(&dest->q);
+ put_net(dest->net);
kfree(dest);
- err = PTR_ERR(tsk);
+ return PTR_ERR(tsk);
}
- return err;
+ return 0;
}
int audit_comparator(u32 left, u32 op, u32 right)
diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c
index e6ef4401a138..9b5eeff72fd3 100644
--- a/kernel/bpf/bpf_lru_list.c
+++ b/kernel/bpf/bpf_lru_list.c
@@ -505,13 +505,14 @@ struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
static void bpf_common_lru_push_free(struct bpf_lru *lru,
struct bpf_lru_node *node)
{
+ u8 node_type = READ_ONCE(node->type);
unsigned long flags;
- if (WARN_ON_ONCE(node->type == BPF_LRU_LIST_T_FREE) ||
- WARN_ON_ONCE(node->type == BPF_LRU_LOCAL_LIST_T_FREE))
+ if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) ||
+ WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE))
return;
- if (node->type == BPF_LRU_LOCAL_LIST_T_PENDING) {
+ if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) {
struct bpf_lru_locallist *loc_l;
loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 3c18260403dd..61fbcae82f0a 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -455,7 +455,7 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
return -EOVERFLOW;
/* Make sure CPU is a valid possible cpu */
- if (!cpu_possible(key_cpu))
+ if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
return -ENODEV;
if (qsize == 0) {
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 6fe72792312d..3f3ed33bd2fd 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -667,26 +667,23 @@ static void htab_elem_free_rcu(struct rcu_head *head)
struct htab_elem *l = container_of(head, struct htab_elem, rcu);
struct bpf_htab *htab = l->htab;
- /* must increment bpf_prog_active to avoid kprobe+bpf triggering while
- * we're calling kfree, otherwise deadlock is possible if kprobes
- * are placed somewhere inside of slub
- */
- preempt_disable();
- __this_cpu_inc(bpf_prog_active);
htab_elem_free(htab, l);
- __this_cpu_dec(bpf_prog_active);
- preempt_enable();
}
-static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
{
struct bpf_map *map = &htab->map;
+ void *ptr;
if (map->ops->map_fd_put_ptr) {
- void *ptr = fd_htab_map_get_ptr(map, l);
-
+ ptr = fd_htab_map_get_ptr(map, l);
map->ops->map_fd_put_ptr(ptr);
}
+}
+
+static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+{
+ htab_put_fd_value(htab, l);
if (htab_is_prealloc(htab)) {
__pcpu_freelist_push(&htab->freelist, &l->fnode);
@@ -747,6 +744,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
*/
pl_new = this_cpu_ptr(htab->extra_elems);
l_new = *pl_new;
+ htab_put_fd_value(htab, old_elem);
*pl_new = old_elem;
} else {
struct pcpu_freelist_node *l;
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index c04815bb15cc..11fade89c1f3 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -207,10 +207,12 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
else
prev_key = key;
+ rcu_read_lock();
if (map->ops->map_get_next_key(map, prev_key, key)) {
map_iter(m)->done = true;
- return NULL;
+ key = NULL;
}
+ rcu_read_unlock();
return key;
}
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 55fff5e6d983..a47d623f59fe 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -114,6 +114,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
/* hash table size must be power of 2 */
n_buckets = roundup_pow_of_two(attr->max_entries);
+ if (!n_buckets)
+ return ERR_PTR(-E2BIG);
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
if (cost >= U32_MAX - PAGE_SIZE)
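The !n_buckets check catches max_entries just above 2^31: rounding such a value up to a power of two gives 2^32, which truncates to 0 in the 32-bit bucket count. A userspace model of that failure mode:

#include <stdint.h>
#include <stdio.h>

static uint32_t n_buckets_for(uint32_t max_entries)
{
	uint64_t p = 1;

	while (p < max_entries)
		p <<= 1;		/* round up to a power of two */
	return (uint32_t)p;		/* the truncation yields the 0 */
}

int main(void)
{
	printf("%u\n", n_buckets_for(0x80000001u));	/* prints 0 */
	return 0;
}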
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index b766265cf37d..21a366a661ac 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1903,7 +1903,8 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
return NULL;
}
-static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
+ const struct cred *f_cred)
{
const struct bpf_map *map;
struct bpf_insn *insns;
@@ -1925,7 +1926,7 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
insns[i].code = BPF_JMP | BPF_CALL;
- if (!bpf_dump_raw_ok())
+ if (!bpf_dump_raw_ok(f_cred))
insns[i].imm = 0;
continue;
}
@@ -1942,7 +1943,7 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
continue;
}
- if (!bpf_dump_raw_ok() &&
+ if (!bpf_dump_raw_ok(f_cred) &&
imm == (unsigned long)prog->aux) {
insns[i].imm = 0;
insns[i + 1].imm = 0;
@@ -1953,7 +1954,8 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
return insns;
}
-static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
+static int bpf_prog_get_info_by_fd(struct file *file,
+ struct bpf_prog *prog,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
@@ -2010,11 +2012,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
struct bpf_insn *insns_sanitized;
bool fault;
- if (prog->blinded && !bpf_dump_raw_ok()) {
+ if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
info.xlated_prog_insns = 0;
goto done;
}
- insns_sanitized = bpf_insn_prepare_dump(prog);
+ insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
if (!insns_sanitized)
return -ENOMEM;
uinsns = u64_to_user_ptr(info.xlated_prog_insns);
@@ -2048,7 +2050,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
}
if (info.jited_prog_len && ulen) {
- if (bpf_dump_raw_ok()) {
+ if (bpf_dump_raw_ok(file->f_cred)) {
uinsns = u64_to_user_ptr(info.jited_prog_insns);
ulen = min_t(u32, info.jited_prog_len, ulen);
@@ -2083,7 +2085,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
ulen = info.nr_jited_ksyms;
info.nr_jited_ksyms = prog->aux->func_cnt;
if (info.nr_jited_ksyms && ulen) {
- if (bpf_dump_raw_ok()) {
+ if (bpf_dump_raw_ok(file->f_cred)) {
u64 __user *user_ksyms;
ulong ksym_addr;
u32 i;
@@ -2107,7 +2109,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
ulen = info.nr_jited_func_lens;
info.nr_jited_func_lens = prog->aux->func_cnt;
if (info.nr_jited_func_lens && ulen) {
- if (bpf_dump_raw_ok()) {
+ if (bpf_dump_raw_ok(file->f_cred)) {
u32 __user *user_lens;
u32 func_len, i;
@@ -2132,7 +2134,8 @@ done:
return 0;
}
-static int bpf_map_get_info_by_fd(struct bpf_map *map,
+static int bpf_map_get_info_by_fd(struct file *file,
+ struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
@@ -2174,7 +2177,8 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
return 0;
}
-static int bpf_btf_get_info_by_fd(struct btf *btf,
+static int bpf_btf_get_info_by_fd(struct file *file,
+ struct btf *btf,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
@@ -2206,13 +2210,13 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
return -EBADFD;
if (f.file->f_op == &bpf_prog_fops)
- err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
+ err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
uattr);
else if (f.file->f_op == &bpf_map_fops)
- err = bpf_map_get_info_by_fd(f.file->private_data, attr,
+ err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
uattr);
else if (f.file->f_op == &btf_fops)
- err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
+ err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
else
err = -EINVAL;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e85636fb81b9..1f4c88ce58de 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -188,8 +188,7 @@ struct bpf_call_arg_meta {
bool pkt_access;
int regno;
int access_size;
- s64 msize_smax_value;
- u64 msize_umax_value;
+ u64 msize_max_value;
};
static DEFINE_MUTEX(bpf_verifier_lock);
@@ -2076,8 +2075,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
/* remember the mem_size which may be used later
* to refine return values.
*/
- meta->msize_smax_value = reg->smax_value;
- meta->msize_umax_value = reg->umax_value;
+ meta->msize_max_value = reg->umax_value;
/* The register is SCALAR_VALUE; the access check
* happens using its boundaries.
@@ -2448,21 +2446,44 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
return 0;
}
-static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
- int func_id,
- struct bpf_call_arg_meta *meta)
+static int do_refine_retval_range(struct bpf_verifier_env *env,
+ struct bpf_reg_state *regs, int ret_type,
+ int func_id, struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
+ struct bpf_reg_state tmp_reg = *ret_reg;
+ bool ret;
if (ret_type != RET_INTEGER ||
(func_id != BPF_FUNC_get_stack &&
func_id != BPF_FUNC_probe_read_str))
- return;
+ return 0;
+
+ /* Error case where ret is in interval [S32MIN, -1]. */
+ ret_reg->smin_value = S32_MIN;
+ ret_reg->smax_value = -1;
+
+ __reg_deduce_bounds(ret_reg);
+ __reg_bound_offset(ret_reg);
+ __update_reg_bounds(ret_reg);
+
+ ret = push_stack(env, env->insn_idx + 1, env->insn_idx, false);
+ if (!ret)
+ return -EFAULT;
+
+ *ret_reg = tmp_reg;
+
+ /* Success case where ret is in range [0, msize_max_value]. */
+ ret_reg->smin_value = 0;
+ ret_reg->smax_value = meta->msize_max_value;
+ ret_reg->umin_value = ret_reg->smin_value;
+ ret_reg->umax_value = ret_reg->smax_value;
- ret_reg->smax_value = meta->msize_smax_value;
- ret_reg->umax_value = meta->msize_umax_value;
__reg_deduce_bounds(ret_reg);
__reg_bound_offset(ret_reg);
+ __update_reg_bounds(ret_reg);
+
+ return 0;
}
static int
@@ -2617,7 +2638,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
return -EINVAL;
}
- do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
+ err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta);
+ if (err)
+ return err;
err = check_map_func_compatibility(env, meta.map_ptr, func_id);
if (err)
@@ -2706,32 +2729,43 @@ static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
return &env->insn_aux_data[env->insn_idx];
}
+enum {
+ REASON_BOUNDS = -1,
+ REASON_TYPE = -2,
+ REASON_PATHS = -3,
+ REASON_LIMIT = -4,
+ REASON_STACK = -5,
+};
+
static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
- u32 *ptr_limit, u8 opcode, bool off_is_neg)
+ u32 *alu_limit, bool mask_to_left)
{
- bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
- (opcode == BPF_SUB && !off_is_neg);
- u32 off;
+ u32 max = 0, ptr_limit = 0;
switch (ptr_reg->type) {
case PTR_TO_STACK:
- off = ptr_reg->off + ptr_reg->var_off.value;
- if (mask_to_left)
- *ptr_limit = MAX_BPF_STACK + off;
- else
- *ptr_limit = -off;
- return 0;
+ /* Offset 0 is out-of-bounds, but acceptable start for the
+ * left direction, see BPF_REG_FP. Also, unknown scalar
+ * offset where we would need to deal with min/max bounds is
+ * currently prohibited for unprivileged.
+ */
+ max = MAX_BPF_STACK + mask_to_left;
+ ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
+ break;
case PTR_TO_MAP_VALUE:
- if (mask_to_left) {
- *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
- } else {
- off = ptr_reg->smin_value + ptr_reg->off;
- *ptr_limit = ptr_reg->map_ptr->value_size - off;
- }
- return 0;
+ max = ptr_reg->map_ptr->value_size;
+ ptr_limit = (mask_to_left ?
+ ptr_reg->smin_value :
+ ptr_reg->umax_value) + ptr_reg->off;
+ break;
default:
- return -EINVAL;
+ return REASON_TYPE;
}
+
+ if (ptr_limit >= max)
+ return REASON_LIMIT;
+ *alu_limit = ptr_limit;
+ return 0;
}
static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
@@ -2749,7 +2783,7 @@ static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
if (aux->alu_state &&
(aux->alu_state != alu_state ||
aux->alu_limit != alu_limit))
- return -EACCES;
+ return REASON_PATHS;
/* Corresponding fixup done in fixup_bpf_calls(). */
aux->alu_state = alu_state;
@@ -2768,19 +2802,34 @@ static int sanitize_val_alu(struct bpf_verifier_env *env,
return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
}
+static bool sanitize_needed(u8 opcode)
+{
+ return opcode == BPF_ADD || opcode == BPF_SUB;
+}
+
+struct bpf_sanitize_info {
+ struct bpf_insn_aux_data aux;
+ bool mask_to_left;
+};
+
static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
+ const struct bpf_reg_state *off_reg,
struct bpf_reg_state *dst_reg,
- bool off_is_neg)
+ struct bpf_sanitize_info *info,
+ const bool commit_window)
{
+ struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
struct bpf_verifier_state *vstate = env->cur_state;
- struct bpf_insn_aux_data *aux = cur_aux(env);
+ bool off_is_imm = tnum_is_const(off_reg->var_off);
+ bool off_is_neg = off_reg->smin_value < 0;
bool ptr_is_dst_reg = ptr_reg == dst_reg;
u8 opcode = BPF_OP(insn->code);
u32 alu_state, alu_limit;
struct bpf_reg_state tmp;
bool ret;
+ int err;
if (can_skip_alu_sanitation(env, insn))
return 0;
@@ -2792,15 +2841,47 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
if (vstate->speculative)
goto do_sim;
- alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
- alu_state |= ptr_is_dst_reg ?
- BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+ if (!commit_window) {
+ if (!tnum_is_const(off_reg->var_off) &&
+ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+ return REASON_BOUNDS;
- if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
- return 0;
- if (update_alu_sanitation_state(aux, alu_state, alu_limit))
- return -EACCES;
+ info->mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
+ (opcode == BPF_SUB && !off_is_neg);
+ }
+
+ err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
+ if (err < 0)
+ return err;
+
+ if (commit_window) {
+ /* In commit phase we narrow the masking window based on
+ * the observed pointer move after the simulated operation.
+ */
+ alu_state = info->aux.alu_state;
+ alu_limit = abs(info->aux.alu_limit - alu_limit);
+ } else {
+ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+ alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
+ alu_state |= ptr_is_dst_reg ?
+ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+ }
+
+ err = update_alu_sanitation_state(aux, alu_state, alu_limit);
+ if (err < 0)
+ return err;
do_sim:
+ /* If we're in commit phase, we're done here given we already
+ * pushed the truncated dst_reg into the speculative verification
+ * stack.
+ *
+ * Also, when register is a known constant, we rewrite register-based
+ * operation to immediate-based, and thus do not need masking (and as
+ * a consequence, do not need to simulate the zero-truncation either).
+ */
+ if (commit_window || off_is_imm)
+ return 0;
+
/* Simulate and find potential out-of-bounds access under
* speculative execution from truncation as a result of
* masking when off was not within expected range. If off
@@ -2817,7 +2898,81 @@ do_sim:
ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
if (!ptr_is_dst_reg && ret)
*dst_reg = tmp;
- return !ret ? -EFAULT : 0;
+ return !ret ? REASON_STACK : 0;
+}
+
+static int sanitize_err(struct bpf_verifier_env *env,
+ const struct bpf_insn *insn, int reason,
+ const struct bpf_reg_state *off_reg,
+ const struct bpf_reg_state *dst_reg)
+{
+ static const char *err = "pointer arithmetic with it prohibited for !root";
+ const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
+ u32 dst = insn->dst_reg, src = insn->src_reg;
+
+ switch (reason) {
+ case REASON_BOUNDS:
+ verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
+ off_reg == dst_reg ? dst : src, err);
+ break;
+ case REASON_TYPE:
+ verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
+ off_reg == dst_reg ? src : dst, err);
+ break;
+ case REASON_PATHS:
+ verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
+ dst, op, err);
+ break;
+ case REASON_LIMIT:
+ verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
+ dst, op, err);
+ break;
+ case REASON_STACK:
+ verbose(env, "R%d could not be pushed for speculative verification, %s\n",
+ dst, err);
+ break;
+ default:
+ verbose(env, "verifier internal error: unknown reason (%d)\n",
+ reason);
+ break;
+ }
+
+ return -EACCES;
+}
+
+static int sanitize_check_bounds(struct bpf_verifier_env *env,
+ const struct bpf_insn *insn,
+ const struct bpf_reg_state *dst_reg)
+{
+ u32 dst = insn->dst_reg;
+
+ /* For unprivileged we require that resulting offset must be in bounds
+ * in order to be able to sanitize access later on.
+ */
+ if (env->allow_ptr_leaks)
+ return 0;
+
+ switch (dst_reg->type) {
+ case PTR_TO_STACK:
+ if (check_stack_access(env, dst_reg, dst_reg->off +
+ dst_reg->var_off.value, 1)) {
+ verbose(env, "R%d stack pointer arithmetic goes out of range, "
+ "prohibited for !root\n", dst);
+ return -EACCES;
+ }
+ break;
+ case PTR_TO_MAP_VALUE:
+ if (check_map_access(env, dst, dst_reg->off, 1, false)) {
+ verbose(env, "R%d pointer arithmetic of map value goes out of range, "
+ "prohibited for !root\n", dst);
+ return -EACCES;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
}
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
@@ -2838,8 +2993,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
- u32 dst = insn->dst_reg, src = insn->src_reg;
+ struct bpf_sanitize_info info = {};
u8 opcode = BPF_OP(insn->code);
+ u32 dst = insn->dst_reg;
int ret;
dst_reg = &regs[dst];
@@ -2876,12 +3032,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
dst);
return -EACCES;
}
- if (ptr_reg->type == PTR_TO_MAP_VALUE &&
- !env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
- verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
- off_reg == dst_reg ? dst : src);
- return -EACCES;
- }
/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
* The id may be overwritten later if we create a new variable offset.
@@ -2893,13 +3043,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
!check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
return -EINVAL;
+ if (sanitize_needed(opcode)) {
+ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
+ &info, false);
+ if (ret < 0)
+ return sanitize_err(env, insn, ret, off_reg, dst_reg);
+ }
+
switch (opcode) {
case BPF_ADD:
- ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
- if (ret < 0) {
- verbose(env, "R%d tried to add from different maps or paths\n", dst);
- return ret;
- }
/* We can take a fixed offset as long as it doesn't overflow
* the s32 'off' field
*/
@@ -2950,11 +3102,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
}
break;
case BPF_SUB:
- ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
- if (ret < 0) {
- verbose(env, "R%d tried to sub from different maps or paths\n", dst);
- return ret;
- }
if (dst_reg == off_reg) {
/* scalar -= pointer. Creates an unknown scalar */
verbose(env, "R%d tried to subtract pointer from scalar\n",
@@ -3035,22 +3182,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
- /* For unprivileged we require that resulting offset must be in bounds
- * in order to be able to sanitize access later on.
- */
- if (!env->allow_ptr_leaks) {
- if (dst_reg->type == PTR_TO_MAP_VALUE &&
- check_map_access(env, dst, dst_reg->off, 1, false)) {
- verbose(env, "R%d pointer arithmetic of map value goes out of range, "
- "prohibited for !root\n", dst);
- return -EACCES;
- } else if (dst_reg->type == PTR_TO_STACK &&
- check_stack_access(env, dst_reg, dst_reg->off +
- dst_reg->var_off.value, 1)) {
- verbose(env, "R%d stack pointer arithmetic goes out of range, "
- "prohibited for !root\n", dst);
- return -EACCES;
- }
+ if (sanitize_check_bounds(env, insn, dst_reg) < 0)
+ return -EACCES;
+ if (sanitize_needed(opcode)) {
+ ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
+ &info, true);
+ if (ret < 0)
+ return sanitize_err(env, insn, ret, off_reg, dst_reg);
}
return 0;
@@ -3071,7 +3209,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
s64 smin_val, smax_val;
u64 umin_val, umax_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
- u32 dst = insn->dst_reg;
int ret;
if (insn_bitness == 32) {
@@ -3105,13 +3242,14 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
return 0;
}
+ if (sanitize_needed(opcode)) {
+ ret = sanitize_val_alu(env, insn);
+ if (ret < 0)
+ return sanitize_err(env, insn, ret, NULL, NULL);
+ }
+
switch (opcode) {
case BPF_ADD:
- ret = sanitize_val_alu(env, insn);
- if (ret < 0) {
- verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
- return ret;
- }
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
signed_add_overflows(dst_reg->smax_value, smax_val)) {
dst_reg->smin_value = S64_MIN;
@@ -3131,11 +3269,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
break;
case BPF_SUB:
- ret = sanitize_val_alu(env, insn);
- if (ret < 0) {
- verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
- return ret;
- }
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
/* Overflow possible, we know nothing */
@@ -4095,8 +4228,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_verifier_state *this_branch = env->cur_state;
struct bpf_verifier_state *other_branch;
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
- struct bpf_reg_state *dst_reg, *other_branch_regs;
+ struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
u8 opcode = BPF_OP(insn->code);
+ int pred = -1;
int err;
if (opcode > BPF_JSLE) {
@@ -4120,6 +4254,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
insn->src_reg);
return -EACCES;
}
+ src_reg = &regs[insn->src_reg];
} else {
if (insn->src_reg != BPF_REG_0) {
verbose(env, "BPF_JMP uses reserved fields\n");
@@ -4134,19 +4269,21 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
dst_reg = &regs[insn->dst_reg];
- if (BPF_SRC(insn->code) == BPF_K) {
- int pred = is_branch_taken(dst_reg, insn->imm, opcode);
-
- if (pred == 1) {
- /* only follow the goto, ignore fall-through */
- *insn_idx += insn->off;
- return 0;
- } else if (pred == 0) {
- /* only follow fall-through branch, since
- * that's where the program will go
- */
- return 0;
- }
+ if (BPF_SRC(insn->code) == BPF_K)
+ pred = is_branch_taken(dst_reg, insn->imm, opcode);
+ else if (src_reg->type == SCALAR_VALUE &&
+ tnum_is_const(src_reg->var_off))
+ pred = is_branch_taken(dst_reg, src_reg->var_off.value,
+ opcode);
+ if (pred == 1) {
+ /* only follow the goto, ignore fall-through */
+ *insn_idx += insn->off;
+ return 0;
+ } else if (pred == 0) {
+ /* only follow fall-through branch, since
+ * that's where the program will go
+ */
+ return 0;
}
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
@@ -6047,7 +6184,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
struct bpf_insn insn_buf[16];
struct bpf_insn *patch = &insn_buf[0];
- bool issrc, isneg;
+ bool issrc, isneg, isimm;
u32 off_reg;
aux = &env->insn_aux_data[i + delta];
@@ -6058,28 +6195,29 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
BPF_ALU_SANITIZE_SRC;
+ isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
off_reg = issrc ? insn->src_reg : insn->dst_reg;
- if (isneg)
- *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
- *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
- *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
- *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
- *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
- *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
- if (issrc) {
- *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
- off_reg);
- insn->src_reg = BPF_REG_AX;
+ if (isimm) {
+ *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
} else {
- *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
- BPF_REG_AX);
+ if (isneg)
+ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+ *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
+ *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
+ *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
+ *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
+ *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
+ *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
}
+ if (!issrc)
+ *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
+ insn->src_reg = BPF_REG_AX;
if (isneg)
insn->code = insn->code == code_add ?
code_sub : code_add;
*patch++ = *insn;
- if (issrc && isneg)
+ if (issrc && isneg && !isimm)
*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
cnt = patch - insn_buf;
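The rewritten fixup is easier to follow outside BPF macro form. For the non-immediate case (and leaving out the isneg pre/post negation), the emitted sequence is equivalent to this C model; it is a sketch that assumes arithmetic right shift of negative values, which BPF_ARSH guarantees:

#include <stdint.h>

/* offsets in [0, alu_limit] pass through; anything larger (or negative)
 * is forced to 0 before the pointer add/sub */
static int64_t sanitized_offset(uint32_t alu_limit, int64_t off)
{
	int64_t ax = alu_limit;	/* BPF_MOV32_IMM(AX, aux->alu_limit) */

	ax -= off;		/* BPF_SUB: negative iff off > alu_limit */
	ax |= off;		/* BPF_OR:  also negative iff off < 0 */
	ax = -ax;		/* BPF_NEG */
	ax >>= 63;		/* BPF_ARSH: all-ones when in range (off != 0), else 0 */
	return ax & off;	/* BPF_AND: off, or 0 when out of range */
}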
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 877ba6dacca8..2a879d34bbe5 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5928,17 +5928,8 @@ void cgroup_sk_alloc_disable(void)
void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
- if (cgroup_sk_alloc_disabled)
- return;
-
- /* Socket clone path */
- if (skcd->val) {
- /*
- * We might be cloning a socket which is left in an empty
- * cgroup and the cgroup might have already been rmdir'd.
- * Don't use cgroup_get_live().
- */
- cgroup_get(sock_cgroup_ptr(skcd));
+ if (cgroup_sk_alloc_disabled) {
+ skcd->no_refcnt = 1;
return;
}
@@ -5962,8 +5953,26 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
rcu_read_unlock();
}
+void cgroup_sk_clone(struct sock_cgroup_data *skcd)
+{
+ /* Socket clone path */
+ if (skcd->val) {
+ if (skcd->no_refcnt)
+ return;
+ /*
+ * We might be cloning a socket which is left in an empty
+ * cgroup and the cgroup might have already been rmdir'd.
+ * Don't use cgroup_get_live().
+ */
+ cgroup_get(sock_cgroup_ptr(skcd));
+ }
+}
+
void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
+ if (skcd->no_refcnt)
+ return;
+
cgroup_put(sock_cgroup_ptr(skcd));
}
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index bb95a35e8c2d..d0ed410b4127 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -32,12 +32,9 @@ void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
return;
/*
- * Paired with the one in cgroup_rstat_cpu_pop_upated(). Either we
- * see NULL updated_next or they see our updated stat.
- */
- smp_mb();
-
- /*
+ * Speculative already-on-list test. This may race leading to
+ * temporary inaccuracies, which is fine.
+ *
* Because @parent's updated_children is terminated with @parent
* instead of NULL, we can tell whether @cgrp is on the list by
* testing the next pointer for NULL.
@@ -133,13 +130,6 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
*nextp = rstatc->updated_next;
rstatc->updated_next = NULL;
- /*
- * Paired with the one in cgroup_rstat_cpu_updated().
- * Either they see NULL updated_next or we see their
- * updated stat.
- */
- smp_mb();
-
return pos;
}
diff --git a/kernel/compat.c b/kernel/compat.c
index 8e40efc2928a..e4548a9e9c52 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -354,10 +354,9 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
- if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
+ if (!user_access_begin(VERIFY_READ, umask, bitmap_size / 8))
return -EFAULT;
- user_access_begin();
while (nr_compat_longs > 1) {
compat_ulong_t l1, l2;
unsafe_get_user(l1, umask++, Efault);
@@ -384,10 +383,9 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
- if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
+ if (!user_access_begin(VERIFY_WRITE, umask, bitmap_size / 8))
return -EFAULT;
- user_access_begin();
while (nr_compat_longs > 1) {
unsigned long m = *mask++;
unsafe_put_user((compat_ulong_t)m, umask++, Efault);
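user_access_begin() now performs the access_ok() check itself and returns 0 on failure, so callers take the shape below. This is a sketch only, assuming the usual user_access_end() pairing and the three-argument form used in this tree:

static int copy_one_word(const compat_ulong_t __user *umask, unsigned long *out)
{
	compat_ulong_t v;

	if (!user_access_begin(VERIFY_READ, umask, sizeof(v)))
		return -EFAULT;			/* access_ok() happens in here */
	unsafe_get_user(v, umask, Efault);	/* no per-access checking */
	user_access_end();
	*out = v;
	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}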
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2d850eaaf82e..9a39a24f6025 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -3,6 +3,7 @@
*
* This code is licenced under the GPL.
*/
+#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
@@ -532,6 +533,21 @@ static int bringup_cpu(unsigned int cpu)
return bringup_wait_for_ap(cpu);
}
+static int finish_cpu(unsigned int cpu)
+{
+ struct task_struct *idle = idle_thread_get(cpu);
+ struct mm_struct *mm = idle->active_mm;
+
+ /*
+ * idle_task_exit() will have switched to &init_mm, now
+ * clean up any remaining active_mm state.
+ */
+ if (mm != &init_mm)
+ idle->active_mm = &init_mm;
+ mmdrop(mm);
+ return 0;
+}
+
/*
* Hotplug state machine related functions
*/
@@ -760,6 +776,10 @@ void __init cpuhp_threads_init(void)
}
#ifdef CONFIG_HOTPLUG_CPU
+#ifndef arch_clear_mm_cpumask_cpu
+#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
+#endif
+
/**
* clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
* @cpu: a CPU id
@@ -795,7 +815,7 @@ void clear_tasks_mm_cpumask(int cpu)
t = find_lock_task_mm(p);
if (!t)
continue;
- cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
+ arch_clear_mm_cpumask_cpu(cpu, t->mm);
task_unlock(t);
}
rcu_read_unlock();
@@ -1379,7 +1399,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
[CPUHP_BRINGUP_CPU] = {
.name = "cpu:bringup",
.startup.single = bringup_cpu,
- .teardown.single = NULL,
+ .teardown.single = finish_cpu,
.cant_stop = true,
},
/* Final state before CPU kills itself */
@@ -2070,10 +2090,8 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
*/
cpuhp_offline_cpu_device(cpu);
}
- if (!ret) {
+ if (!ret)
cpu_smt_control = ctrlval;
- arch_smt_update();
- }
cpu_maps_update_done();
return ret;
}
@@ -2084,7 +2102,6 @@ int cpuhp_smt_enable(void)
cpu_maps_update_begin();
cpu_smt_control = CPU_SMT_ENABLED;
- arch_smt_update();
for_each_present_cpu(cpu) {
/* Skip online CPUs and CPUs on offline nodes */
if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index 67b02e138a47..2ed6351e2a7e 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
*/
int cpu_pm_enter(void)
{
- int nr_calls;
+ int nr_calls = 0;
int ret = 0;
ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
@@ -140,7 +140,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
*/
int cpu_cluster_pm_enter(void)
{
- int nr_calls;
+ int nr_calls = 0;
int ret = 0;
ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 94aa9ae0007a..8c76141c99c8 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -95,14 +95,6 @@ int dbg_switch_cpu;
/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;
-static int __init opt_kgdb_con(char *str)
-{
- kgdb_use_con = 1;
- return 0;
-}
-
-early_param("kgdbcon", opt_kgdb_con);
-
module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);
@@ -444,6 +436,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
if (exception_level > 1) {
dump_stack();
+ kgdb_io_module_registered = false;
panic("Recursive entry to debugger");
}
@@ -488,6 +481,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
arch_kgdb_ops.disable_hw_break(regs);
acquirelock:
+ rcu_read_lock();
/*
* Interrupts will be restored by the 'trap return' code, except when
* single stepping.
@@ -544,6 +538,7 @@ return_normal:
atomic_dec(&slaves_in_kgdb);
dbg_touch_watchdogs();
local_irq_restore(flags);
+ rcu_read_unlock();
return 0;
}
cpu_relax();
@@ -562,6 +557,7 @@ return_normal:
raw_spin_unlock(&dbg_master_lock);
dbg_touch_watchdogs();
local_irq_restore(flags);
+ rcu_read_unlock();
goto acquirelock;
}
@@ -577,6 +573,8 @@ return_normal:
if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
goto kgdb_restore;
+ atomic_inc(&ignore_console_lock_warning);
+
/* Call the I/O driver's pre_exception routine */
if (dbg_io_ops->pre_exception)
dbg_io_ops->pre_exception();
@@ -649,6 +647,8 @@ cpu_master_loop:
if (dbg_io_ops->post_exception)
dbg_io_ops->post_exception();
+ atomic_dec(&ignore_console_lock_warning);
+
if (!kgdb_single_step) {
raw_spin_unlock(&dbg_slave_lock);
/* Wait till all the CPUs have quit from the debugger. */
@@ -681,6 +681,7 @@ kgdb_restore:
raw_spin_unlock(&dbg_master_lock);
dbg_touch_watchdogs();
local_irq_restore(flags);
+ rcu_read_unlock();
return kgdb_info[cpu].ret_state;
}
@@ -811,6 +812,20 @@ static struct console kgdbcons = {
.index = -1,
};
+static int __init opt_kgdb_con(char *str)
+{
+ kgdb_use_con = 1;
+
+ if (kgdb_io_module_registered && !kgdb_con_registered) {
+ register_console(&kgdbcons);
+ kgdb_con_registered = 1;
+ }
+
+ return 0;
+}
+
+early_param("kgdbcon", opt_kgdb_con);
+
#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key)
{
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 6a4b41484afe..b45576ca3b0d 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -679,12 +679,16 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
size_avail = sizeof(kdb_buffer) - len;
goto kdb_print_out;
}
- if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH)
+ if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) {
/*
* This was a interactive search (using '/' at more
- * prompt) and it has completed. Clear the flag.
+ * prompt) and it has completed. Replace the \0 with
+ * its original value to ensure multi-line strings
+ * are handled properly, and return to normal mode.
*/
+ *cphold = replaced_byte;
kdb_grepping_flag = 0;
+ }
/*
* at this point the string is a full line and
* should be printed, up to the null.
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 2118d8258b7c..ad53b19734e9 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -233,7 +233,7 @@ extern struct task_struct *kdb_curr_task(int);
#define kdb_do_each_thread(g, p) do_each_thread(g, p)
#define kdb_while_each_thread(g, p) while_each_thread(g, p)
-#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
+#define GFP_KDB (in_dbg_master() ? GFP_ATOMIC : GFP_KERNEL)
extern void *debug_kmalloc(size_t size, gfp_t flags);
extern void debug_kfree(void *);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 2a8c41f12d45..6f7d4e977c5c 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -239,6 +239,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
}
io_tlb_index = 0;
+ no_iotlb_memory = false;
if (verbose)
swiotlb_print_info();
@@ -270,9 +271,11 @@ swiotlb_init(int verbose)
if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
return;
- if (io_tlb_start)
+ if (io_tlb_start) {
memblock_free_early(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+ io_tlb_start = 0;
+ }
pr_warn("Cannot allocate buffer");
no_iotlb_memory = true;
}
@@ -376,6 +379,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
}
io_tlb_index = 0;
+ no_iotlb_memory = false;
swiotlb_print_info();
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
deleted file mode 100644
index 57fb4dcff434..000000000000
--- a/kernel/elfcore.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/elf.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/binfmts.h>
-#include <linux/elfcore.h>
-
-Elf_Half __weak elf_core_extra_phdrs(void)
-{
- return 0;
-}
-
-int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
-{
- return 1;
-}
-
-int __weak elf_core_write_extra_data(struct coredump_params *cprm)
-{
- return 1;
-}
-
-size_t __weak elf_core_extra_data_size(void)
-{
- return 0;
-}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8c70ee23fbe9..b8b74a4a524c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -94,11 +94,11 @@ static void remote_function(void *data)
* @info: the function call argument
*
* Calls the function @func when the task is currently running. This might
- * be on the current CPU, which just calls the function directly
+ * be on the current CPU, which just calls the function directly. This will
+ * retry on any failure from smp_call_function_single(), for example when
+ * the CPU returned by task_cpu() goes offline concurrently.
*
- * returns: @func return value, or
- * -ESRCH - when the process isn't running
- * -EAGAIN - when the process moved away
+ * returns @func return value or -ESRCH or -ENXIO when the process isn't running
*/
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
@@ -111,11 +111,17 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info)
};
int ret;
- do {
- ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
+ for (;;) {
+ ret = smp_call_function_single(task_cpu(p), remote_function,
+ &data, 1);
if (!ret)
ret = data.ret;
- } while (ret == -EAGAIN);
+
+ if (ret != -EAGAIN)
+ break;
+
+ cond_resched();
+ }
return ret;
}
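
The hunk above turns an unbounded do/while into an explicit for(;;) that yields between attempts. A minimal userspace sketch of the same retry shape, assuming a hypothetical try_call() that can transiently fail with -EAGAIN (not a kernel API):

#include <errno.h>
#include <sched.h>
#include <stdio.h>

/* Stand-in for smp_call_function_single(): fails twice, then succeeds. */
static int try_call(void)
{
        static int attempts;
        return (++attempts < 3) ? -EAGAIN : 0;
}

static int call_with_retry(void)
{
        int ret;

        for (;;) {
                ret = try_call();
                if (ret != -EAGAIN)
                        break;
                /* Give the scheduler a chance, mirroring cond_resched(). */
                sched_yield();
        }
        return ret;
}

int main(void)
{
        printf("call_with_retry() = %d\n", call_with_retry());
        return 0;
}
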
@@ -5469,11 +5475,11 @@ static void perf_pmu_output_stop(struct perf_event *event);
static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
-
struct ring_buffer *rb = ring_buffer_get(event);
struct user_struct *mmap_user = rb->mmap_user;
int mmap_locked = rb->mmap_locked;
unsigned long size = perf_data_size(rb);
+ bool detach_rest = false;
if (event->pmu->event_unmapped)
event->pmu->event_unmapped(event, vma->vm_mm);
@@ -5504,7 +5510,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
mutex_unlock(&event->mmap_mutex);
}
- atomic_dec(&rb->mmap_count);
+ if (atomic_dec_and_test(&rb->mmap_count))
+ detach_rest = true;
if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
goto out_put;
@@ -5513,7 +5520,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
mutex_unlock(&event->mmap_mutex);
/* If there's still other mmap()s of this buffer, we're done. */
- if (atomic_read(&rb->mmap_count))
+ if (!detach_rest)
goto out_put;
/*
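
The perf_mmap_close() change captures the result of atomic_dec_and_test() in detach_rest instead of re-reading rb->mmap_count later, so the "last mapper detaches the rest" decision cannot race with a concurrent mmap(). A rough userspace analogue using C11 atomics; the names are illustrative, not kernel symbols:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int mmap_count = 2;       /* two mappings of the same buffer */

static void close_mapping(const char *who)
{
        /*
         * Decrement and remember whether we were the last user *now*;
         * re-reading mmap_count later could miss a concurrent increment.
         */
        bool detach_rest = (atomic_fetch_sub(&mmap_count, 1) == 1);

        if (detach_rest)
                printf("%s: last mapping gone, detaching buffer\n", who);
        else
                printf("%s: other mappings remain\n", who);
}

int main(void)
{
        close_mapping("thread A");
        close_mapping("thread B");
        return 0;
}
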
@@ -6411,9 +6418,12 @@ static u64 perf_virt_to_phys(u64 virt)
* Try IRQ-safe __get_user_pages_fast first.
* If failed, leave phys_addr as 0.
*/
- if ((current->mm != NULL) &&
- (__get_user_pages_fast(virt, 1, 0, &p) == 1))
- phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
+ if (current->mm != NULL) {
+ pagefault_disable();
+ if (__get_user_pages_fast(virt, 1, 0, &p) == 1)
+ phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
+ pagefault_enable();
+ }
if (p)
put_page(p);
@@ -6920,10 +6930,17 @@ static void perf_event_task_output(struct perf_event *event,
goto out;
task_event->event_id.pid = perf_event_pid(event, task);
- task_event->event_id.ppid = perf_event_pid(event, current);
-
task_event->event_id.tid = perf_event_tid(event, task);
- task_event->event_id.ptid = perf_event_tid(event, current);
+
+ if (task_event->event_id.header.type == PERF_RECORD_EXIT) {
+ task_event->event_id.ppid = perf_event_pid(event,
+ task->real_parent);
+ task_event->event_id.ptid = perf_event_pid(event,
+ task->real_parent);
+ } else { /* PERF_RECORD_FORK */
+ task_event->event_id.ppid = perf_event_pid(event, current);
+ task_event->event_id.ptid = perf_event_tid(event, current);
+ }
task_event->event_id.time = perf_event_clock(event);
@@ -9031,6 +9048,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
int fpos = token == IF_SRC_FILE ? 2 : 1;
+ kfree(filename);
filename = match_strdup(&args[fpos]);
if (!filename) {
ret = -ENOMEM;
@@ -9077,16 +9095,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
*/
ret = -EOPNOTSUPP;
if (!event->ctx->task)
- goto fail_free_name;
+ goto fail;
/* look up the path and grab its inode */
ret = kern_path(filename, LOOKUP_FOLLOW,
&filter->path);
if (ret)
- goto fail_free_name;
-
- kfree(filename);
- filename = NULL;
+ goto fail;
ret = -EINVAL;
if (!filter->path.dentry ||
@@ -9106,13 +9121,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
if (state != IF_STATE_ACTION)
goto fail;
+ kfree(filename);
kfree(orig);
return 0;
-fail_free_name:
- kfree(filename);
fail:
+ kfree(filename);
free_filters_list(filters);
kfree(orig);
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 6dc725a7e7bc..8fc0ddc38cb6 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -209,7 +209,7 @@ static inline int get_recursion_context(int *recursion)
rctx = 3;
else if (in_irq())
rctx = 2;
- else if (in_softirq())
+ else if (in_serving_softirq())
rctx = 1;
else
rctx = 0;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index c173e4131df8..24342bca11f2 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -612,10 +612,6 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
if (ret)
goto out;
- /* uprobe_write_opcode() assumes we don't cross page boundary */
- BUG_ON((uprobe->offset & ~PAGE_MASK) +
- UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
-
smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
set_bit(UPROBE_COPY_INSN, &uprobe->flags);
@@ -911,6 +907,13 @@ static int __uprobe_register(struct inode *inode, loff_t offset,
if (offset > i_size_read(inode))
return -EINVAL;
+ /*
+ * This ensures that copy_from_page() and copy_to_page()
+ * can't cross page boundary.
+ */
+ if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
+ return -EINVAL;
+
retry:
uprobe = alloc_uprobe(inode, offset);
if (!uprobe)
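
The new check in __uprobe_register() rejects probe offsets that are not aligned to the breakpoint instruction size, which is what guarantees copy_from_page()/copy_to_page() never cross a page boundary. A standalone sketch of that validation; the size constants are chosen for illustration only, the real values are architecture specific:

#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

#define SWBP_INSN_SIZE  4u      /* illustrative breakpoint instruction size */
#define PAGE_SIZE       4096u

static int validate_offset(uint64_t offset)
{
        if (!IS_ALIGNED(offset, SWBP_INSN_SIZE))
                return -1;      /* would be -EINVAL in the kernel */

        /* With the sizes above, alignment implies no page crossing. */
        return ((offset % PAGE_SIZE) + SWBP_INSN_SIZE <= PAGE_SIZE) ? 0 : -1;
}

int main(void)
{
        printf("offset 0x1000 -> %d\n", validate_offset(0x1000));
        printf("offset 0x1ffe -> %d\n", validate_offset(0x1ffe));
        return 0;
}
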
@@ -1708,6 +1711,9 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
uprobe_opcode_t opcode;
int result;
+ if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
+ return -EINVAL;
+
pagefault_disable();
result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
pagefault_enable();
@@ -1891,7 +1897,7 @@ static void handle_swbp(struct pt_regs *regs)
if (!uprobe) {
if (is_swbp > 0) {
/* No matching uprobe; signal SIGTRAP. */
- send_sig(SIGTRAP, current, 0);
+ force_sig(SIGTRAP, current);
} else {
/*
* Either we raced with uprobe_unregister() or we can't
diff --git a/kernel/exit.c b/kernel/exit.c
index 54c3269b8dda..908e7a33e1fc 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -498,7 +498,7 @@ static void exit_mm(void)
struct mm_struct *mm = current->mm;
struct core_state *core_state;
- mm_release(current, mm);
+ exit_mm_release(current, mm);
if (!mm)
return;
sync_mm_rss(mm);
@@ -517,7 +517,10 @@ static void exit_mm(void)
up_read(&mm->mmap_sem);
self.task = current;
- self.next = xchg(&core_state->dumper.next, &self);
+ if (self.task->flags & PF_SIGNALED)
+ self.next = xchg(&core_state->dumper.next, &self);
+ else
+ self.task = NULL;
/*
* Implies mb(), the result of xchg() must be visible
* to core_state->dumper.
@@ -772,8 +775,12 @@ void __noreturn do_exit(long code)
struct task_struct *tsk = current;
int group_dead;
- profile_task_exit(tsk);
- kcov_task_exit(tsk);
+ /*
+ * We can get here from a kernel oops, sometimes with preemption off.
+ * Start by checking for critical errors.
+ * Then fix up important state like USER_DS and preemption.
+ * Then do everything else.
+ */
WARN_ON(blk_needs_flush_plug(tsk));
@@ -791,6 +798,16 @@ void __noreturn do_exit(long code)
*/
set_fs(USER_DS);
+ if (unlikely(in_atomic())) {
+ pr_info("note: %s[%d] exited with preempt_count %d\n",
+ current->comm, task_pid_nr(current),
+ preempt_count());
+ preempt_count_set(PREEMPT_ENABLED);
+ }
+
+ profile_task_exit(tsk);
+ kcov_task_exit(tsk);
+
ptrace_event(PTRACE_EVENT_EXIT, code);
validate_creds_for_do_exit(tsk);
@@ -801,39 +818,12 @@ void __noreturn do_exit(long code)
*/
if (unlikely(tsk->flags & PF_EXITING)) {
pr_alert("Fixing recursive fault but reboot is needed!\n");
- /*
- * We can do this unlocked here. The futex code uses
- * this flag just to verify whether the pi state
- * cleanup has been done or not. In the worst case it
- * loops once more. We pretend that the cleanup was
- * done as there is no way to return. Either the
- * OWNER_DIED bit is set by now or we push the blocked
- * task into the wait for ever nirwana as well.
- */
- tsk->flags |= PF_EXITPIDONE;
+ futex_exit_recursive(tsk);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
exit_signals(tsk); /* sets PF_EXITING */
- /*
- * Ensure that all new tsk->pi_lock acquisitions must observe
- * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
- */
- smp_mb();
- /*
- * Ensure that we must observe the pi_state in exit_mm() ->
- * mm_release() -> exit_pi_state_list().
- */
- raw_spin_lock_irq(&tsk->pi_lock);
- raw_spin_unlock_irq(&tsk->pi_lock);
-
- if (unlikely(in_atomic())) {
- pr_info("note: %s[%d] exited with preempt_count %d\n",
- current->comm, task_pid_nr(current),
- preempt_count());
- preempt_count_set(PREEMPT_ENABLED);
- }
/* sync mm's RSS info before statistics gathering */
if (tsk->mm)
@@ -908,12 +898,6 @@ void __noreturn do_exit(long code)
* Make sure we are holding no locks:
*/
debug_check_no_locks_held();
- /*
- * We can do this unlocked here. The futex code uses this flag
- * just to verify whether the pi state cleanup has been done
- * or not. In the worst case it loops once more.
- */
- tsk->flags |= PF_EXITPIDONE;
if (tsk->io_context)
exit_io_context(tsk);
@@ -1617,10 +1601,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
if (!infop)
return err;
- if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
+ if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop)))
return -EFAULT;
- user_access_begin();
unsafe_put_user(signo, &infop->si_signo, Efault);
unsafe_put_user(0, &infop->si_errno, Efault);
unsafe_put_user(info.cause, &infop->si_code, Efault);
@@ -1745,10 +1728,9 @@ COMPAT_SYSCALL_DEFINE5(waitid,
if (!infop)
return err;
- if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
+ if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop)))
return -EFAULT;
- user_access_begin();
unsafe_put_user(signo, &infop->si_signo, Efault);
unsafe_put_user(0, &infop->si_errno, Efault);
unsafe_put_user(info.cause, &infop->si_code, Efault);
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
index bc80a4e268c0..a52151a2291f 100644
--- a/kernel/fail_function.c
+++ b/kernel/fail_function.c
@@ -261,7 +261,7 @@ static ssize_t fei_write(struct file *file, const char __user *buffer,
if (copy_from_user(buf, buffer, count)) {
ret = -EFAULT;
- goto out;
+ goto out_free;
}
buf[count] = '\0';
sym = strstrip(buf);
@@ -315,8 +315,9 @@ static ssize_t fei_write(struct file *file, const char __user *buffer,
ret = count;
}
out:
- kfree(buf);
mutex_unlock(&fei_lock);
+out_free:
+ kfree(buf);
return ret;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 1a2d18e98bf9..cf535b9d5db7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1217,24 +1217,8 @@ static int wait_for_vfork_done(struct task_struct *child,
* restoring the old one. . .
* Eric Biederman 10 January 1998
*/
-void mm_release(struct task_struct *tsk, struct mm_struct *mm)
+static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
- /* Get rid of any futexes when releasing the mm */
-#ifdef CONFIG_FUTEX
- if (unlikely(tsk->robust_list)) {
- exit_robust_list(tsk);
- tsk->robust_list = NULL;
- }
-#ifdef CONFIG_COMPAT
- if (unlikely(tsk->compat_robust_list)) {
- compat_exit_robust_list(tsk);
- tsk->compat_robust_list = NULL;
- }
-#endif
- if (unlikely(!list_empty(&tsk->pi_state_list)))
- exit_pi_state_list(tsk);
-#endif
-
uprobe_free_utask(tsk);
/* Get rid of any cached register state */
@@ -1267,6 +1251,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
complete_vfork_done(tsk);
}
+void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
+{
+ futex_exit_release(tsk);
+ mm_release(tsk, mm);
+}
+
+void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
+{
+ futex_exec_release(tsk);
+ mm_release(tsk, mm);
+}
+
/*
* Allocate a new mm structure and copy contents from the
* mm structure of the passed in task structure.
@@ -1647,6 +1643,25 @@ static __always_inline void delayed_free_task(struct task_struct *tsk)
free_task(tsk);
}
+static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
+{
+ /* Skip if kernel thread */
+ if (!tsk->mm)
+ return;
+
+ /* Skip if spawning a thread or using vfork */
+ if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
+ return;
+
+ /* We need to synchronize with __set_oom_adj */
+ mutex_lock(&oom_adj_mutex);
+ set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
+ /* Update the values in case they were changed after copy_signal */
+ tsk->signal->oom_score_adj = current->signal->oom_score_adj;
+ tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
+ mutex_unlock(&oom_adj_mutex);
+}
+
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
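
copy_oom_score_adj() only marks the mm as multiprocess when the child shares the address space but is neither a thread nor a vfork child, i.e. (clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) == CLONE_VM. A small sketch of just that flag test, using the uapi clone flag values:

#include <stdbool.h>
#include <stdio.h>

#define CLONE_VM        0x00000100
#define CLONE_VFORK     0x00004000
#define CLONE_THREAD    0x00010000

/* True only for "shares the address space, but is a full process". */
static bool shares_mm_as_separate_process(unsigned long clone_flags)
{
        return (clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) == CLONE_VM;
}

int main(void)
{
        printf("CLONE_VM only           : %d\n",
               shares_mm_as_separate_process(CLONE_VM));
        printf("CLONE_VM | CLONE_THREAD : %d\n",
               shares_mm_as_separate_process(CLONE_VM | CLONE_THREAD));
        printf("CLONE_VM | CLONE_VFORK  : %d\n",
               shares_mm_as_separate_process(CLONE_VM | CLONE_VFORK));
        return 0;
}
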
@@ -1918,14 +1933,8 @@ static __latent_entropy struct task_struct *copy_process(
#ifdef CONFIG_BLOCK
p->plug = NULL;
#endif
-#ifdef CONFIG_FUTEX
- p->robust_list = NULL;
-#ifdef CONFIG_COMPAT
- p->compat_robust_list = NULL;
-#endif
- INIT_LIST_HEAD(&p->pi_state_list);
- p->pi_state_cache = NULL;
-#endif
+ futex_init_task(p);
+
/*
* sigaltstack should be cleared when sharing the same VM
*/
@@ -1946,14 +1955,9 @@ static __latent_entropy struct task_struct *copy_process(
/* ok, now we should be set up.. */
p->pid = pid_nr(pid);
if (clone_flags & CLONE_THREAD) {
- p->exit_signal = -1;
p->group_leader = current->group_leader;
p->tgid = current->tgid;
} else {
- if (clone_flags & CLONE_PARENT)
- p->exit_signal = current->group_leader->exit_signal;
- else
- p->exit_signal = (clone_flags & CSIGNAL);
p->group_leader = p;
p->tgid = p->pid;
}
@@ -1998,9 +2002,14 @@ static __latent_entropy struct task_struct *copy_process(
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
p->parent_exec_id = current->parent_exec_id;
+ if (clone_flags & CLONE_THREAD)
+ p->exit_signal = -1;
+ else
+ p->exit_signal = current->group_leader->exit_signal;
} else {
p->real_parent = current;
p->parent_exec_id = current->self_exec_id;
+ p->exit_signal = (clone_flags & CSIGNAL);
}
klp_copy_process(p);
@@ -2084,6 +2093,8 @@ static __latent_entropy struct task_struct *copy_process(
trace_task_newtask(p, clone_flags);
uprobe_copy_process(p, clone_flags);
+ copy_oom_score_adj(clone_flags, p);
+
return p;
bad_fork_cancel_cgroup:
diff --git a/kernel/futex.c b/kernel/futex.c
index 920d853a8e9e..526ebcff5a0a 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -341,6 +341,12 @@ static inline bool should_fail_futex(bool fshared)
}
#endif /* CONFIG_FAIL_FUTEX */
+#ifdef CONFIG_COMPAT
+static void compat_exit_robust_list(struct task_struct *curr);
+#else
+static inline void compat_exit_robust_list(struct task_struct *curr) { }
+#endif
+
static inline void futex_get_mm(union futex_key *key)
{
mmgrab(key->private.mm);
@@ -833,6 +839,29 @@ static struct futex_pi_state *alloc_pi_state(void)
return pi_state;
}
+static void pi_state_update_owner(struct futex_pi_state *pi_state,
+ struct task_struct *new_owner)
+{
+ struct task_struct *old_owner = pi_state->owner;
+
+ lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
+
+ if (old_owner) {
+ raw_spin_lock(&old_owner->pi_lock);
+ WARN_ON(list_empty(&pi_state->list));
+ list_del_init(&pi_state->list);
+ raw_spin_unlock(&old_owner->pi_lock);
+ }
+
+ if (new_owner) {
+ raw_spin_lock(&new_owner->pi_lock);
+ WARN_ON(!list_empty(&pi_state->list));
+ list_add(&pi_state->list, &new_owner->pi_state_list);
+ pi_state->owner = new_owner;
+ raw_spin_unlock(&new_owner->pi_lock);
+ }
+}
+
static void get_pi_state(struct futex_pi_state *pi_state)
{
WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
@@ -855,17 +884,12 @@ static void put_pi_state(struct futex_pi_state *pi_state)
* and has cleaned up the pi_state already
*/
if (pi_state->owner) {
- struct task_struct *owner;
+ unsigned long flags;
- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
- owner = pi_state->owner;
- if (owner) {
- raw_spin_lock(&owner->pi_lock);
- list_del_init(&pi_state->list);
- raw_spin_unlock(&owner->pi_lock);
- }
- rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
+ pi_state_update_owner(pi_state, NULL);
+ rt_mutex_proxy_unlock(&pi_state->pi_mutex);
+ raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
}
if (current->pi_state_cache) {
@@ -889,7 +913,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
* Kernel cleans up PI-state, but userspace is likely hosed.
* (Robust-futex cleanup is separate and might save the day for userspace.)
*/
-void exit_pi_state_list(struct task_struct *curr)
+static void exit_pi_state_list(struct task_struct *curr)
{
struct list_head *next, *head = &curr->pi_state_list;
struct futex_pi_state *pi_state;
@@ -959,7 +983,8 @@ void exit_pi_state_list(struct task_struct *curr)
}
raw_spin_unlock_irq(&curr->pi_lock);
}
-
+#else
+static inline void exit_pi_state_list(struct task_struct *curr) { }
#endif
/*
@@ -1009,7 +1034,8 @@ void exit_pi_state_list(struct task_struct *curr)
* FUTEX_OWNER_DIED bit. See [4]
*
* [10] There is no transient state which leaves owner and user space
- * TID out of sync.
+ * TID out of sync. Except one error case where the kernel is denied
+ * write access to the user address, see fixup_pi_state_owner().
*
*
* Serialization and lifetime rules:
@@ -1168,16 +1194,47 @@ out_error:
return ret;
}
+/**
+ * wait_for_owner_exiting - Block until the owner has exited
+ * @ret: owner's current futex lock status
+ * @exiting: Pointer to the exiting task
+ *
+ * Caller must hold a refcount on @exiting.
+ */
+static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
+{
+ if (ret != -EBUSY) {
+ WARN_ON_ONCE(exiting);
+ return;
+ }
+
+ if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
+ return;
+
+ mutex_lock(&exiting->futex_exit_mutex);
+ /*
+ * No point in doing state checking here. If the waiter got here
+ * while the task was in exec()->futex_exec_release() then it can
+ * have any FUTEX_STATE_* value when the waiter has acquired the
+ * mutex: FUTEX_STATE_OK if it is still running, or EXITING/DEAD if it
+ * has reached exit() already. Highly unlikely and not a problem. Just
+ * one more round
+ * through the futex maze.
+ */
+ mutex_unlock(&exiting->futex_exit_mutex);
+
+ put_task_struct(exiting);
+}
+
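
wait_for_owner_exiting() simply acquires and releases the exiting task's futex_exit_mutex, which blocks until the exit-side cleanup that holds the mutex has finished, then drops the task reference. A rough pthread sketch of that handshake; all names are invented for the sketch:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Exit side: holds the mutex for the whole cleanup. */
static void *exiting_owner(void *arg)
{
        pthread_mutex_lock(&exit_mutex);
        puts("owner: cleaning up futex state");
        sleep(1);                       /* pretend the cleanup takes a while */
        pthread_mutex_unlock(&exit_mutex);
        return NULL;
}

/* Waiter side: block until the cleanup is done, then retry the futex op. */
static void wait_for_owner_exiting(void)
{
        pthread_mutex_lock(&exit_mutex);
        pthread_mutex_unlock(&exit_mutex);
        puts("waiter: owner finished exiting, retrying");
}

int main(void)
{
        pthread_t owner;

        pthread_create(&owner, NULL, exiting_owner, NULL);
        usleep(100 * 1000);             /* let the owner grab the mutex first */
        wait_for_owner_exiting();
        pthread_join(owner, NULL);
        return 0;
}
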
static int handle_exit_race(u32 __user *uaddr, u32 uval,
struct task_struct *tsk)
{
u32 uval2;
/*
- * If PF_EXITPIDONE is not yet set, then try again.
+ * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
+ * caller that the alleged owner is busy.
*/
- if (tsk && !(tsk->flags & PF_EXITPIDONE))
- return -EAGAIN;
+ if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
+ return -EBUSY;
/*
* Reread the user space value to handle the following situation:
@@ -1195,8 +1252,9 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
* *uaddr = 0xC0000000; tsk = get_task(PID);
* } if (!tsk->flags & PF_EXITING) {
* ... attach();
- * tsk->flags |= PF_EXITPIDONE; } else {
- * if (!(tsk->flags & PF_EXITPIDONE))
+ * tsk->futex_state = } else {
+ * FUTEX_STATE_DEAD; if (tsk->futex_state !=
+ * FUTEX_STATE_DEAD)
* return -EAGAIN;
* return -ESRCH; <--- FAIL
* }
@@ -1227,7 +1285,8 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
* it after doing proper sanity checks.
*/
static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
- struct futex_pi_state **ps)
+ struct futex_pi_state **ps,
+ struct task_struct **exiting)
{
pid_t pid = uval & FUTEX_TID_MASK;
struct futex_pi_state *pi_state;
@@ -1252,22 +1311,33 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
}
/*
- * We need to look at the task state flags to figure out,
- * whether the task is exiting. To protect against the do_exit
- * change of the task flags, we do this protected by
- * p->pi_lock:
+ * We need to look at the task state to figure out, whether the
+ * task is exiting. To protect against the change of the task state
+ * in futex_exit_release(), we do this protected by p->pi_lock:
*/
raw_spin_lock_irq(&p->pi_lock);
- if (unlikely(p->flags & PF_EXITING)) {
+ if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
/*
- * The task is on the way out. When PF_EXITPIDONE is
- * set, we know that the task has finished the
- * cleanup:
+ * The task is on the way out. When the futex state is
+ * FUTEX_STATE_DEAD, we know that the task has finished
+ * the cleanup:
*/
int ret = handle_exit_race(uaddr, uval, p);
raw_spin_unlock_irq(&p->pi_lock);
- put_task_struct(p);
+ /*
+ * If the owner task is between FUTEX_STATE_EXITING and
+ * FUTEX_STATE_DEAD then store the task pointer and keep
+ * the reference on the task struct. The calling code will
+ * drop all locks, wait for the task to reach
+ * FUTEX_STATE_DEAD and then drop the refcount. This is
+ * required to prevent a live lock when the current task
+ * preempted the exiting task between the two states.
+ */
+ if (ret == -EBUSY)
+ *exiting = p;
+ else
+ put_task_struct(p);
return ret;
}
@@ -1306,7 +1376,8 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
static int lookup_pi_state(u32 __user *uaddr, u32 uval,
struct futex_hash_bucket *hb,
- union futex_key *key, struct futex_pi_state **ps)
+ union futex_key *key, struct futex_pi_state **ps,
+ struct task_struct **exiting)
{
struct futex_q *top_waiter = futex_top_waiter(hb, key);
@@ -1321,7 +1392,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
* We are the first waiter - try to look up the owner based on
* @uval and attach to it.
*/
- return attach_to_pi_owner(uaddr, uval, key, ps);
+ return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
}
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
@@ -1349,6 +1420,8 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
* lookup
* @task: the task to perform the atomic lock work for. This will
* be "current" except in the case of requeue pi.
+ * @exiting: Pointer to store the task pointer of the owner task
+ * which is in the middle of exiting
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
*
* Return:
@@ -1357,11 +1430,17 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
* - <0 - error
*
* The hb->lock and futex_key refs shall be held by the caller.
+ *
+ * @exiting is only set when the return value is -EBUSY. If so, this holds
+ * a refcount on the exiting task on return and the caller needs to drop it
+ * after waiting for the exit to complete.
*/
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
union futex_key *key,
struct futex_pi_state **ps,
- struct task_struct *task, int set_waiters)
+ struct task_struct *task,
+ struct task_struct **exiting,
+ int set_waiters)
{
u32 uval, newval, vpid = task_pid_vnr(task);
struct futex_q *top_waiter;
@@ -1431,7 +1510,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
* attach to the owner. If that fails, no harm done, we only
* set the FUTEX_WAITERS bit in the user space variable.
*/
- return attach_to_pi_owner(uaddr, newval, key, ps);
+ return attach_to_pi_owner(uaddr, newval, key, ps, exiting);
}
/**
@@ -1517,8 +1596,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
*/
newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
- if (unlikely(should_fail_futex(true)))
+ if (unlikely(should_fail_futex(true))) {
ret = -EFAULT;
+ goto out_unlock;
+ }
ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
if (!ret && (curval != uval)) {
@@ -1534,26 +1615,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
ret = -EINVAL;
}
- if (ret)
- goto out_unlock;
-
- /*
- * This is a point of no return; once we modify the uval there is no
- * going back and subsequent operations must not fail.
- */
-
- raw_spin_lock(&pi_state->owner->pi_lock);
- WARN_ON(list_empty(&pi_state->list));
- list_del_init(&pi_state->list);
- raw_spin_unlock(&pi_state->owner->pi_lock);
-
- raw_spin_lock(&new_owner->pi_lock);
- WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &new_owner->pi_state_list);
- pi_state->owner = new_owner;
- raw_spin_unlock(&new_owner->pi_lock);
-
- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+ if (!ret) {
+ /*
+ * This is a point of no return; once we modified the uval
+ * there is no going back and subsequent operations must
+ * not fail.
+ */
+ pi_state_update_owner(pi_state, new_owner);
+ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+ }
out_unlock:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
@@ -1850,6 +1920,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
* @key1: the from futex key
* @key2: the to futex key
* @ps: address to store the pi_state pointer
+ * @exiting: Pointer to store the task pointer of the owner task
+ * which is in the middle of exiting
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
*
* Try and get the lock on behalf of the top waiter if we can do it atomically.
@@ -1857,16 +1929,20 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
* then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
* hb1 and hb2 must be held by the caller.
*
+ * @exiting is only set when the return value is -EBUSY. If so, this holds
+ * a refcount on the exiting task on return and the caller needs to drop it
+ * after waiting for the exit to complete.
+ *
* Return:
* - 0 - failed to acquire the lock atomically;
* - >0 - acquired the lock, return value is vpid of the top_waiter
* - <0 - error
*/
-static int futex_proxy_trylock_atomic(u32 __user *pifutex,
- struct futex_hash_bucket *hb1,
- struct futex_hash_bucket *hb2,
- union futex_key *key1, union futex_key *key2,
- struct futex_pi_state **ps, int set_waiters)
+static int
+futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
+ struct futex_hash_bucket *hb2, union futex_key *key1,
+ union futex_key *key2, struct futex_pi_state **ps,
+ struct task_struct **exiting, int set_waiters)
{
struct futex_q *top_waiter = NULL;
u32 curval;
@@ -1903,7 +1979,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
*/
vpid = task_pid_vnr(top_waiter->task);
ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
- set_waiters);
+ exiting, set_waiters);
if (ret == 1) {
requeue_pi_wake_futex(top_waiter, key2, hb2);
return vpid;
@@ -2032,6 +2108,8 @@ retry_private:
}
if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
+ struct task_struct *exiting = NULL;
+
/*
* Attempt to acquire uaddr2 and wake the top waiter. If we
* intend to requeue waiters, force setting the FUTEX_WAITERS
@@ -2039,7 +2117,8 @@ retry_private:
* faults rather in the requeue loop below.
*/
ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
- &key2, &pi_state, nr_requeue);
+ &key2, &pi_state,
+ &exiting, nr_requeue);
/*
* At this point the top_waiter has either taken uaddr2 or is
@@ -2066,7 +2145,8 @@ retry_private:
* If that call succeeds then we have pi_state and an
* initial refcount on it.
*/
- ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
+ ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
+ &pi_state, &exiting);
}
switch (ret) {
@@ -2084,17 +2164,24 @@ retry_private:
if (!ret)
goto retry;
goto out;
+ case -EBUSY:
case -EAGAIN:
/*
* Two reasons for this:
- * - Owner is exiting and we just wait for the
+ * - EBUSY: Owner is exiting and we just wait for the
* exit to complete.
- * - The user space value changed.
+ * - EAGAIN: The user space value changed.
*/
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
put_futex_key(&key2);
put_futex_key(&key1);
+ /*
+ * Handle the case where the owner is in the middle of
+ * exiting. Wait for the exit to complete otherwise
+ * this task might loop forever, aka. live lock.
+ */
+ wait_for_owner_exiting(ret, exiting);
cond_resched();
goto retry;
default:
@@ -2359,18 +2446,13 @@ static void unqueue_me_pi(struct futex_q *q)
spin_unlock(q->lock_ptr);
}
-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
- struct task_struct *argowner)
+static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ struct task_struct *argowner)
{
+ u32 uval, uninitialized_var(curval), newval, newtid;
struct futex_pi_state *pi_state = q->pi_state;
- u32 uval, uninitialized_var(curval), newval;
struct task_struct *oldowner, *newowner;
- u32 newtid;
- int ret, err = 0;
-
- lockdep_assert_held(q->lock_ptr);
-
- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ int err = 0;
oldowner = pi_state->owner;
@@ -2404,21 +2486,31 @@ retry:
* We raced against a concurrent self; things are
* already fixed up. Nothing to do.
*/
- ret = 0;
- goto out_unlock;
+ return 0;
}
if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
- /* We got the lock after all, nothing to fix. */
- ret = 0;
- goto out_unlock;
+ /* We got the lock. pi_state is correct. Tell caller. */
+ return 1;
}
/*
- * Since we just failed the trylock; there must be an owner.
+ * The trylock just failed, so either there is an owner or
+ * there is a higher priority waiter than this one.
*/
newowner = rt_mutex_owner(&pi_state->pi_mutex);
- BUG_ON(!newowner);
+ /*
+ * If the higher priority waiter has not yet taken over the
+ * rtmutex then newowner is NULL. We can't return here with
+ * that state because it's inconsistent vs. the user space
+ * state. So drop the locks and try again. It's a valid
+ * situation and not any different from the other retry
+ * conditions.
+ */
+ if (unlikely(!newowner)) {
+ err = -EAGAIN;
+ goto handle_err;
+ }
} else {
WARN_ON_ONCE(argowner != current);
if (oldowner == current) {
@@ -2426,8 +2518,7 @@ retry:
* We raced against a concurrent self; things are
* already fixed up. Nothing to do.
*/
- ret = 0;
- goto out_unlock;
+ return 1;
}
newowner = argowner;
}
@@ -2457,22 +2548,9 @@ retry:
* We fixed up user space. Now we need to fix the pi_state
* itself.
*/
- if (pi_state->owner != NULL) {
- raw_spin_lock(&pi_state->owner->pi_lock);
- WARN_ON(list_empty(&pi_state->list));
- list_del_init(&pi_state->list);
- raw_spin_unlock(&pi_state->owner->pi_lock);
- }
-
- pi_state->owner = newowner;
+ pi_state_update_owner(pi_state, newowner);
- raw_spin_lock(&newowner->pi_lock);
- WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &newowner->pi_state_list);
- raw_spin_unlock(&newowner->pi_lock);
- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-
- return 0;
+ return argowner == current;
/*
* In order to reschedule or handle a page fault, we need to drop the
@@ -2493,17 +2571,16 @@ handle_err:
switch (err) {
case -EFAULT:
- ret = fault_in_user_writeable(uaddr);
+ err = fault_in_user_writeable(uaddr);
break;
case -EAGAIN:
cond_resched();
- ret = 0;
+ err = 0;
break;
default:
WARN_ON_ONCE(1);
- ret = err;
break;
}
@@ -2513,17 +2590,44 @@ handle_err:
/*
* Check if someone else fixed it for us:
*/
- if (pi_state->owner != oldowner) {
- ret = 0;
- goto out_unlock;
- }
+ if (pi_state->owner != oldowner)
+ return argowner == current;
- if (ret)
- goto out_unlock;
+ /* Retry if err was -EAGAIN or the fault-in succeeded */
+ if (!err)
+ goto retry;
- goto retry;
+ /*
+ * fault_in_user_writeable() failed so user state is immutable. At
+ * best we can make the kernel state consistent, but user state will
+ * most likely be hosed and any subsequent unlock operation will be
+ * rejected due to PI futex rule [10].
+ *
+ * Ensure that the rtmutex owner is also the pi_state owner despite
+ * the user space value claiming something different. There is no
+ * point in unlocking the rtmutex if current is the owner as it
+ * would need to wait until the next waiter has taken the rtmutex
+ * to guarantee consistent state. Keep it simple. Userspace asked
+ * for this wrecked state.
+ *
+ * The rtmutex has an owner - either current or some other
+ * task. See the EAGAIN loop above.
+ */
+ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
-out_unlock:
+ return err;
+}
+
+static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ struct task_struct *argowner)
+{
+ struct futex_pi_state *pi_state = q->pi_state;
+ int ret;
+
+ lockdep_assert_held(q->lock_ptr);
+
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ ret = __fixup_pi_state_owner(uaddr, q, argowner);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
return ret;
}
@@ -2547,8 +2651,6 @@ static long futex_wait_restart(struct restart_block *restart);
*/
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
- int ret = 0;
-
if (locked) {
/*
* Got the lock. We might not be the anticipated owner if we
@@ -2559,8 +2661,8 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
* stable state, anything else needs more attention.
*/
if (q->pi_state->owner != current)
- ret = fixup_pi_state_owner(uaddr, q, current);
- goto out;
+ return fixup_pi_state_owner(uaddr, q, current);
+ return 1;
}
/*
@@ -2571,24 +2673,17 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
* Another speculative read; pi_state->owner == current is unstable
* but needs our attention.
*/
- if (q->pi_state->owner == current) {
- ret = fixup_pi_state_owner(uaddr, q, NULL);
- goto out;
- }
+ if (q->pi_state->owner == current)
+ return fixup_pi_state_owner(uaddr, q, NULL);
/*
* Paranoia check. If we did not take the lock, then we should not be
- * the owner of the rt_mutex.
+ * the owner of the rt_mutex. Warn and establish consistent state.
*/
- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
- "pi-state %p\n", ret,
- q->pi_state->pi_mutex.owner,
- q->pi_state->owner);
- }
+ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
+ return fixup_pi_state_owner(uaddr, q, current);
-out:
- return ret ? ret : locked;
+ return 0;
}
/**
@@ -2762,14 +2857,13 @@ retry:
goto out;
restart = &current->restart_block;
- restart->fn = futex_wait_restart;
restart->futex.uaddr = uaddr;
restart->futex.val = val;
restart->futex.time = *abs_time;
restart->futex.bitset = bitset;
restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
- ret = -ERESTART_RESTARTBLOCK;
+ ret = set_restart_fn(restart, futex_wait_restart);
out:
if (to) {
@@ -2809,7 +2903,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
ktime_t *time, int trylock)
{
struct hrtimer_sleeper timeout, *to = NULL;
- struct futex_pi_state *pi_state = NULL;
+ struct task_struct *exiting = NULL;
struct rt_mutex_waiter rt_waiter;
struct futex_hash_bucket *hb;
struct futex_q q = futex_q_init;
@@ -2837,7 +2931,8 @@ retry:
retry_private:
hb = queue_lock(&q);
- ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
+ ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
+ &exiting, 0);
if (unlikely(ret)) {
/*
* Atomic work succeeded and we got the lock,
@@ -2850,15 +2945,22 @@ retry_private:
goto out_unlock_put_key;
case -EFAULT:
goto uaddr_faulted;
+ case -EBUSY:
case -EAGAIN:
/*
* Two reasons for this:
- * - Task is exiting and we just wait for the
+ * - EBUSY: Task is exiting and we just wait for the
* exit to complete.
- * - The user space value changed.
+ * - EAGAIN: The user space value changed.
*/
queue_unlock(hb);
put_futex_key(&q.key);
+ /*
+ * Handle the case where the owner is in the middle of
+ * exiting. Wait for the exit to complete otherwise
+ * this task might loop forever, aka. live lock.
+ */
+ wait_for_owner_exiting(ret, exiting);
cond_resched();
goto retry;
default:
@@ -2943,23 +3045,9 @@ no_block:
if (res)
ret = (res < 0) ? res : 0;
- /*
- * If fixup_owner() faulted and was unable to handle the fault, unlock
- * it and return the fault to userspace.
- */
- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
- pi_state = q.pi_state;
- get_pi_state(pi_state);
- }
-
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
- if (pi_state) {
- rt_mutex_futex_unlock(&pi_state->pi_mutex);
- put_pi_state(pi_state);
- }
-
goto out_put_key;
out_unlock_put_key:
@@ -3225,7 +3313,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to = NULL;
- struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
@@ -3310,16 +3397,17 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
- pi_state = q.pi_state;
- get_pi_state(pi_state);
- }
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
spin_unlock(q.lock_ptr);
+ /*
+ * Adjust the return value. It's either -EFAULT or
+ * success (1) but the caller expects 0 for success.
+ */
+ ret = ret < 0 ? ret : 0;
}
} else {
struct rt_mutex *pi_mutex;
@@ -3350,25 +3438,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (res)
ret = (res < 0) ? res : 0;
- /*
- * If fixup_pi_state_owner() faulted and was unable to handle
- * the fault, unlock the rt_mutex and return the fault to
- * userspace.
- */
- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
- pi_state = q.pi_state;
- get_pi_state(pi_state);
- }
-
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
- if (pi_state) {
- rt_mutex_futex_unlock(&pi_state->pi_mutex);
- put_pi_state(pi_state);
- }
-
if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
@@ -3610,7 +3683,7 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
*
* We silently return on any sign of list-walking problem.
*/
-void exit_robust_list(struct task_struct *curr)
+static void exit_robust_list(struct task_struct *curr)
{
struct robust_list_head __user *head = curr->robust_list;
struct robust_list __user *entry, *next_entry, *pending;
@@ -3675,6 +3748,114 @@ void exit_robust_list(struct task_struct *curr)
}
}
+static void futex_cleanup(struct task_struct *tsk)
+{
+ if (unlikely(tsk->robust_list)) {
+ exit_robust_list(tsk);
+ tsk->robust_list = NULL;
+ }
+
+#ifdef CONFIG_COMPAT
+ if (unlikely(tsk->compat_robust_list)) {
+ compat_exit_robust_list(tsk);
+ tsk->compat_robust_list = NULL;
+ }
+#endif
+
+ if (unlikely(!list_empty(&tsk->pi_state_list)))
+ exit_pi_state_list(tsk);
+}
+
+/**
+ * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
+ * @tsk: task to set the state on
+ *
+ * Set the futex exit state of the task lockless. The futex waiter code
+ * observes that state when a task is exiting and loops until the task has
+ * actually finished the futex cleanup. The worst case for this is that the
+ * waiter runs through the wait loop until the state becomes visible.
+ *
+ * This is called from the recursive fault handling path in do_exit().
+ *
+ * This is best effort. Either the futex exit code has run already or
+ * not. If the OWNER_DIED bit has been set on the futex then the waiter can
+ * take it over. If not, the problem is pushed back to user space. If the
+ * futex exit code did not run yet, then an already queued waiter might
+ * block forever, but there is nothing which can be done about that.
+ */
+void futex_exit_recursive(struct task_struct *tsk)
+{
+ /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
+ if (tsk->futex_state == FUTEX_STATE_EXITING)
+ mutex_unlock(&tsk->futex_exit_mutex);
+ tsk->futex_state = FUTEX_STATE_DEAD;
+}
+
+static void futex_cleanup_begin(struct task_struct *tsk)
+{
+ /*
+ * Prevent various race issues against a concurrent incoming waiter
+ * including live locks by forcing the waiter to block on
+ * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
+ * attach_to_pi_owner().
+ */
+ mutex_lock(&tsk->futex_exit_mutex);
+
+ /*
+ * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
+ *
+ * This ensures that all subsequent checks of tsk->futex_state in
+ * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
+ * tsk->pi_lock held.
+ *
+ * It guarantees also that a pi_state which was queued right before
+ * the state change under tsk->pi_lock by a concurrent waiter must
+ * be observed in exit_pi_state_list().
+ */
+ raw_spin_lock_irq(&tsk->pi_lock);
+ tsk->futex_state = FUTEX_STATE_EXITING;
+ raw_spin_unlock_irq(&tsk->pi_lock);
+}
+
+static void futex_cleanup_end(struct task_struct *tsk, int state)
+{
+ /*
+ * Lockless store. The only side effect is that an observer might
+ * take another loop until it becomes visible.
+ */
+ tsk->futex_state = state;
+ /*
+ * Drop the exit protection. This unblocks waiters which observed
+ * FUTEX_STATE_EXITING to reevaluate the state.
+ */
+ mutex_unlock(&tsk->futex_exit_mutex);
+}
+
+void futex_exec_release(struct task_struct *tsk)
+{
+ /*
+ * The state handling is done for consistency, but in the case of
+ * exec() there is no way to prevent further damage as the PID stays
+ * the same. But for the unlikely and arguably buggy case that a
+ * futex is held on exec(), this provides at least as much state
+ * consistency protection as is possible.
+ */
+ futex_cleanup_begin(tsk);
+ futex_cleanup(tsk);
+ /*
+ * Reset the state to FUTEX_STATE_OK. The task is alive and about to
+ * exec a new binary.
+ */
+ futex_cleanup_end(tsk, FUTEX_STATE_OK);
+}
+
+void futex_exit_release(struct task_struct *tsk)
+{
+ futex_cleanup_begin(tsk);
+ futex_cleanup(tsk);
+ futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
+}
+
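
Taken together, futex_cleanup_begin()/futex_cleanup_end() implement a small state machine: flip the state to EXITING under the mutex, run the cleanup, then publish OK (exec) or DEAD (exit) and drop the mutex so blocked waiters can re-evaluate. A compact userspace model of those transitions, with a plain enum standing in for the kernel's FUTEX_STATE_* values:

#include <pthread.h>
#include <stdio.h>

enum futex_state { STATE_OK, STATE_EXITING, STATE_DEAD };

struct task {
        pthread_mutex_t exit_mutex;
        enum futex_state state;
};

static void cleanup_begin(struct task *t)
{
        pthread_mutex_lock(&t->exit_mutex);     /* blocks concurrent waiters */
        t->state = STATE_EXITING;
}

static void cleanup_end(struct task *t, enum futex_state final)
{
        t->state = final;                       /* lockless publish */
        pthread_mutex_unlock(&t->exit_mutex);   /* waiters re-evaluate */
}

static void exit_release(struct task *t)
{
        cleanup_begin(t);
        puts("releasing robust list and PI state");
        cleanup_end(t, STATE_DEAD);
}

static void exec_release(struct task *t)
{
        cleanup_begin(t);
        puts("releasing robust list and PI state");
        cleanup_end(t, STATE_OK);               /* task lives on with a new image */
}

int main(void)
{
        struct task t = { PTHREAD_MUTEX_INITIALIZER, STATE_OK };

        exec_release(&t);
        exit_release(&t);
        printf("final state: %d\n", t.state);
        return 0;
}
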
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3)
{
@@ -3686,8 +3867,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
if (op & FUTEX_CLOCK_REALTIME) {
flags |= FLAGS_CLOCKRT;
- if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
- cmd != FUTEX_WAIT_REQUEUE_PI)
+ if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
return -ENOSYS;
}
@@ -3802,7 +3982,7 @@ static void __user *futex_uaddr(struct robust_list __user *entry,
*
* We silently return on any sign of list-walking problem.
*/
-void compat_exit_robust_list(struct task_struct *curr)
+static void compat_exit_robust_list(struct task_struct *curr)
{
struct compat_robust_list_head __user *head = curr->compat_robust_list;
struct robust_list __user *entry, *next_entry, *pending;
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index 6e40ff6be083..291e0797125b 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -109,9 +109,9 @@ static void *gcov_seq_next(struct seq_file *seq, void *data, loff_t *pos)
{
struct gcov_iterator *iter = data;
+ (*pos)++;
if (gcov_iter_next(iter))
return NULL;
- (*pos)++;
return iter;
}
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index ca5e5c0ef853..5b9e76117ded 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -19,7 +19,9 @@
#include <linux/vmalloc.h>
#include "gcov.h"
-#if (__GNUC__ >= 7)
+#if (__GNUC__ >= 10)
+#define GCOV_COUNTERS 8
+#elif (__GNUC__ >= 7)
#define GCOV_COUNTERS 9
#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
#define GCOV_COUNTERS 10
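
The number of gcov counters depends on the GCC version that built the kernel; the hunk above adds the GCC 10 case. A trivial standalone file showing the same preprocessor dispatch, mirroring only the values visible in the diff (older compilers are deliberately left out of this sketch):

#include <stdio.h>

#if (__GNUC__ >= 10)
#define GCOV_COUNTERS 8
#elif (__GNUC__ >= 7)
#define GCOV_COUNTERS 9
#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
#define GCOV_COUNTERS 10
#else
#error "this sketch only covers the compiler versions shown in the diff"
#endif

int main(void)
{
        printf("gcc %d.%d -> GCOV_COUNTERS = %d\n",
               __GNUC__, __GNUC_MINOR__, GCOV_COUNTERS);
        return 0;
}
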
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 5f3e2baefca9..d532bf0c5a67 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -80,6 +80,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS
# Generic IRQ IPI support
config GENERIC_IRQ_IPI
bool
+ select IRQ_DOMAIN_HIERARCHY
# Generic MSI interrupt support
config GENERIC_MSI_IRQ
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index e0eda2bd3975..1e42fc2ad4d5 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1247,14 +1247,26 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs)
{
- if (domain->ops->free)
- domain->ops->free(domain, irq_base, nr_irqs);
+ unsigned int i;
+
+ if (!domain->ops->free)
+ return;
+
+ for (i = 0; i < nr_irqs; i++) {
+ if (irq_domain_get_irq_data(domain, irq_base + i))
+ domain->ops->free(domain, irq_base + i, 1);
+ }
}
int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs, void *arg)
{
+ if (!domain->ops->alloc) {
+ pr_debug("domain->ops->alloc() is NULL\n");
+ return -ENOSYS;
+ }
+
return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
}
@@ -1292,11 +1304,6 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
return -EINVAL;
}
- if (!domain->ops->alloc) {
- pr_debug("domain->ops->alloc() is NULL\n");
- return -ENOSYS;
- }
-
if (realloc && irq_base >= 0) {
virq = irq_base;
} else {
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index efcb54ee0922..c377dbb869f8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -194,9 +194,9 @@ void irq_set_thread_affinity(struct irq_desc *desc)
set_bit(IRQTF_AFFINITY, &action->thread_flags);
}
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static void irq_validate_effective_affinity(struct irq_data *data)
{
-#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -204,9 +204,19 @@ static void irq_validate_effective_affinity(struct irq_data *data)
return;
pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
chip->name, data->irq);
-#endif
}
+static inline void irq_init_effective_affinity(struct irq_data *data,
+ const struct cpumask *mask)
+{
+ cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
+}
+#else
+static inline void irq_validate_effective_affinity(struct irq_data *data) { }
+static inline void irq_init_effective_affinity(struct irq_data *data,
+ const struct cpumask *mask) { }
+#endif
+
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
@@ -264,6 +274,30 @@ static int irq_try_set_affinity(struct irq_data *data,
return ret;
}
+static bool irq_set_affinity_deactivated(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ struct irq_desc *desc = irq_data_to_desc(data);
+
+ /*
+ * Handle irq chips which can handle affinity only in activated
+ * state correctly
+ *
+ * If the interrupt is not yet activated, just store the affinity
+ * mask and do not call the chip driver at all. On activation the
+ * driver has to make sure anyway that the interrupt is in a
+ * usable state so startup works.
+ */
+ if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
+ irqd_is_activated(data) || !irqd_affinity_on_activate(data))
+ return false;
+
+ cpumask_copy(desc->irq_common_data.affinity, mask);
+ irq_init_effective_affinity(data, mask);
+ irqd_set(data, IRQD_AFFINITY_SET);
+ return true;
+}
+
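
The new helper short-circuits affinity changes on interrupts that are not yet activated: it only caches the requested mask and lets activation program the hardware later. A generic sketch of that "cache now, apply on activate" pattern; the struct and function names are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

struct irq_like {
        bool activated;
        unsigned int cached_mask;       /* stands in for the affinity cpumask */
        unsigned int hw_mask;           /* what the "hardware" currently uses */
};

static void program_hw(struct irq_like *irq, unsigned int mask)
{
        irq->hw_mask = mask;
        printf("hw programmed with mask 0x%x\n", mask);
}

static void set_affinity(struct irq_like *irq, unsigned int mask)
{
        irq->cached_mask = mask;
        if (!irq->activated)            /* don't poke the chip driver yet */
                return;
        program_hw(irq, mask);
}

static void activate(struct irq_like *irq)
{
        irq->activated = true;
        program_hw(irq, irq->cached_mask);      /* apply the deferred setting */
}

int main(void)
{
        struct irq_like irq = { 0 };

        set_affinity(&irq, 0x3);        /* only cached */
        activate(&irq);                 /* now hits the hardware */
        set_affinity(&irq, 0xc);        /* applied immediately */
        return 0;
}
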
int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
bool force)
{
@@ -274,6 +308,9 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
if (!chip || !chip->irq_set_affinity)
return -EINVAL;
+ if (irq_set_affinity_deactivated(data, mask, force))
+ return 0;
+
if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
ret = irq_try_set_affinity(data, mask, force);
} else {
@@ -950,11 +987,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
irqreturn_t ret;
local_bh_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE))
+ local_irq_disable();
ret = action->thread_fn(action->irq, action->dev_id);
if (ret == IRQ_HANDLED)
atomic_inc(&desc->threads_handled);
irq_finalize_oneshot(desc, action);
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE))
+ local_irq_enable();
local_bh_enable();
return ret;
}
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 30cc217b8631..8e586858bcf4 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -380,6 +380,13 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
unsigned int cpu, bit;
struct cpumap *cm;
+ /*
+ * Not required in theory, but matrix_find_best_cpu() uses
+ * for_each_cpu(), which ignores the cpumask on UP.
+ */
+ if (cpumask_empty(msk))
+ return -EINVAL;
+
cpu = matrix_find_best_cpu(m, msk);
if (cpu == UINT_MAX)
return -ENOSPC;
@@ -416,7 +423,9 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
return;
- clear_bit(bit, cm->alloc_map);
+ if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
+ return;
+
cm->allocated--;
if(managed)
cm->managed_allocated--;
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index dc1186ce3ecd..604974f2afb1 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -437,22 +437,22 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
can_reserve = msi_check_reservation_mode(domain, info, dev);
- for_each_msi_entry(desc, dev) {
- virq = desc->irq;
- if (desc->nvec_used == 1)
- dev_dbg(dev, "irq %d for MSI\n", virq);
- else
+ /*
+ * This flag is set by the PCI layer as we need to activate
+ * the MSI entries before the PCI layer enables MSI in the
+ * card. Otherwise the card latches a random msi message.
+ */
+ if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
+ goto skip_activate;
+
+ for_each_msi_vector(desc, i, dev) {
+ if (desc->irq == i) {
+ virq = desc->irq;
dev_dbg(dev, "irq [%d-%d] for MSI\n",
virq, virq + desc->nvec_used - 1);
- /*
- * This flag is set by the PCI layer as we need to activate
- * the MSI entries before the PCI layer enables MSI in the
- * card. Otherwise the card latches a random msi message.
- */
- if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
- continue;
+ }
- irq_data = irq_domain_get_irq_data(domain, desc->irq);
+ irq_data = irq_domain_get_irq_data(domain, i);
if (!can_reserve) {
irqd_clr_can_reserve(irq_data);
if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
@@ -463,28 +463,24 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
goto cleanup;
}
+skip_activate:
/*
* If these interrupts use reservation mode, clear the activated bit
* so request_irq() will assign the final vector.
*/
if (can_reserve) {
- for_each_msi_entry(desc, dev) {
- irq_data = irq_domain_get_irq_data(domain, desc->irq);
+ for_each_msi_vector(desc, i, dev) {
+ irq_data = irq_domain_get_irq_data(domain, i);
irqd_clr_activated(irq_data);
}
}
return 0;
cleanup:
- for_each_msi_entry(desc, dev) {
- struct irq_data *irqd;
-
- if (desc->irq == virq)
- break;
-
- irqd = irq_domain_get_irq_data(domain, desc->irq);
- if (irqd_is_activated(irqd))
- irq_domain_deactivate_irq(irqd);
+ for_each_msi_vector(desc, i, dev) {
+ irq_data = irq_domain_get_irq_data(domain, i);
+ if (irqd_is_activated(irq_data))
+ irq_domain_deactivate_irq(irq_data);
}
msi_domain_free_irqs(domain, dev);
return ret;
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 7c8262635b29..01c673721894 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -83,6 +83,7 @@ void static_key_slow_inc_cpuslocked(struct static_key *key)
int v, v1;
STATIC_KEY_CHECK_USE(key);
+ lockdep_assert_cpus_held();
/*
* Careful if we get concurrent static_key_slow_inc() calls;
@@ -128,6 +129,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
void static_key_enable_cpuslocked(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
+ lockdep_assert_cpus_held();
if (atomic_read(&key->enabled) > 0) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
@@ -158,6 +160,7 @@ EXPORT_SYMBOL_GPL(static_key_enable);
void static_key_disable_cpuslocked(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
+ lockdep_assert_cpus_held();
if (atomic_read(&key->enabled) != 1) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
@@ -183,6 +186,10 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key,
unsigned long rate_limit,
struct delayed_work *work)
{
+ int val;
+
+ lockdep_assert_cpus_held();
+
/*
* The negative count check is valid even when a negative
* key->enabled is in use by static_key_slow_inc(); a
@@ -190,17 +197,20 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key,
* returns is unbalanced, because all other static_key_slow_inc()
* instances block while the update is in progress.
*/
- if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
- WARN(atomic_read(&key->enabled) < 0,
- "jump label: negative count!\n");
+ val = atomic_fetch_add_unless(&key->enabled, -1, 1);
+ if (val != 1) {
+ WARN(val < 0, "jump label: negative count!\n");
return;
}
- if (rate_limit) {
- atomic_inc(&key->enabled);
- schedule_delayed_work(work, rate_limit);
- } else {
- jump_label_update(key);
+ jump_label_lock();
+ if (atomic_dec_and_test(&key->enabled)) {
+ if (rate_limit) {
+ atomic_inc(&key->enabled);
+ schedule_delayed_work(work, rate_limit);
+ } else {
+ jump_label_update(key);
+ }
}
jump_label_unlock();
}
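
The jump_label change decrements the key's refcount with atomic_fetch_add_unless(..., -1, 1) so the 1 -> 0 transition is only ever taken under jump_label_lock(). C11 has no fetch_add_unless, so the sketch below builds one from a compare-exchange loop; it models the counting discipline only, not the code patching itself:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int enabled = 2;
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

/* Add @delta to *v unless *v == @unless; return the old value. */
static int fetch_add_unless(atomic_int *v, int delta, int unless)
{
        int old = atomic_load(v);

        while (old != unless &&
               !atomic_compare_exchange_weak(v, &old, old + delta))
                ;
        return old;
}

static void key_slow_dec(void)
{
        int val = fetch_add_unless(&enabled, -1, 1);

        if (val != 1) {
                if (val < 0)
                        fprintf(stderr, "negative count!\n");
                return;                 /* fast path: no 1 -> 0 transition */
        }

        /* Slow path: the final decrement happens under the lock. */
        pthread_mutex_lock(&update_lock);
        if (atomic_fetch_sub(&enabled, 1) == 1)
                puts("key disabled, patching code sites");
        pthread_mutex_unlock(&update_lock);
}

int main(void)
{
        key_slow_dec();         /* 2 -> 1, fast path */
        key_slow_dec();         /* 1 -> 0, slow path under the lock */
        return 0;
}
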
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index ed87dac8378c..6df6d62e2db8 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -644,19 +644,20 @@ static inline int kallsyms_for_perf(void)
* Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
* block even that).
*/
-int kallsyms_show_value(void)
+bool kallsyms_show_value(const struct cred *cred)
{
switch (kptr_restrict) {
case 0:
if (kallsyms_for_perf())
- return 1;
+ return true;
/* fallthrough */
case 1:
- if (has_capability_noaudit(current, CAP_SYSLOG))
- return 1;
+ if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
+ CAP_OPT_NOAUDIT) == 0)
+ return true;
/* fallthrough */
default:
- return 0;
+ return false;
}
}
@@ -673,7 +674,11 @@ static int kallsyms_open(struct inode *inode, struct file *file)
return -ENOMEM;
reset_iter(iter, 0);
- iter->show_value = kallsyms_show_value();
+ /*
+ * Instead of checking this on every s_show() call, cache
+ * the result here at open time.
+ */
+ iter->show_value = kallsyms_show_value(file->f_cred);
return 0;
}
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index faeec8255e7e..6b3d7f7211dd 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -1130,7 +1130,6 @@ int kernel_kexec(void)
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
- lock_system_sleep();
pm_prepare_console();
error = freeze_processes();
if (error) {
@@ -1193,7 +1192,6 @@ int kernel_kexec(void)
thaw_processes();
Restore_console:
pm_restore_console();
- unlock_system_sleep();
}
#endif
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index c6a3b6851372..89d41c0a10f1 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -168,6 +168,11 @@ void kimage_file_post_load_cleanup(struct kimage *image)
vfree(pi->sechdrs);
pi->sechdrs = NULL;
+#ifdef CONFIG_IMA_KEXEC
+ vfree(image->ima_buffer);
+ image->ima_buffer = NULL;
+#endif /* CONFIG_IMA_KEXEC */
+
/* See if architecture has anything to cleanup post load */
arch_kimage_file_post_load_cleanup(image);
@@ -626,8 +631,10 @@ static int kexec_calculate_store_digests(struct kimage *image)
sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
sha_regions = vzalloc(sha_region_sz);
- if (!sha_regions)
+ if (!sha_regions) {
+ ret = -ENOMEM;
goto out_free_desc;
+ }
desc->tfm = tfm;
desc->flags = 0;
diff --git a/kernel/kmod.c b/kernel/kmod.c
index bc6addd9152b..a2de58de6ab6 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -120,7 +120,7 @@ out:
* invoke it.
*
* If module auto-loading support is disabled then this function
- * becomes a no-operation.
+ * simply returns -ENOENT.
*/
int __request_module(bool wait, const char *fmt, ...)
{
@@ -137,7 +137,7 @@ int __request_module(bool wait, const char *fmt, ...)
WARN_ON_ONCE(wait && current_is_async());
if (!modprobe_path[0])
- return 0;
+ return -ENOENT;
va_start(args, fmt);
ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 92aad49b82f9..d4435fd6fc8b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -599,11 +599,12 @@ static void kprobe_optimizer(struct work_struct *work)
mutex_unlock(&module_mutex);
mutex_unlock(&text_mutex);
cpus_read_unlock();
- mutex_unlock(&kprobe_mutex);
/* Step 5: Kick optimizer again if needed */
if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
kick_kprobe_optimizer();
+
+ mutex_unlock(&kprobe_mutex);
}
/* Wait for completing optimization and unoptimization */
@@ -1064,9 +1065,20 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
return ret;
}
#else /* !CONFIG_KPROBES_ON_FTRACE */
-#define prepare_kprobe(p) arch_prepare_kprobe(p)
-#define arm_kprobe_ftrace(p) (-ENODEV)
-#define disarm_kprobe_ftrace(p) (-ENODEV)
+static inline int prepare_kprobe(struct kprobe *p)
+{
+ return arch_prepare_kprobe(p);
+}
+
+static inline int arm_kprobe_ftrace(struct kprobe *p)
+{
+ return -ENODEV;
+}
+
+static inline int disarm_kprobe_ftrace(struct kprobe *p)
+{
+ return -ENODEV;
+}
#endif
/* Arm a kprobe with text_mutex */
@@ -1226,6 +1238,26 @@ __releases(hlist_lock)
}
NOKPROBE_SYMBOL(kretprobe_table_unlock);
+struct kprobe kprobe_busy = {
+ .addr = (void *) get_kprobe,
+};
+
+void kprobe_busy_begin(void)
+{
+ struct kprobe_ctlblk *kcb;
+
+ preempt_disable();
+ __this_cpu_write(current_kprobe, &kprobe_busy);
+ kcb = get_kprobe_ctlblk();
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+}
+
+void kprobe_busy_end(void)
+{
+ __this_cpu_write(current_kprobe, NULL);
+ preempt_enable();
+}
+
/*
* This function is called from finish_task_switch when task tk becomes dead,
* so that we can recycle any function-return probe instances associated
@@ -1243,6 +1275,8 @@ void kprobe_flush_task(struct task_struct *tk)
/* Early boot. kretprobe_table_locks not yet initialized. */
return;
+ kprobe_busy_begin();
+
INIT_HLIST_HEAD(&empty_rp);
hash = hash_ptr(tk, KPROBE_HASH_BITS);
head = &kretprobe_inst_table[hash];
@@ -1256,6 +1290,8 @@ void kprobe_flush_task(struct task_struct *tk)
hlist_del(&ri->hlist);
kfree(ri);
}
+
+ kprobe_busy_end();
}
NOKPROBE_SYMBOL(kprobe_flush_task);
@@ -1885,28 +1921,48 @@ bool __weak arch_kprobe_on_func_entry(unsigned long offset)
return !offset;
}
-bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
+/**
+ * kprobe_on_func_entry() -- check whether given address is function entry
+ * @addr: Target address
+ * @sym: Target symbol name
+ * @offset: The offset from the symbol or the address
+ *
+ * This checks whether the given @addr+@offset or @sym+@offset is on the
+ * function entry address or not.
+ * This returns 0 if it is the function entry, or -EINVAL if it is not.
+ * It also returns -ENOENT if the symbol or address lookup fails.
+ * The caller must pass either @addr or @sym (the other must be NULL),
+ * otherwise this returns -EINVAL.
+ */
+int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{
kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
if (IS_ERR(kp_addr))
- return false;
+ return PTR_ERR(kp_addr);
- if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
- !arch_kprobe_on_func_entry(offset))
- return false;
+ if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
+ return -ENOENT;
- return true;
+ if (!arch_kprobe_on_func_entry(offset))
+ return -EINVAL;
+
+ return 0;
}
int register_kretprobe(struct kretprobe *rp)
{
- int ret = 0;
+ int ret;
struct kretprobe_instance *inst;
int i;
void *addr;
- if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
+ ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
+ if (ret)
+ return ret;
+
+ /* If only rp->kp.addr is specified, check reregistering kprobes */
+ if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
return -EINVAL;
if (kretprobe_blacklist_size) {
@@ -2036,6 +2092,9 @@ static void kill_kprobe(struct kprobe *p)
{
struct kprobe *kp;
+ if (WARN_ON_ONCE(kprobe_gone(p)))
+ return;
+
p->flags |= KPROBE_FLAG_GONE;
if (kprobe_aggrprobe(p)) {
/*
@@ -2052,6 +2111,14 @@ static void kill_kprobe(struct kprobe *p)
* the original probed function (which will be freed soon) any more.
*/
arch_remove_kprobe(p);
+
+ /*
+ * The module is going away. We should disarm the kprobe which
+ * is using ftrace, because the ftrace framework is still available
+ * at the MODULE_STATE_GOING notification.
+ */
+ if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
+ disarm_kprobe_ftrace(p);
}
/* Disable one kprobe */
@@ -2211,7 +2278,10 @@ static int kprobes_module_callback(struct notifier_block *nb,
mutex_lock(&kprobe_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
- hlist_for_each_entry_rcu(p, head, hlist)
+ hlist_for_each_entry_rcu(p, head, hlist) {
+ if (kprobe_gone(p))
+ continue;
+
if (within_module_init((unsigned long)p->addr, mod) ||
(checkcore &&
within_module_core((unsigned long)p->addr, mod))) {
@@ -2228,6 +2298,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
*/
kill_kprobe(p);
}
+ }
}
mutex_unlock(&kprobe_mutex);
return NOTIFY_DONE;
@@ -2309,7 +2380,7 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
else
kprobe_type = "k";
- if (!kallsyms_show_value())
+ if (!kallsyms_show_value(pi->file->f_cred))
addr = NULL;
if (sym)
@@ -2410,7 +2481,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
* If /proc/kallsyms is not showing kernel address, we won't
* show them here either.
*/
- if (!kallsyms_show_value())
+ if (!kallsyms_show_value(m->file->f_cred))
seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
(void *)ent->start_addr);
else
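
Among the kprobes changes above, kprobe_on_func_entry() now returns 0, -EINVAL or -ENOENT instead of a bool, and register_kretprobe() forwards that value, so callers can tell "not a function entry" apart from "lookup failed". A tiny userspace sketch of the same bool-to-errno conversion; the function names are made up for illustration.

/* Sketch of turning a bool predicate into an errno-style return so the caller
 * can distinguish "not a function entry" from "symbol lookup failed". */
#include <errno.h>
#include <stdio.h>

static int on_func_entry(const char *sym, unsigned long offset)
{
	if (!sym)
		return -ENOENT;		/* lookup failed */
	if (offset != 0)
		return -EINVAL;		/* valid symbol, but not the entry */
	return 0;
}

static int register_probe(const char *sym, unsigned long offset)
{
	int ret = on_func_entry(sym, offset);

	if (ret)
		return ret;		/* propagate the precise reason */
	printf("probe registered at %s+0\n", sym);
	return 0;
}

int main(void)
{
	printf("%d\n", register_probe("vfs_read", 8));	/* -EINVAL (-22) */
	printf("%d\n", register_probe(NULL, 0));	/* -ENOENT (-2) */
	return register_probe("vfs_read", 0);		/* 0 */
}
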
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 087d18d771b5..81abfac35127 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -190,8 +190,15 @@ static void __kthread_parkme(struct kthread *self)
if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
break;
+ /*
+ * Thread is going to call schedule(), do not preempt it,
+ * or the caller of kthread_park() may spend more time in
+ * wait_task_inactive().
+ */
+ preempt_disable();
complete(&self->parked);
- schedule();
+ schedule_preempt_disabled();
+ preempt_enable();
}
__set_current_state(TASK_RUNNING);
}
@@ -236,8 +243,14 @@ static int kthread(void *_create)
/* OK, tell user we're spawned, wait for stop or wakeup */
__set_current_state(TASK_UNINTERRUPTIBLE);
create->result = current;
+ /*
+ * Thread is going to call schedule(), do not preempt it,
+ * or the creator may spend more time in wait_task_inactive().
+ */
+ preempt_disable();
complete(done);
- schedule();
+ schedule_preempt_disabled();
+ preempt_enable();
ret = -EINTR;
if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
@@ -447,11 +460,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
return p;
kthread_bind(p, cpu);
/* CPU hotplug need to bind once again when unparking the thread. */
- set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
to_kthread(p)->cpu = cpu;
return p;
}
+void kthread_set_per_cpu(struct task_struct *k, int cpu)
+{
+ struct kthread *kthread = to_kthread(k);
+ if (!kthread)
+ return;
+
+ WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
+
+ if (cpu < 0) {
+ clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+ return;
+ }
+
+ kthread->cpu = cpu;
+ set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
+bool kthread_is_per_cpu(struct task_struct *k)
+{
+ struct kthread *kthread = to_kthread(k);
+ if (!kthread)
+ return false;
+
+ return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
/**
* kthread_unpark - unpark a thread created by kthread_create().
* @k: thread created by kthread_create().
@@ -850,7 +888,8 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
/* Move the work from worker->delayed_work_list. */
WARN_ON_ONCE(list_empty(&work->node));
list_del_init(&work->node);
- kthread_insert_work(worker, work, &worker->work_list);
+ if (!work->canceling)
+ kthread_insert_work(worker, work, &worker->work_list);
spin_unlock(&worker->lock);
}
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1e272f6a01e7..126c6d524a0f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -722,7 +722,8 @@ static bool assign_lock_key(struct lockdep_map *lock)
/* Debug-check: all keys must be persistent! */
debug_locks_off();
pr_err("INFO: trying to register non-static key.\n");
- pr_err("the code is fine but needs lockdep annotation.\n");
+ pr_err("The code is fine but needs lockdep annotation, or maybe\n");
+ pr_err("you didn't initialize this object before use?\n");
pr_err("turning off the locking correctness validator.\n");
dump_stack();
return false;
@@ -1260,9 +1261,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
this.class = class;
raw_local_irq_save(flags);
+ current->lockdep_recursion = 1;
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(&this);
arch_spin_unlock(&lockdep_lock);
+ current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
return ret;
@@ -1287,9 +1290,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
this.class = class;
raw_local_irq_save(flags);
+ current->lockdep_recursion = 1;
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(&this);
arch_spin_unlock(&lockdep_lock);
+ current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
return ret;
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 6fcc4650f0c4..53cc3bb7025a 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -394,7 +394,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
seq_time(m, lt->min);
seq_time(m, lt->max);
seq_time(m, lt->total);
- seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);
+ seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
}
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 7d0b0ed74404..95395ef5922a 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -710,10 +710,10 @@ static void __torture_print_stats(char *page,
if (statp[i].n_lock_fail)
fail = true;
sum += statp[i].n_lock_acquired;
- if (max < statp[i].n_lock_fail)
- max = statp[i].n_lock_fail;
- if (min > statp[i].n_lock_fail)
- min = statp[i].n_lock_fail;
+ if (max < statp[i].n_lock_acquired)
+ max = statp[i].n_lock_acquired;
+ if (min > statp[i].n_lock_acquired)
+ min = statp[i].n_lock_acquired;
}
page += sprintf(page,
"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 9aa713629387..839df4383799 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
task->blocked_on = waiter;
}
-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
task->blocked_on = NULL;
- list_del_init(&waiter->list);
+ INIT_LIST_HEAD(&waiter->list);
waiter->task = NULL;
}
diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
index 1edd3f45a4ec..53e631e1d76d 100644
--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
extern void debug_mutex_add_waiter(struct mutex *lock,
struct mutex_waiter *waiter,
struct task_struct *task);
-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task);
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 3f8a35104285..354151fef06a 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -177,7 +177,7 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
* Add @waiter to a given location in the lock wait_list and set the
* FLAG_WAITERS flag if it's the first waiter.
*/
-static void __sched
+static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct list_head *list)
{
@@ -188,6 +188,16 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}
+static void
+__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+{
+ list_del(&waiter->list);
+ if (likely(list_empty(&lock->wait_list)))
+ __mutex_clear_flag(lock, MUTEX_FLAGS);
+
+ debug_mutex_remove_waiter(lock, waiter, current);
+}
+
/*
* Give up ownership to a specific task, when @task = NULL, this is equivalent
* to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves
@@ -609,7 +619,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
*/
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
- const bool use_ww_ctx, struct mutex_waiter *waiter)
+ struct mutex_waiter *waiter)
{
if (!waiter) {
/*
@@ -685,7 +695,7 @@ fail:
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
- const bool use_ww_ctx, struct mutex_waiter *waiter)
+ struct mutex_waiter *waiter)
{
return false;
}
@@ -905,10 +915,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct ww_mutex *ww;
int ret;
+ if (!use_ww_ctx)
+ ww_ctx = NULL;
+
might_sleep();
ww = container_of(lock, struct ww_mutex, base);
- if (use_ww_ctx && ww_ctx) {
+ if (ww_ctx) {
if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
return -EALREADY;
@@ -925,10 +938,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
if (__mutex_trylock(lock) ||
- mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
+ mutex_optimistic_spin(lock, ww_ctx, NULL)) {
/* got the lock, yay! */
lock_acquired(&lock->dep_map, ip);
- if (use_ww_ctx && ww_ctx)
+ if (ww_ctx)
ww_mutex_set_context_fastpath(ww, ww_ctx);
preempt_enable();
return 0;
@@ -939,7 +952,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* After waiting to acquire the wait_lock, try again.
*/
if (__mutex_trylock(lock)) {
- if (use_ww_ctx && ww_ctx)
+ if (ww_ctx)
__ww_mutex_check_waiters(lock, ww_ctx);
goto skip_wait;
@@ -992,7 +1005,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
goto err;
}
- if (use_ww_ctx && ww_ctx) {
+ if (ww_ctx) {
ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
if (ret)
goto err;
@@ -1005,7 +1018,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* ww_mutex needs to always recheck its position since its waiter
* list is not FIFO ordered.
*/
- if ((use_ww_ctx && ww_ctx) || !first) {
+ if (ww_ctx || !first) {
first = __mutex_waiter_is_first(lock, &waiter);
if (first)
__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
@@ -1018,7 +1031,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* or we must see its unlock and acquire.
*/
if (__mutex_trylock(lock) ||
- (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
+ (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
break;
spin_lock(&lock->wait_lock);
@@ -1027,7 +1040,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
acquired:
__set_current_state(TASK_RUNNING);
- if (use_ww_ctx && ww_ctx) {
+ if (ww_ctx) {
/*
* Wound-Wait; we stole the lock (!first_waiter), check the
* waiters as anyone might want to wound us.
@@ -1037,9 +1050,7 @@ acquired:
__ww_mutex_check_waiters(lock, ww_ctx);
}
- mutex_remove_waiter(lock, &waiter, current);
- if (likely(list_empty(&lock->wait_list)))
- __mutex_clear_flag(lock, MUTEX_FLAGS);
+ __mutex_remove_waiter(lock, &waiter);
debug_mutex_free_waiter(&waiter);
@@ -1047,7 +1058,7 @@ skip_wait:
/* got the lock - cleanup and rejoice! */
lock_acquired(&lock->dep_map, ip);
- if (use_ww_ctx && ww_ctx)
+ if (ww_ctx)
ww_mutex_lock_acquired(ww, ww_ctx);
spin_unlock(&lock->wait_lock);
@@ -1056,7 +1067,7 @@ skip_wait:
err:
__set_current_state(TASK_RUNNING);
- mutex_remove_waiter(lock, &waiter, current);
+ __mutex_remove_waiter(lock, &waiter);
err_early_kill:
spin_unlock(&lock->wait_lock);
debug_mutex_free_waiter(&waiter);
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 1c2287d3fa71..f0c710b1d192 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -10,12 +10,10 @@
* !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
*/
-#define mutex_remove_waiter(lock, waiter, task) \
- __list_del((waiter)->list.prev, (waiter)->list.next)
-
#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
#define debug_mutex_free_waiter(waiter) do { } while (0)
#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
+#define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
#define debug_mutex_unlock(lock) do { } while (0)
#define debug_mutex_init(lock, name, key) do { } while (0)
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index c7471c3fb798..16c09cda3b02 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -70,6 +70,8 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
*/
void queued_write_lock_slowpath(struct qrwlock *lock)
{
+ int cnts;
+
/* Put the writer into the wait queue */
arch_spin_lock(&lock->wait_lock);
@@ -83,9 +85,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
/* When no more readers or writers, set the locked flag */
do {
- atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
- } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
- _QW_LOCKED) != _QW_WAITING);
+ cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
+ } while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
arch_spin_unlock(&lock->wait_lock);
}
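
The qrwlock hunk rewrites the writer slow path with atomic_try_cmpxchg_acquire(): the CAS reports success as a boolean, refreshes the expected value on failure, and carries the acquire ordering only on the successful transition, while the wait itself can stay relaxed. A userspace sketch of that loop shape using C11 atomics; the constants are stand-ins rather than the real qrwlock encoding.

/* Single-threaded illustration of the relaxed-wait / acquire-on-success
 * compare-exchange loop used by the writer slow path. */
#include <stdatomic.h>
#include <stdio.h>

#define QW_WAITING 0x100
#define QW_LOCKED  0x0ff

int main(void)
{
	atomic_int cnts = QW_WAITING;
	int expected;

	do {
		/* stand-in for atomic_cond_read_relaxed(): wait for WAITING */
		while ((expected = atomic_load_explicit(&cnts,
					memory_order_relaxed)) != QW_WAITING)
			;
	} while (!atomic_compare_exchange_weak_explicit(&cnts, &expected,
							QW_LOCKED,
							memory_order_acquire,
							memory_order_relaxed));

	printf("write lock taken, cnts=0x%x\n", atomic_load(&cnts));
	return 0;
}
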
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 9562aaa2afdc..a5ec4f68527e 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1719,8 +1719,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
* possible because it belongs to the pi_state which is about to be freed
* and it is not longer visible to other tasks.
*/
-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
- struct task_struct *proxy_owner)
+void rt_mutex_proxy_unlock(struct rt_mutex *lock)
{
debug_rt_mutex_proxy_unlock(lock);
rt_mutex_set_owner(lock, NULL);
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index d1d62f942be2..ca6fb489007b 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -133,8 +133,7 @@ enum rtmutex_chainwalk {
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner);
-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
- struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
diff --git a/kernel/module.c b/kernel/module.c
index 20fc0efc679c..92d8610742c7 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -415,7 +415,7 @@ static bool each_symbol_in_section(const struct symsearch *arr,
}
/* Returns true as soon as fn returns true, otherwise false. */
-bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
+static bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
struct module *owner,
void *data),
void *data)
@@ -476,7 +476,6 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
}
return false;
}
-EXPORT_SYMBOL_GPL(each_symbol_section);
struct find_symbol_arg {
/* Input */
@@ -488,6 +487,7 @@ struct find_symbol_arg {
struct module *owner;
const s32 *crc;
const struct kernel_symbol *sym;
+ enum mod_license license;
};
static bool check_symbol(const struct symsearch *syms,
@@ -497,9 +497,9 @@ static bool check_symbol(const struct symsearch *syms,
struct find_symbol_arg *fsa = data;
if (!fsa->gplok) {
- if (syms->licence == GPL_ONLY)
+ if (syms->license == GPL_ONLY)
return false;
- if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
+ if (syms->license == WILL_BE_GPL_ONLY && fsa->warn) {
pr_warn("Symbol %s is being used by a non-GPL module, "
"which will not be allowed in the future\n",
fsa->name);
@@ -521,6 +521,7 @@ static bool check_symbol(const struct symsearch *syms,
fsa->owner = owner;
fsa->crc = symversion(syms->crcs, symnum);
fsa->sym = &syms->start[symnum];
+ fsa->license = syms->license;
return true;
}
@@ -568,9 +569,10 @@ static bool find_symbol_in_section(const struct symsearch *syms,
/* Find a symbol and return it, along with, (optional) crc and
* (optional) module which owns it. Needs preempt disabled or module_mutex. */
-const struct kernel_symbol *find_symbol(const char *name,
+static const struct kernel_symbol *find_symbol(const char *name,
struct module **owner,
const s32 **crc,
+ enum mod_license *license,
bool gplok,
bool warn)
{
@@ -585,13 +587,14 @@ const struct kernel_symbol *find_symbol(const char *name,
*owner = fsa.owner;
if (crc)
*crc = fsa.crc;
+ if (license)
+ *license = fsa.license;
return fsa.sym;
}
pr_debug("Failed to find symbol %s\n", name);
return NULL;
}
-EXPORT_SYMBOL_GPL(find_symbol);
/*
* Search for module by name: must hold module_mutex (or preempt disabled
@@ -851,7 +854,7 @@ static int add_module_usage(struct module *a, struct module *b)
}
/* Module a uses b: caller needs module_mutex() */
-int ref_module(struct module *a, struct module *b)
+static int ref_module(struct module *a, struct module *b)
{
int err;
@@ -870,7 +873,6 @@ int ref_module(struct module *a, struct module *b)
}
return 0;
}
-EXPORT_SYMBOL_GPL(ref_module);
/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
@@ -1059,7 +1061,7 @@ void __symbol_put(const char *symbol)
struct module *owner;
preempt_disable();
- if (!find_symbol(symbol, &owner, NULL, true, false))
+ if (!find_symbol(symbol, &owner, NULL, NULL, true, false))
BUG();
module_put(owner);
preempt_enable();
@@ -1151,11 +1153,10 @@ static inline void module_unload_free(struct module *mod)
{
}
-int ref_module(struct module *a, struct module *b)
+static int ref_module(struct module *a, struct module *b)
{
return strong_try_module_get(b);
}
-EXPORT_SYMBOL_GPL(ref_module);
static inline int module_unload_init(struct module *mod)
{
@@ -1338,7 +1339,7 @@ static inline int check_modstruct_version(const struct load_info *info,
* locking is necessary -- use preempt_disable() to placate lockdep.
*/
preempt_disable();
- if (!find_symbol("module_layout", NULL, &crc, true, false)) {
+ if (!find_symbol("module_layout", NULL, &crc, NULL, true, false)) {
preempt_enable();
BUG();
}
@@ -1378,6 +1379,25 @@ static inline int same_magic(const char *amagic, const char *bmagic,
}
#endif /* CONFIG_MODVERSIONS */
+static bool inherit_taint(struct module *mod, struct module *owner)
+{
+ if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
+ return true;
+
+ if (mod->using_gplonly_symbols) {
+ pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n",
+ mod->name, owner->name);
+ return false;
+ }
+
+ if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
+ pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n",
+ mod->name, owner->name);
+ set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
+ }
+ return true;
+}
+
/* Resolve a symbol for this module. I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
const struct load_info *info,
@@ -1387,6 +1407,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
struct module *owner;
const struct kernel_symbol *sym;
const s32 *crc;
+ enum mod_license license;
int err;
/*
@@ -1396,11 +1417,19 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
*/
sched_annotate_sleep();
mutex_lock(&module_mutex);
- sym = find_symbol(name, &owner, &crc,
+ sym = find_symbol(name, &owner, &crc, &license,
!(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
if (!sym)
goto unlock;
+ if (license == GPL_ONLY)
+ mod->using_gplonly_symbols = true;
+
+ if (!inherit_taint(mod, owner)) {
+ sym = NULL;
+ goto getname;
+ }
+
if (!check_version(info, name, mod, crc)) {
sym = ERR_PTR(-EINVAL);
goto getname;
@@ -1451,8 +1480,7 @@ static inline bool sect_empty(const Elf_Shdr *sect)
}
struct module_sect_attr {
- struct module_attribute mattr;
- char *name;
+ struct bin_attribute battr;
unsigned long address;
};
@@ -1462,13 +1490,34 @@ struct module_sect_attrs {
struct module_sect_attr attrs[0];
};
-static ssize_t module_sect_show(struct module_attribute *mattr,
- struct module_kobject *mk, char *buf)
+#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4))
+static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *battr,
+ char *buf, loff_t pos, size_t count)
{
struct module_sect_attr *sattr =
- container_of(mattr, struct module_sect_attr, mattr);
- return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
- (void *)sattr->address : NULL);
+ container_of(battr, struct module_sect_attr, battr);
+ char bounce[MODULE_SECT_READ_SIZE + 1];
+ size_t wrote;
+
+ if (pos != 0)
+ return -EINVAL;
+
+ /*
+ * Since we're a binary read handler, we must account for the
+ * trailing NUL byte that sprintf will write: if "buf" is
+ * too small to hold the NUL, or the NUL is exactly the last
+ * byte, the read will look like it got truncated by one byte.
+ * Since there is no way to ask sprintf nicely to not write
+ * the NUL, we have to use a bounce buffer.
+ */
+ wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n",
+ kallsyms_show_value(file->f_cred)
+ ? (void *)sattr->address : NULL);
+ count = min(count, wrote);
+ memcpy(buf, bounce, count);
+
+ return count;
}
static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
@@ -1476,7 +1525,7 @@ static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
unsigned int section;
for (section = 0; section < sect_attrs->nsections; section++)
- kfree(sect_attrs->attrs[section].name);
+ kfree(sect_attrs->attrs[section].battr.attr.name);
kfree(sect_attrs);
}
@@ -1485,43 +1534,41 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
unsigned int nloaded = 0, i, size[2];
struct module_sect_attrs *sect_attrs;
struct module_sect_attr *sattr;
- struct attribute **gattr;
+ struct bin_attribute **gattr;
/* Count loaded sections and allocate structures */
for (i = 0; i < info->hdr->e_shnum; i++)
if (!sect_empty(&info->sechdrs[i]))
nloaded++;
- size[0] = ALIGN(sizeof(*sect_attrs)
- + nloaded * sizeof(sect_attrs->attrs[0]),
- sizeof(sect_attrs->grp.attrs[0]));
- size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
+ size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
+ sizeof(sect_attrs->grp.bin_attrs[0]));
+ size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
if (sect_attrs == NULL)
return;
/* Setup section attributes. */
sect_attrs->grp.name = "sections";
- sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
+ sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];
sect_attrs->nsections = 0;
sattr = &sect_attrs->attrs[0];
- gattr = &sect_attrs->grp.attrs[0];
+ gattr = &sect_attrs->grp.bin_attrs[0];
for (i = 0; i < info->hdr->e_shnum; i++) {
Elf_Shdr *sec = &info->sechdrs[i];
if (sect_empty(sec))
continue;
+ sysfs_bin_attr_init(&sattr->battr);
sattr->address = sec->sh_addr;
- sattr->name = kstrdup(info->secstrings + sec->sh_name,
- GFP_KERNEL);
- if (sattr->name == NULL)
+ sattr->battr.attr.name =
+ kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
+ if (sattr->battr.attr.name == NULL)
goto out;
sect_attrs->nsections++;
- sysfs_attr_init(&sattr->mattr.attr);
- sattr->mattr.show = module_sect_show;
- sattr->mattr.store = NULL;
- sattr->mattr.attr.name = sattr->name;
- sattr->mattr.attr.mode = S_IRUSR;
- *(gattr++) = &(sattr++)->mattr.attr;
+ sattr->battr.read = module_sect_read;
+ sattr->battr.size = MODULE_SECT_READ_SIZE;
+ sattr->battr.attr.mode = 0400;
+ *(gattr++) = &(sattr++)->battr;
}
*gattr = NULL;
@@ -1611,7 +1658,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
continue;
if (info->sechdrs[i].sh_type == SHT_NOTE) {
sysfs_bin_attr_init(nattr);
- nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
+ nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
nattr->attr.mode = S_IRUGO;
nattr->size = info->sechdrs[i].sh_size;
nattr->private = (void *) info->sechdrs[i].sh_addr;
@@ -1788,7 +1835,6 @@ static int mod_sysfs_init(struct module *mod)
if (err)
mod_kobject_put(mod);
- /* delay uevent until full sysfs population */
out:
return err;
}
@@ -1825,7 +1871,6 @@ static int mod_sysfs_setup(struct module *mod,
add_sect_attrs(mod, info);
add_notes_attrs(mod, info);
- kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
return 0;
out_unreg_modinfo_attrs:
@@ -2210,7 +2255,7 @@ void *__symbol_get(const char *symbol)
const struct kernel_symbol *sym;
preempt_disable();
- sym = find_symbol(symbol, &owner, NULL, true, true);
+ sym = find_symbol(symbol, &owner, NULL, NULL, true, true);
if (sym && strong_try_module_get(owner))
sym = NULL;
preempt_enable();
@@ -2246,7 +2291,7 @@ static int verify_export_symbols(struct module *mod)
for (i = 0; i < ARRAY_SIZE(arr); i++) {
for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
if (find_symbol(kernel_symbol_name(s), &owner, NULL,
- true, false)) {
+ NULL, true, false)) {
pr_err("%s: exports duplicate symbol %s"
" (owned by %s)\n",
mod->name, kernel_symbol_name(s),
@@ -2258,6 +2303,21 @@ static int verify_export_symbols(struct module *mod)
return 0;
}
+static bool ignore_undef_symbol(Elf_Half emachine, const char *name)
+{
+ /*
+ * On x86, PIC code and Clang non-PIC code may contain calls such as
+ * foo@PLT. GNU as before 2.37 produces an unreferenced
+ * _GLOBAL_OFFSET_TABLE_ on x86-64.
+ * i386 has a similar problem but may not deserve a fix.
+ *
+ * If we ever have to ignore many symbols, consider refactoring the code to
+ * only warn if referenced by a relocation.
+ */
+ if (emachine == EM_386 || emachine == EM_X86_64)
+ return !strcmp(name, "_GLOBAL_OFFSET_TABLE_");
+ return false;
+}
+
/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{
@@ -2303,8 +2363,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
break;
}
- /* Ok if weak. */
- if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
+ /* Ok if weak or ignored. */
+ if (!ksym &&
+ (ELF_ST_BIND(sym[i].st_info) == STB_WEAK ||
+ ignore_undef_symbol(info->hdr->e_machine, name)))
break;
ret = PTR_ERR(ksym) ?: -ENOENT;
@@ -3481,6 +3543,9 @@ static noinline int do_init_module(struct module *mod)
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_LIVE, mod);
+ /* Delay uevent until module has finished its init routine */
+ kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
+
/*
* We need to finish all async code before the module init sequence
* is done. This has potential to deadlock. For example, a newly
@@ -3823,6 +3888,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
MODULE_STATE_GOING, mod);
klp_module_going(mod);
bug_cleanup:
+ mod->state = MODULE_STATE_GOING;
/* module_bug_cleanup needs module_mutex protection */
mutex_lock(&module_mutex);
module_bug_cleanup(mod);
@@ -4258,7 +4324,7 @@ static int modules_open(struct inode *inode, struct file *file)
if (!err) {
struct seq_file *m = file->private_data;
- m->private = kallsyms_show_value() ? NULL : (void *)8ul;
+ m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul;
}
return err;
@@ -4348,7 +4414,6 @@ struct module *__module_address(unsigned long addr)
}
return mod;
}
-EXPORT_SYMBOL_GPL(__module_address);
/*
* is_module_text_address - is this address inside module code?
@@ -4387,7 +4452,6 @@ struct module *__module_text_address(unsigned long addr)
}
return mod;
}
-EXPORT_SYMBOL_GPL(__module_text_address);
/* Don't grab lock, we're oopsing. */
void print_modules(void)
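
The module_sect_read() conversion above is a sysfs binary-attribute bounce buffer: format into a local buffer that has room for the trailing NUL, then copy out only min(count, wrote) bytes, with the printed address gated on kallsyms_show_value(file->f_cred). A userspace sketch of just the bounce-buffer part, with snprintf standing in for scnprintf:

/* Sketch of the bounce-buffer read: format into a local buffer that has room
 * for the trailing NUL, then copy only the bytes that actually fit. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define READ_SIZE (3 /* "0x", "\n" */ + (int)(sizeof(long) * 2))

static size_t sect_read(char *buf, size_t count, unsigned long address)
{
	char bounce[READ_SIZE + 1];
	size_t wrote;

	wrote = snprintf(bounce, sizeof(bounce), "0x%lx\n", address);
	if (wrote > count)
		wrote = count;		/* min(count, wrote) */
	memcpy(buf, bounce, wrote);
	return wrote;
}

int main(void)
{
	char out[READ_SIZE];
	size_t n = sect_read(out, sizeof(out), 0xc0de1000UL);

	fwrite(out, 1, n, stdout);
	return 0;
}
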
diff --git a/kernel/padata.c b/kernel/padata.c
index c280cb153915..93e4fb2d9f2e 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -167,23 +167,12 @@ EXPORT_SYMBOL(padata_do_parallel);
*/
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
- int cpu, num_cpus;
- unsigned int next_nr, next_index;
struct padata_parallel_queue *next_queue;
struct padata_priv *padata;
struct padata_list *reorder;
+ int cpu = pd->cpu;
- num_cpus = cpumask_weight(pd->cpumask.pcpu);
-
- /*
- * Calculate the percpu reorder queue and the sequence
- * number of the next object.
- */
- next_nr = pd->processed;
- next_index = next_nr % num_cpus;
- cpu = padata_index_to_cpu(pd, next_index);
next_queue = per_cpu_ptr(pd->pqueue, cpu);
-
reorder = &next_queue->reorder;
spin_lock(&reorder->lock);
@@ -194,7 +183,8 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
list_del_init(&padata->list);
atomic_dec(&pd->reorder_objects);
- pd->processed++;
+ pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1,
+ false);
spin_unlock(&reorder->lock);
goto out;
@@ -217,6 +207,7 @@ static void padata_reorder(struct parallel_data *pd)
struct padata_priv *padata;
struct padata_serial_queue *squeue;
struct padata_instance *pinst = pd->pinst;
+ struct padata_parallel_queue *next_queue;
/*
* We need to ensure that only one cpu can work on dequeueing of
@@ -248,7 +239,6 @@ static void padata_reorder(struct parallel_data *pd)
* so exit immediately.
*/
if (PTR_ERR(padata) == -ENODATA) {
- del_timer(&pd->timer);
spin_unlock_bh(&pd->lock);
return;
}
@@ -267,70 +257,29 @@ static void padata_reorder(struct parallel_data *pd)
/*
* The next object that needs serialization might have arrived to
- * the reorder queues in the meantime, we will be called again
- * from the timer function if no one else cares for it.
+ * the reorder queues in the meantime.
*
- * Ensure reorder_objects is read after pd->lock is dropped so we see
- * an increment from another task in padata_do_serial. Pairs with
+ * Ensure reorder queue is read after pd->lock is dropped so we see
+ * new objects from another task in padata_do_serial. Pairs with
* smp_mb__after_atomic in padata_do_serial.
*/
smp_mb();
- if (atomic_read(&pd->reorder_objects)
- && !(pinst->flags & PADATA_RESET))
- mod_timer(&pd->timer, jiffies + HZ);
- else
- del_timer(&pd->timer);
- return;
+ next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
+ if (!list_empty(&next_queue->reorder.list))
+ queue_work(pinst->wq, &pd->reorder_work);
}
static void invoke_padata_reorder(struct work_struct *work)
{
- struct padata_parallel_queue *pqueue;
struct parallel_data *pd;
local_bh_disable();
- pqueue = container_of(work, struct padata_parallel_queue, reorder_work);
- pd = pqueue->pd;
+ pd = container_of(work, struct parallel_data, reorder_work);
padata_reorder(pd);
local_bh_enable();
}
-static void padata_reorder_timer(struct timer_list *t)
-{
- struct parallel_data *pd = from_timer(pd, t, timer);
- unsigned int weight;
- int target_cpu, cpu;
-
- cpu = get_cpu();
-
- /* We don't lock pd here to not interfere with parallel processing
- * padata_reorder() calls on other CPUs. We just need any CPU out of
- * the cpumask.pcpu set. It would be nice if it's the right one but
- * it doesn't matter if we're off to the next one by using an outdated
- * pd->processed value.
- */
- weight = cpumask_weight(pd->cpumask.pcpu);
- target_cpu = padata_index_to_cpu(pd, pd->processed % weight);
-
- /* ensure to call the reorder callback on the correct CPU */
- if (cpu != target_cpu) {
- struct padata_parallel_queue *pqueue;
- struct padata_instance *pinst;
-
- /* The timer function is serialized wrt itself -- no locking
- * needed.
- */
- pinst = pd->pinst;
- pqueue = per_cpu_ptr(pd->pqueue, target_cpu);
- queue_work_on(target_cpu, pinst->wq, &pqueue->reorder_work);
- } else {
- padata_reorder(pd);
- }
-
- put_cpu();
-}
-
static void padata_serial_worker(struct work_struct *serial_work)
{
struct padata_serial_queue *squeue;
@@ -375,47 +324,23 @@ static void padata_serial_worker(struct work_struct *serial_work)
*/
void padata_do_serial(struct padata_priv *padata)
{
- int cpu;
- struct padata_parallel_queue *pqueue;
- struct parallel_data *pd;
- int reorder_via_wq = 0;
-
- pd = padata->pd;
-
- cpu = get_cpu();
-
- /* We need to run on the same CPU padata_do_parallel(.., padata, ..)
- * was called on -- or, at least, enqueue the padata object into the
- * correct per-cpu queue.
- */
- if (cpu != padata->cpu) {
- reorder_via_wq = 1;
- cpu = padata->cpu;
- }
-
- pqueue = per_cpu_ptr(pd->pqueue, cpu);
+ struct parallel_data *pd = padata->pd;
+ struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
+ padata->cpu);
spin_lock(&pqueue->reorder.lock);
- atomic_inc(&pd->reorder_objects);
list_add_tail(&padata->list, &pqueue->reorder.list);
+ atomic_inc(&pd->reorder_objects);
spin_unlock(&pqueue->reorder.lock);
/*
- * Ensure the atomic_inc of reorder_objects above is ordered correctly
+ * Ensure the addition to the reorder list is ordered correctly
* with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
* in padata_reorder.
*/
smp_mb__after_atomic();
- put_cpu();
-
- /* If we're running on the wrong CPU, call padata_reorder() via a
- * kernel worker.
- */
- if (reorder_via_wq)
- queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work);
- else
- padata_reorder(pd);
+ padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
@@ -471,14 +396,12 @@ static void padata_init_pqueues(struct parallel_data *pd)
continue;
}
- pqueue->pd = pd;
pqueue->cpu_index = cpu_index;
cpu_index++;
__padata_list_init(&pqueue->reorder);
__padata_list_init(&pqueue->parallel);
INIT_WORK(&pqueue->work, padata_parallel_worker);
- INIT_WORK(&pqueue->reorder_work, invoke_padata_reorder);
atomic_set(&pqueue->num_obj, 0);
}
}
@@ -506,12 +429,13 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
padata_init_pqueues(pd);
padata_init_squeues(pd);
- timer_setup(&pd->timer, padata_reorder_timer, 0);
atomic_set(&pd->seq_nr, -1);
atomic_set(&pd->reorder_objects, 0);
atomic_set(&pd->refcnt, 1);
pd->pinst = pinst;
spin_lock_init(&pd->lock);
+ pd->cpu = cpumask_first(pd->cpumask.pcpu);
+ INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
return pd;
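
The padata rework above drops the reorder timer and the pd->processed modulo arithmetic; ordering now follows a pd->cpu cursor that is advanced with cpumask_next_wrap() each time the next object is dequeued in sequence. The sketch below shows only that wrap-around "next set bit" cursor over a plain bitmask; it is an illustration, not the padata code.

/* Advance a "current CPU" cursor round-robin over a CPU mask, the role
 * cpumask_next_wrap() plays in the reworked padata_get_next(). */
#include <stdio.h>

static int next_cpu_wrap(unsigned long mask, int cur, int nbits)
{
	for (int i = 1; i <= nbits; i++) {
		int cpu = (cur + i) % nbits;

		if (mask & (1UL << cpu))
			return cpu;
	}
	return -1;	/* empty mask */
}

int main(void)
{
	unsigned long pcpu_mask = 0xbUL;	/* CPUs 0, 1 and 3 participate */
	int cpu = 0;				/* cursor starts at first set bit */

	for (int obj = 0; obj < 6; obj++) {
		printf("object %d dequeued on cpu %d\n", obj, cpu);
		cpu = next_cpu_wrap(pcpu_mask, cpu, 4);
	}
	return 0;
}
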
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index f5ce9f7ec132..28db51274ed0 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -842,17 +842,6 @@ static int software_resume(void)
/* Check if the device is there */
swsusp_resume_device = name_to_dev_t(resume_file);
-
- /*
- * name_to_dev_t is ineffective to verify parition if resume_file is in
- * integer format. (e.g. major:minor)
- */
- if (isdigit(resume_file[0]) && resume_wait) {
- int partno;
- while (!get_gendisk(swsusp_resume_device, &partno))
- msleep(10);
- }
-
if (!swsusp_resume_device) {
/*
* Some device discovery might still be in progress; we need
@@ -901,6 +890,13 @@ static int software_resume(void)
error = freeze_processes();
if (error)
goto Close_Finish;
+
+ error = freeze_kernel_threads();
+ if (error) {
+ thaw_processes();
+ goto Close_Finish;
+ }
+
error = load_image_and_restore();
thaw_processes();
Finish:
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index d7f6c1a288d3..e9494c29f1ca 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -491,10 +491,10 @@ static int swap_writer_finish(struct swap_map_handle *handle,
unsigned int flags, int error)
{
if (!error) {
- flush_swap_writer(handle);
pr_info("S");
error = mark_swapfiles(handle, flags);
pr_cont("|\n");
+ flush_swap_writer(handle);
}
if (error)
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 0f1898820cba..c27d47058841 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -35,6 +35,9 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args);
void __printk_safe_enter(void);
void __printk_safe_exit(void);
+void printk_safe_init(void);
+bool printk_percpu_data_ready(void);
+
#define printk_safe_enter_irqsave(flags) \
do { \
local_irq_save(flags); \
@@ -76,4 +79,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
#define printk_safe_enter_irq() local_irq_disable()
#define printk_safe_exit_irq() local_irq_enable()
+static inline void printk_safe_init(void) { }
+static inline bool printk_percpu_data_ready(void) { return false; }
#endif /* CONFIG_PRINTK */
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 7a2fdc097c8c..cf272aba362b 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -443,6 +443,18 @@ static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
+/*
+ * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
+ * per_cpu_areas are initialised. This variable is set to true when
+ * it's safe to access per-CPU data.
+ */
+static bool __printk_percpu_data_ready __read_mostly;
+
+bool printk_percpu_data_ready(void)
+{
+ return __printk_percpu_data_ready;
+}
+
/* Return log buffer address */
char *log_buf_addr_get(void)
{
@@ -1101,12 +1113,28 @@ static void __init log_buf_add_cpu(void)
static inline void log_buf_add_cpu(void) {}
#endif /* CONFIG_SMP */
+static void __init set_percpu_data_ready(void)
+{
+ printk_safe_init();
+ /* Make sure we set this flag only after printk_safe() init is done */
+ barrier();
+ __printk_percpu_data_ready = true;
+}
+
void __init setup_log_buf(int early)
{
unsigned long flags;
char *new_log_buf;
unsigned int free;
+ /*
+ * Some archs call setup_log_buf() multiple times: first very early,
+ * e.g. from setup_arch(), and again once percpu areas are
+ * initialised.
+ */
+ if (!early)
+ set_percpu_data_ready();
+
if (log_buf != __log_buf)
return;
@@ -2120,6 +2148,9 @@ static int __init console_setup(char *str)
char *s, *options, *brl_options = NULL;
int idx;
+ if (str[0] == 0)
+ return 1;
+
if (_braille_console_setup(&str, &brl_options))
return 1;
@@ -2913,6 +2944,9 @@ static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
void wake_up_klogd(void)
{
+ if (!printk_percpu_data_ready())
+ return;
+
preempt_disable();
if (waitqueue_active(&log_wait)) {
this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
@@ -2923,6 +2957,9 @@ void wake_up_klogd(void)
void defer_console_output(void)
{
+ if (!printk_percpu_data_ready())
+ return;
+
preempt_disable();
__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
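
The printk changes above publish a __printk_percpu_data_ready flag once per-CPU areas (and printk_safe) are set up, and wake_up_klogd()/defer_console_output() return early before that point instead of touching per-CPU irq_work. Below is a loose userspace analogue of that publish-then-check pattern; it uses C11 release/acquire atomics for the ordering that the hunk gets from the boot sequence plus barrier(), so treat it as an illustration only.

/* Set a readiness flag only after init is done, and have deferred-work
 * paths bail out until it is set. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool percpu_ready;

static void percpu_init(void)
{
	/* ...allocate per-CPU state here... */
	atomic_store_explicit(&percpu_ready, true, memory_order_release);
}

static void defer_output(const char *msg)
{
	if (!atomic_load_explicit(&percpu_ready, memory_order_acquire)) {
		printf("too early, skipping deferred wakeup for: %s\n", msg);
		return;
	}
	printf("queueing irq-work style wakeup for: %s\n", msg);
}

int main(void)
{
	defer_output("early boot message");	/* skipped */
	percpu_init();
	defer_output("later message");		/* queued */
	return 0;
}
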
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 0913b4d385de..0dbc06033113 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -39,7 +39,6 @@
* There are situations when we want to make sure that all buffers
* were handled or when IRQs are blocked.
*/
-static int printk_safe_irq_ready __read_mostly;
#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \
sizeof(atomic_t) - \
@@ -56,6 +55,8 @@ struct printk_safe_seq_buf {
static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);
+static DEFINE_RAW_SPINLOCK(safe_read_lock);
+
#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif
@@ -63,7 +64,7 @@ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
/* Get flushed in a more safe context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
- if (printk_safe_irq_ready)
+ if (printk_percpu_data_ready())
irq_work_queue(&s->work);
}
@@ -191,8 +192,6 @@ static void report_message_lost(struct printk_safe_seq_buf *s)
*/
static void __printk_safe_flush(struct irq_work *work)
{
- static raw_spinlock_t read_lock =
- __RAW_SPIN_LOCK_INITIALIZER(read_lock);
struct printk_safe_seq_buf *s =
container_of(work, struct printk_safe_seq_buf, work);
unsigned long flags;
@@ -206,7 +205,7 @@ static void __printk_safe_flush(struct irq_work *work)
* different CPUs. This is especially important when printing
* a backtrace.
*/
- raw_spin_lock_irqsave(&read_lock, flags);
+ raw_spin_lock_irqsave(&safe_read_lock, flags);
i = 0;
more:
@@ -243,7 +242,7 @@ more:
out:
report_message_lost(s);
- raw_spin_unlock_irqrestore(&read_lock, flags);
+ raw_spin_unlock_irqrestore(&safe_read_lock, flags);
}
/**
@@ -289,6 +288,14 @@ void printk_safe_flush_on_panic(void)
raw_spin_lock_init(&logbuf_lock);
}
+ if (raw_spin_is_locked(&safe_read_lock)) {
+ if (num_online_cpus() > 1)
+ return;
+
+ debug_locks_off();
+ raw_spin_lock_init(&safe_read_lock);
+ }
+
printk_safe_flush();
}
@@ -414,14 +421,6 @@ void __init printk_safe_init(void)
#endif
}
- /*
- * In the highly unlikely event that a NMI were to trigger at
- * this moment. Make sure IRQ work is set up before this
- * variable is set.
- */
- barrier();
- printk_safe_irq_ready = 1;
-
/* Flush pending messages that did not have scheduled IRQ works. */
printk_safe_flush();
}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b93eb4eaf7ac..af74e843221b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -163,6 +163,21 @@ void __ptrace_unlink(struct task_struct *child)
spin_unlock(&child->sighand->siglock);
}
+static bool looks_like_a_spurious_pid(struct task_struct *task)
+{
+ if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
+ return false;
+
+ if (task_pid_vnr(task) == task->ptrace_message)
+ return false;
+ /*
+ * The tracee changed its pid but the PTRACE_EVENT_EXEC event
+ * was not wait()'ed; most probably the debugger targets the old
+ * leader, which was destroyed in de_thread().
+ */
+ return true;
+}
+
/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
@@ -173,7 +188,8 @@ static bool ptrace_freeze_traced(struct task_struct *task)
return ret;
spin_lock_irq(&task->sighand->siglock);
- if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+ if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
+ !__fatal_signal_pending(task)) {
task->state = __TASK_TRACED;
ret = true;
}
@@ -258,17 +274,11 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
return ret;
}
-static bool ptrace_has_cap(const struct cred *cred, struct user_namespace *ns,
- unsigned int mode)
+static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
- int ret;
-
if (mode & PTRACE_MODE_NOAUDIT)
- ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NOAUDIT);
- else
- ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NONE);
-
- return ret == 0;
+ return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
+ return ns_capable(ns, CAP_SYS_PTRACE);
}
/* Returns 0 on success, -errno on denial. */
@@ -320,7 +330,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
gid_eq(caller_gid, tcred->sgid) &&
gid_eq(caller_gid, tcred->gid))
goto ok;
- if (ptrace_has_cap(cred, tcred->user_ns, mode))
+ if (ptrace_has_cap(tcred->user_ns, mode))
goto ok;
rcu_read_unlock();
return -EPERM;
@@ -339,7 +349,7 @@ ok:
mm = task->mm;
if (mm &&
((get_dumpable(mm) != SUID_DUMP_USER) &&
- !ptrace_has_cap(cred, mm->user_ns, mode)))
+ !ptrace_has_cap(mm->user_ns, mode)))
return -EPERM;
return security_ptrace_access_check(task, mode);
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 8fb44dec9ad7..45bea54f9aee 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -539,22 +539,22 @@ static int __init reboot_setup(char *str)
break;
case 's':
- {
- int rc;
-
- if (isdigit(*(str+1))) {
- rc = kstrtoint(str+1, 0, &reboot_cpu);
- if (rc)
- return rc;
- } else if (str[1] == 'm' && str[2] == 'p' &&
- isdigit(*(str+3))) {
- rc = kstrtoint(str+3, 0, &reboot_cpu);
- if (rc)
- return rc;
- } else
+ if (isdigit(*(str+1)))
+ reboot_cpu = simple_strtoul(str+1, NULL, 0);
+ else if (str[1] == 'm' && str[2] == 'p' &&
+ isdigit(*(str+3)))
+ reboot_cpu = simple_strtoul(str+3, NULL, 0);
+ else
reboot_mode = REBOOT_SOFT;
+ if (reboot_cpu >= num_possible_cpus()) {
+ pr_err("Ignoring the CPU number in reboot= option. "
+ "CPU %d exceeds possible cpu number %d\n",
+ reboot_cpu, num_possible_cpus());
+ reboot_cpu = 0;
+ break;
+ }
break;
- }
+
case 'g':
reboot_mode = REBOOT_GPIO;
break;
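
The reboot.c hunk goes back to simple_strtoul() for the "s<N>"/"smp<N>" forms, so early parsing cannot fail the whole option, and adds a range check so a CPU number beyond the possible-CPU count falls back to 0. A small userspace parser with the same shape; strtoul and a fixed constant stand in for simple_strtoul() and num_possible_cpus().

/* Parse "s<N>" / "smp<N>" and clamp an out-of-range CPU number to 0. */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_POSSIBLE_CPUS 8

static int parse_reboot_cpu(const char *str)
{
	unsigned long cpu;

	if (isdigit((unsigned char)str[1]))
		cpu = strtoul(str + 1, NULL, 0);
	else if (str[1] == 'm' && str[2] == 'p' && isdigit((unsigned char)str[3]))
		cpu = strtoul(str + 3, NULL, 0);
	else
		return -1;	/* no CPU given: "soft" reboot mode */

	if (cpu >= NUM_POSSIBLE_CPUS) {
		fprintf(stderr, "CPU %lu exceeds possible cpu number %d, using 0\n",
			cpu, NUM_POSSIBLE_CPUS);
		return 0;
	}
	return (int)cpu;
}

int main(void)
{
	printf("%d\n", parse_reboot_cpu("smp3"));	/* 3 */
	printf("%d\n", parse_reboot_cpu("s42"));	/* clamped to 0 */
	return 0;
}
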
diff --git a/kernel/relay.c b/kernel/relay.c
index 9e0f52375487..735cb208f023 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -197,6 +197,7 @@ free_buf:
static void relay_destroy_channel(struct kref *kref)
{
struct rchan *chan = container_of(kref, struct rchan, kref);
+ free_percpu(chan->buf);
kfree(chan);
}
@@ -581,6 +582,11 @@ struct rchan *relay_open(const char *base_filename,
return NULL;
chan->buf = alloc_percpu(struct rchan_buf *);
+ if (!chan->buf) {
+ kfree(chan);
+ return NULL;
+ }
+
chan->version = RELAYFS_CHANNEL_VERSION;
chan->n_subbufs = n_subbufs;
chan->subbuf_size = subbuf_size;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2befd2c4ce9e..013b1c6cb4ed 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -24,7 +24,7 @@
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
+#ifdef CONFIG_SCHED_DEBUG
/*
* Debugging: various feature bits
*
@@ -2345,6 +2345,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
* Silence PROVE_RCU.
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
+ rseq_migrate(p);
/*
* We're setting the CPU for the first time, we don't migrate,
* so use __set_task_cpu().
@@ -2409,6 +2410,7 @@ void wake_up_new_task(struct task_struct *p)
* as we're not fully set-up yet.
*/
p->recent_used_cpu = task_cpu(p);
+ rseq_migrate(p);
__set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
#endif
rq = __task_rq_lock(p, &rf);
@@ -3862,7 +3864,8 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
*/
if (dl_prio(prio)) {
if (!dl_prio(p->normal_prio) ||
- (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
+ (pi_task && dl_prio(pi_task->prio) &&
+ dl_entity_preempt(&pi_task->dl, &p->dl))) {
p->dl.dl_boosted = 1;
queue_flag |= ENQUEUE_REPLENISH;
} else
@@ -4981,12 +4984,8 @@ static void do_sched_yield(void)
schedstat_inc(rq->yld_count);
current->sched_class->yield_task(rq);
- /*
- * Since we are going to call schedule() anyway, there's
- * no need to preempt or enable interrupts:
- */
preempt_disable();
- rq_unlock(rq, &rf);
+ rq_unlock_irq(rq, &rf);
sched_preempt_enable_no_resched();
schedule();
@@ -5571,13 +5570,14 @@ void idle_task_exit(void)
struct mm_struct *mm = current->active_mm;
BUG_ON(cpu_online(smp_processor_id()));
+ BUG_ON(current != this_rq()->idle);
if (mm != &init_mm) {
switch_mm(mm, &init_mm, current);
- current->active_mm = &init_mm;
finish_arch_post_lock_switch();
}
- mmdrop(mm);
+
+ /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
}
/*
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index ebec37cb3be9..aa592dc3cb40 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2464,7 +2464,7 @@ int sched_dl_global_validate(void)
u64 period = global_rt_period();
u64 new_bw = to_ratio(period, runtime);
struct dl_bw *dl_b;
- int cpu, ret = 0;
+ int cpu, cpus, ret = 0;
unsigned long flags;
/*
@@ -2479,9 +2479,10 @@ int sched_dl_global_validate(void)
for_each_possible_cpu(cpu) {
rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
+ cpus = dl_bw_cpus(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
- if (new_bw < dl_b->total_bw)
+ if (new_bw * cpus < dl_b->total_bw)
ret = -EBUSY;
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
@@ -2688,6 +2689,7 @@ void __dl_clear_params(struct task_struct *p)
dl_se->dl_bw = 0;
dl_se->dl_density = 0;
+ dl_se->dl_boosted = 0;
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
dl_se->dl_non_contending = 0;
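
The sched_dl_global_validate() fix above multiplies the proposed per-CPU bandwidth by the number of CPUs in the root domain before comparing it with dl_b->total_bw, because total_bw accumulates the bandwidth of every admitted task in that domain. A small fixed-point sketch of the comparison; the 20-bit scale factor and the microsecond figures are assumptions for illustration, not necessarily the kernel's exact constants.

/* Fixed-point sketch of the per-root-domain admission check. */
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20

static uint64_t to_ratio(uint64_t period_us, uint64_t runtime_us)
{
	return (runtime_us << BW_SHIFT) / period_us;
}

int main(void)
{
	int cpus = 4;
	/* Three tasks already admitted at 300us/1000us each in this domain. */
	uint64_t total_bw = 3 * to_ratio(1000, 300);
	/* Proposed new global limit: 200us runtime per 1000us period. */
	uint64_t new_bw = to_ratio(1000, 200);

	if (new_bw * cpus < total_bw)
		printf("-EBUSY: %llu * %d < %llu\n",
		       (unsigned long long)new_bw, cpus,
		       (unsigned long long)total_bw);
	else
		printf("ok: %llu * %d >= %llu\n",
		       (unsigned long long)new_bw, cpus,
		       (unsigned long long)total_bw);
	return 0;
}
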
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 78fadf0438ea..9518606fa1e5 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -11,8 +11,6 @@
*/
#include "sched.h"
-static DEFINE_SPINLOCK(sched_debug_lock);
-
/*
* This allows printing both to /proc/sched_debug and
* to the console
@@ -434,16 +432,37 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
#endif
#ifdef CONFIG_CGROUP_SCHED
+static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];
-static char *task_group_path(struct task_group *tg)
+static void task_group_path(struct task_group *tg, char *path, int plen)
{
- if (autogroup_path(tg, group_path, PATH_MAX))
- return group_path;
+ if (autogroup_path(tg, path, plen))
+ return;
- cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
+ cgroup_path(tg->css.cgroup, path, plen);
+}
- return group_path;
+/*
+ * Only one SEQ_printf_task_group_path() caller at a time can use the
+ * full length group_path[] for the cgroup path. Other simultaneous
+ * callers have to use a shorter stack buffer. A "..." suffix is appended
+ * at the end of the stack buffer; it becomes visible only when the output
+ * fills the buffer, indicating possible path name truncation.
+ */
+#define SEQ_printf_task_group_path(m, tg, fmt...) \
+{ \
+ if (spin_trylock(&sched_debug_lock)) { \
+ task_group_path(tg, group_path, sizeof(group_path)); \
+ SEQ_printf(m, fmt, group_path); \
+ spin_unlock(&sched_debug_lock); \
+ } else { \
+ char buf[128]; \
+ char *bufend = buf + sizeof(buf) - 3; \
+ task_group_path(tg, buf, bufend - buf); \
+ strcpy(bufend - 1, "..."); \
+ SEQ_printf(m, fmt, buf); \
+ } \
}
#endif
@@ -470,7 +489,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
- SEQ_printf(m, " %s", task_group_path(task_group(p)));
+ SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif
SEQ_printf(m, "\n");
@@ -507,7 +526,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
#ifdef CONFIG_FAIR_GROUP_SCHED
SEQ_printf(m, "\n");
- SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
+ SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
SEQ_printf(m, "\n");
SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
@@ -579,7 +598,7 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
SEQ_printf(m, "\n");
- SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
+ SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
SEQ_printf(m, "\n");
SEQ_printf(m, "rt_rq[%d]:\n", cpu);
@@ -631,7 +650,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
static void print_cpu(struct seq_file *m, int cpu)
{
struct rq *rq = cpu_rq(cpu);
- unsigned long flags;
#ifdef CONFIG_X86
{
@@ -690,13 +708,11 @@ do { \
}
#undef P
- spin_lock_irqsave(&sched_debug_lock, flags);
print_cfs_stats(m, cpu);
print_rt_stats(m, cpu);
print_dl_stats(m, cpu);
print_rq(m, rq, cpu);
- spin_unlock_irqrestore(&sched_debug_lock, flags);
SEQ_printf(m, "\n");
}
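
In the debug.c change above, only one SEQ_printf_task_group_path() caller at a time gets sched_debug_lock and the full group_path[] buffer; concurrent callers fall back to a short stack buffer that has a "..." marker planted near its end, which only becomes visible when the rendered path fills the buffer. The sketch below demonstrates just that truncation-marker trick:

/* Render into a short stack buffer and plant a "..." marker that only shows
 * up when the text fills the buffer, signalling truncation. */
#include <stdio.h>
#include <string.h>

static void print_path(const char *path)
{
	char buf[32];
	char *bufend = buf + sizeof(buf) - 3;

	snprintf(buf, bufend - buf, "%s", path);
	strcpy(bufend - 1, "...");	/* visible only if buf is full */
	printf("%s\n", buf);
}

int main(void)
{
	print_path("/autogroup-17");
	print_path("/system.slice/some/deeply/nested/cgroup/path/name");
	return 0;
}
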
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 86ccaaf0c1bf..80392cdd5f3b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2697,7 +2697,7 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
/*
* We don't care about NUMA placement if we don't have memory.
*/
- if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
+ if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
return;
/*
@@ -7337,7 +7337,15 @@ static int detach_tasks(struct lb_env *env)
if (!can_migrate_task(p, env))
goto next;
- load = task_h_load(p);
+ /*
+ * Depending on the number of CPUs and tasks and the
+ * cgroup hierarchy, task_h_load() can return a zero
+ * value. Make sure that env->imbalance decreases,
+ * otherwise detach_tasks() will stop only after
+ * detaching up to loop_max tasks.
+ */
+ load = max_t(unsigned long, task_h_load(p), 1);
+
if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
goto next;
@@ -9200,7 +9208,12 @@ static void kick_ilb(unsigned int flags)
{
int ilb_cpu;
- nohz.next_balance++;
+ /*
+ * Increase nohz.next_balance only if a full ilb is triggered, but
+ * not if we only update stats.
+ */
+ if (flags & NOHZ_BALANCE_KICK)
+ nohz.next_balance = jiffies+1;
ilb_cpu = find_new_ilb();
@@ -9495,6 +9508,14 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
}
}
+ /*
+ * next_balance will be updated only when there is a need.
+ * When the CPU is attached to null domain for ex, it will not be
+ * updated.
+ */
+ if (likely(update_next_balance))
+ nohz.next_balance = next_balance;
+
/* Newly idle CPU doesn't need an update */
if (idle != CPU_NEWLY_IDLE) {
update_blocked_averages(this_cpu);
@@ -9515,14 +9536,6 @@ abort:
if (has_blocked_load)
WRITE_ONCE(nohz.has_blocked, 1);
- /*
- * next_balance will be updated only when there is a need.
- * When the CPU is attached to null domain for ex, it will not be
- * updated.
- */
- if (likely(update_next_balance))
- nohz.next_balance = next_balance;
-
return ret;
}
@@ -9890,16 +9903,22 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
{
struct cfs_rq *cfs_rq;
+ list_add_leaf_cfs_rq(cfs_rq_of(se));
+
/* Start to propagate at parent */
se = se->parent;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
- if (cfs_rq_throttled(cfs_rq))
- break;
+ if (!cfs_rq_throttled(cfs_rq)) { \
+ update_load_avg(cfs_rq, se, UPDATE_TG);
+ list_add_leaf_cfs_rq(cfs_rq);
+ continue;
+ }
- update_load_avg(cfs_rq, se, UPDATE_TG);
+ if (list_add_leaf_cfs_rq(cfs_rq))
+ break;
}
}
#else
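
A note on the max_t(unsigned long, task_h_load(p), 1) clamp above: it guarantees that env->imbalance strictly decreases for every detached task, so the loop cannot run all the way to loop_max on zero-load tasks. A rough standalone sketch of that progress argument (the task/imbalance shapes here are illustrative, not the kernel's):

#include <stdio.h>

struct task { unsigned long h_load; };

/*
 * Clamp each task's contribution to at least 1 so the imbalance strictly
 * decreases and the detach loop stops well before loop_max.
 */
static int detach_until_balanced(const struct task *tasks, int nr,
				 long imbalance, int loop_max)
{
	int detached = 0;

	for (int i = 0; i < nr && i < loop_max; i++) {
		unsigned long load = tasks[i].h_load;

		if (load < 1)		/* the fix: max_t(unsigned long, load, 1) */
			load = 1;

		imbalance -= load;
		detached++;
		if (imbalance <= 0)
			break;
	}
	return detached;
}

int main(void)
{
	/* Several tasks report a zero hierarchical load. */
	const struct task tasks[8] = { {0}, {0}, {3}, {0}, {2}, {0}, {0}, {1} };

	/* Without the clamp the zero-load tasks would never reduce the
	 * imbalance and only loop_max would end the loop. */
	printf("detached %d tasks\n", detach_until_balanced(tasks, 8, 4, 32));
	return 0;
}
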
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 94bec97bd5e2..7b7ba91e319b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -123,7 +123,13 @@ static inline void cpu_load_update_active(struct rq *this_rq) { }
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
-# define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT)
+# define scale_load_down(w) \
+({ \
+ unsigned long __w = (w); \
+ if (__w) \
+ __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
+ __w; \
+})
#else
# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w) (w)
@@ -241,30 +247,6 @@ struct rt_bandwidth {
void __dl_clear_params(struct task_struct *p);
-/*
- * To keep the bandwidth of -deadline tasks and groups under control
- * we need some place where:
- * - store the maximum -deadline bandwidth of the system (the group);
- * - cache the fraction of that bandwidth that is currently allocated.
- *
- * This is all done in the data structure below. It is similar to the
- * one used for RT-throttling (rt_bandwidth), with the main difference
- * that, since here we are only interested in admission control, we
- * do not decrease any runtime while the group "executes", neither we
- * need a timer to replenish it.
- *
- * With respect to SMP, the bandwidth is given on a per-CPU basis,
- * meaning that:
- * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
- * - dl_total_bw array contains, in the i-eth element, the currently
- * allocated bandwidth on the i-eth CPU.
- * Moreover, groups consume bandwidth on each CPU, while tasks only
- * consume bandwidth on the CPU they're running on.
- * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
- * that will be shown the next time the proc or cgroup controls will
- * be red. It on its turn can be changed by writing on its own
- * control.
- */
struct dl_bandwidth {
raw_spinlock_t dl_runtime_lock;
u64 dl_runtime;
@@ -276,6 +258,24 @@ static inline int dl_bandwidth_enabled(void)
return sysctl_sched_rt_runtime >= 0;
}
+/*
+ * To keep the bandwidth of -deadline tasks under control
+ * we need some place where:
+ * - store the maximum -deadline bandwidth of each cpu;
+ * - cache the fraction of bandwidth that is currently allocated in
+ * each root domain;
+ *
+ * This is all done in the data structure below. It is similar to the
+ * one used for RT-throttling (rt_bandwidth), with the main difference
+ * that, since here we are only interested in admission control, we
+ * do not decrease any runtime while the group "executes", neither we
+ * need a timer to replenish it.
+ *
+ * With respect to SMP, bandwidth is given on a per root domain basis,
+ * meaning that:
+ * - bw (< 100%) is the deadline bandwidth of each CPU;
+ * - total_bw is the currently allocated bandwidth in each root domain;
+ */
struct dl_bw {
raw_spinlock_t lock;
u64 bw;
@@ -1355,7 +1355,7 @@ enum {
#undef SCHED_FEAT
-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
+#ifdef CONFIG_SCHED_DEBUG
/*
* To support run-time toggling of sched features, all the translation units
@@ -1363,6 +1363,7 @@ enum {
*/
extern const_debug unsigned int sysctl_sched_features;
+#ifdef CONFIG_JUMP_LABEL
#define SCHED_FEAT(name, enabled) \
static __always_inline bool static_branch_##name(struct static_key *key) \
{ \
@@ -1375,7 +1376,13 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
-#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */
+#else /* !CONFIG_JUMP_LABEL */
+
+#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+
+#endif /* CONFIG_JUMP_LABEL */
+
+#else /* !SCHED_DEBUG */
/*
* Each translation unit has its own copy of sysctl_sched_features to allow
@@ -1391,7 +1398,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features =
#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
-#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
+#endif /* SCHED_DEBUG */
extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;
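
The scale_load_down() rework above keeps a floor of 2 for any non-zero weight, so low group weights no longer collapse to 0 after the fixed-point shift (which made such groups look as if they had no weight at all). A standalone before/after sketch, reusing the 64-bit SCHED_FIXEDPOINT_SHIFT of 10 from the hunk; the sample weights are arbitrary:

#include <stdio.h>

#define SCHED_FIXEDPOINT_SHIFT	10

/* Old behaviour: a plain shift truncates small weights to 0. */
static unsigned long scale_down_old(unsigned long w)
{
	return w >> SCHED_FIXEDPOINT_SHIFT;
}

/* New behaviour from the hunk above: keep a floor of 2 for non-zero w. */
static unsigned long scale_down_new(unsigned long w)
{
	unsigned long s = w >> SCHED_FIXEDPOINT_SHIFT;

	if (!w)
		return 0;
	return s > 2UL ? s : 2UL;	/* i.e. max(2UL, w >> SHIFT) */
}

int main(void)
{
	unsigned long weights[] = { 0, 15, 512, 1024, 88761 };

	for (unsigned int i = 0; i < sizeof(weights) / sizeof(weights[0]); i++)
		printf("w=%6lu old=%3lu new=%3lu\n", weights[i],
		       scale_down_old(weights[i]), scale_down_new(weights[i]));
	return 0;
}
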
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 74b694392f2f..f58efa5cc647 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1098,7 +1098,7 @@ sd_init(struct sched_domain_topology_level *tl,
sd_flags = (*tl->sd_flags)();
if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
"wrong sd_flags in topology description\n"))
- sd_flags &= ~TOPOLOGY_SD_FLAGS;
+ sd_flags &= TOPOLOGY_SD_FLAGS;
*sd = (struct sched_domain){
.min_interval = sd_weight,
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 56e69203b658..a9dd2325bdda 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -36,7 +36,7 @@
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
-#include <linux/security.h>
+#include <linux/capability.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>
@@ -383,8 +383,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
* behavior of privileged children.
*/
if (!task_no_new_privs(current) &&
- security_capable(current_cred(), current_user_ns(),
- CAP_SYS_ADMIN, CAP_OPT_NOAUDIT) != 0)
+ !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
/* Allocate a new seccomp_filter */
@@ -772,6 +771,8 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
const bool recheck_after_trace)
{
BUG();
+
+ return -1;
}
#endif
diff --git a/kernel/signal.c b/kernel/signal.c
index c42eaf39b572..a02a25acf205 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -385,16 +385,17 @@ static bool task_participate_group_stop(struct task_struct *task)
void task_join_group_stop(struct task_struct *task)
{
+ unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
+ struct signal_struct *sig = current->signal;
+
+ if (sig->group_stop_count) {
+ sig->group_stop_count++;
+ mask |= JOBCTL_STOP_CONSUME;
+ } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
+ return;
+
/* Have the new thread join an on-going signal group stop */
- unsigned long jobctl = current->jobctl;
- if (jobctl & JOBCTL_STOP_PENDING) {
- struct signal_struct *sig = current->signal;
- unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
- unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
- if (task_set_jobctl_pending(task, signr | gstop)) {
- sig->group_stop_count++;
- }
- }
+ task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
/*
@@ -1837,7 +1838,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
* This is only possible if parent == real_parent.
* Check if it has changed security domain.
*/
- if (tsk->parent_exec_id != tsk->parent->self_exec_id)
+ if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
sig = SIGCHLD;
}
diff --git a/kernel/smp.c b/kernel/smp.c
index 084c8b3a2681..00d208ef07c7 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -103,12 +103,12 @@ void __init call_function_init(void)
* previous function call. For multi-cpu calls its even more interesting
* as we'll have to ensure no other cpu is observing our csd.
*/
-static __always_inline void csd_lock_wait(call_single_data_t *csd)
+static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}
-static __always_inline void csd_lock(call_single_data_t *csd)
+static __always_inline void csd_lock(struct __call_single_data *csd)
{
csd_lock_wait(csd);
csd->flags |= CSD_FLAG_LOCK;
@@ -121,7 +121,7 @@ static __always_inline void csd_lock(call_single_data_t *csd)
smp_wmb();
}
-static __always_inline void csd_unlock(call_single_data_t *csd)
+static __always_inline void csd_unlock(struct __call_single_data *csd)
{
WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
@@ -138,7 +138,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
* for execution on the given CPU. data must already have
* ->func, ->info, and ->flags set.
*/
-static int generic_exec_single(int cpu, call_single_data_t *csd,
+static int generic_exec_single(int cpu, struct __call_single_data *csd,
smp_call_func_t func, void *info)
{
if (cpu == smp_processor_id()) {
@@ -323,7 +323,7 @@ EXPORT_SYMBOL(smp_call_function_single);
* NOTE: Be careful, there is unfortunately no current debugging facility to
* validate the correctness of this serialization.
*/
-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
+int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
int err = 0;
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index c230c2dd48e1..84c16654d859 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -187,6 +187,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
kfree(td);
return PTR_ERR(tsk);
}
+ kthread_set_per_cpu(tsk, cpu);
/*
* Park the thread so that it could start right on the CPU
* when it is available.
diff --git a/kernel/sys.c b/kernel/sys.c
index 096932a45046..baf60a3aa34b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1275,11 +1275,13 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
- struct oldold_utsname tmp = {};
+ struct oldold_utsname tmp;
if (!name)
return -EFAULT;
+ memset(&tmp, 0, sizeof(tmp));
+
down_read(&uts_sem);
memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4c4fd4339d33..a5d75bc38eea 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -68,6 +68,8 @@
#include <linux/mount.h>
#include <linux/pipe_fs_i.h>
+#include "../lib/kstrtox.h"
+
#include <linux/uaccess.h>
#include <asm/processor.h>
@@ -2069,6 +2071,41 @@ static void proc_skip_char(char **buf, size_t *size, const char v)
}
}
+/**
+ * strtoul_lenient - parse an ASCII formatted integer from a buffer and only
+ * fail on overflow
+ *
+ * @cp: kernel buffer containing the string to parse
+ * @endp: pointer to store the trailing characters
+ * @base: the base to use
+ * @res: where the parsed integer will be stored
+ *
+ * In case of success 0 is returned and @res will contain the parsed integer,
+ * @endp will hold any trailing characters.
+ * This function will fail the parse on overflow. If there wasn't an overflow
+ * the function will defer the decision what characters count as invalid to the
+ * caller.
+ */
+static int strtoul_lenient(const char *cp, char **endp, unsigned int base,
+ unsigned long *res)
+{
+ unsigned long long result;
+ unsigned int rv;
+
+ cp = _parse_integer_fixup_radix(cp, &base);
+ rv = _parse_integer(cp, base, &result);
+ if ((rv & KSTRTOX_OVERFLOW) || (result != (unsigned long)result))
+ return -ERANGE;
+
+ cp += rv;
+
+ if (endp)
+ *endp = (char *)cp;
+
+ *res = (unsigned long)result;
+ return 0;
+}
+
#define TMPBUFLEN 22
/**
* proc_get_long - reads an ASCII formatted integer from a user buffer
@@ -2112,7 +2149,8 @@ static int proc_get_long(char **buf, size_t *size,
if (!isdigit(*p))
return -EINVAL;
- *val = simple_strtoul(p, &p, 0);
+ if (strtoul_lenient(p, &p, 0, val))
+ return -EINVAL;
len = p - tmp;
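
The strtoul_lenient() helper above fails only on overflow and leaves the verdict on trailing characters to proc_get_long(), which still rejects them itself. A userspace approximation of the same contract, built on strtoull() instead of the kernel's _parse_integer() internals:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Like the kernel helper: only overflow is an error here; whether the
 * trailing characters are acceptable is the caller's decision.
 */
static int strtoul_lenient(const char *cp, char **endp, int base,
			   unsigned long *res)
{
	unsigned long long v;

	errno = 0;
	v = strtoull(cp, endp, base);
	if (errno == ERANGE || v != (unsigned long)v)	/* range check is a no-op on LP64 */
		return -1;

	*res = (unsigned long)v;
	return 0;
}

int main(void)
{
	unsigned long val;
	char *end;

	/* "123 456" yields 123 and leaves " 456" for the caller to judge. */
	if (!strtoul_lenient("123 456", &end, 0, &val))
		printf("val=%lu rest=\"%s\"\n", val, end);

	/* An overflowing value is rejected, matching the new -EINVAL
	 * behaviour of proc_get_long(). */
	if (strtoul_lenient("999999999999999999999999", &end, 0, &val))
		printf("overflow rejected\n");
	return 0;
}
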
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 9eece67f29f3..6a2ba39889bd 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -822,9 +822,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
if (flags == TIMER_ABSTIME)
return -ERESTARTNOHAND;
- restart->fn = alarm_timer_nsleep_restart;
restart->nanosleep.clockid = type;
restart->nanosleep.expires = exp;
+ set_restart_fn(restart, alarm_timer_nsleep_restart);
return ret;
}
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 7362554416fd..0e04b24cec81 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -536,8 +536,11 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
}
/*
- * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
- * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
+ * Recomputes cpu_base::*next_timer and returns the earliest expires_next
+ * but does not set cpu_base::*expires_next, that is done by
+ * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating
+ * cpu_base::*expires_next right away, reprogramming logic would no longer
+ * work.
*
* When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
* those timers will get run whenever the softirq gets handled, at the end of
@@ -578,6 +581,37 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
return expires_next;
}
+static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base)
+{
+ ktime_t expires_next, soft = KTIME_MAX;
+
+ /*
+ * If the soft interrupt has already been activated, ignore the
+ * soft bases. They will be handled in the already raised soft
+ * interrupt.
+ */
+ if (!cpu_base->softirq_activated) {
+ soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
+ /*
+ * Update the soft expiry time. clock_settime() might have
+ * affected it.
+ */
+ cpu_base->softirq_expires_next = soft;
+ }
+
+ expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
+ /*
+ * If a softirq timer is expiring first, update cpu_base->next_timer
+ * and program the hardware with the soft expiry time.
+ */
+ if (expires_next > soft) {
+ cpu_base->next_timer = cpu_base->softirq_next_timer;
+ expires_next = soft;
+ }
+
+ return expires_next;
+}
+
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
@@ -618,23 +652,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
ktime_t expires_next;
- /*
- * Find the current next expiration time.
- */
- expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
-
- if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
- /*
- * When the softirq is activated, hrtimer has to be
- * programmed with the first hard hrtimer because soft
- * timer interrupt could occur too late.
- */
- if (cpu_base->softirq_activated)
- expires_next = __hrtimer_get_next_event(cpu_base,
- HRTIMER_ACTIVE_HARD);
- else
- cpu_base->softirq_expires_next = expires_next;
- }
+ expires_next = hrtimer_update_next_event(cpu_base);
if (skip_equal && expires_next == cpu_base->expires_next)
return;
@@ -1520,8 +1538,8 @@ retry:
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
- /* Reevaluate the clock bases for the next expiry */
- expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
+ /* Reevaluate the clock bases for the [soft] next expiry */
+ expires_next = hrtimer_update_next_event(cpu_base);
/*
* Store the new expiry value so the migration code can verify
* against it.
@@ -1753,9 +1771,9 @@ long hrtimer_nanosleep(const struct timespec64 *rqtp,
}
restart = &current->restart_block;
- restart->fn = hrtimer_nanosleep_restart;
restart->nanosleep.clockid = t.timer.base->clockid;
restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
+ set_restart_fn(restart, hrtimer_nanosleep_restart);
out:
destroy_hrtimer_on_stack(&t.timer);
return ret;
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 9a65713c8309..2e2b335ef101 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -154,10 +154,6 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
u64 oval, nval, ointerval, ninterval;
struct cpu_itimer *it = &tsk->signal->it[clock_id];
- /*
- * Use the to_ktime conversion because that clamps the maximum
- * value to KTIME_MAX and avoid multiplication overflows.
- */
nval = ktime_to_ns(timeval_to_ktime(value->it_value));
ninterval = ktime_to_ns(timeval_to_ktime(value->it_interval));
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index d62d7ae5201c..bfaa44a80c03 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1371,8 +1371,8 @@ static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
if (flags & TIMER_ABSTIME)
return -ERESTARTNOHAND;
- restart_block->fn = posix_cpu_nsleep_restart;
restart_block->nanosleep.clockid = which_clock;
+ set_restart_fn(restart_block, posix_cpu_nsleep_restart);
}
return error;
}
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 5a01c4fdbfef..48758108e055 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -1166,8 +1166,8 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
err = kc->clock_adj(which_clock, &ktx);
- if (err >= 0)
- err = compat_put_timex(utp, &ktx);
+ if (err >= 0 && compat_put_timex(utp, &ktx))
+ return -EFAULT;
return err;
}
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index a02e0f6b287c..0a3cc37e4b83 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
+#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
@@ -520,6 +521,7 @@ void tick_unfreeze(void)
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), false);
} else {
+ touch_softlockup_watchdog();
tick_resume_local();
}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 81ee5b83c920..c66fd11d94bc 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1004,9 +1004,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
return -EOVERFLOW;
tmp *= mult;
- rem *= mult;
- do_div(rem, div);
+ rem = div64_u64(rem * mult, div);
*base = tmp + rem;
return 0;
}
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index ae64cb819a9a..a6e88d9bb931 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -44,6 +44,7 @@
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/random.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
@@ -519,8 +520,8 @@ static int calc_wheel_index(unsigned long expires, unsigned long clk)
* Force expire obscene large timeouts to expire at the
* capacity limit of the wheel.
*/
- if (expires >= WHEEL_TIMEOUT_CUTOFF)
- expires = WHEEL_TIMEOUT_MAX;
+ if (delta >= WHEEL_TIMEOUT_CUTOFF)
+ expires = clk + WHEEL_TIMEOUT_MAX;
idx = calc_index(expires, LVL_DEPTH - 1);
}
@@ -580,7 +581,15 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
* Set the next expiry time and kick the CPU so it can reevaluate the
* wheel:
*/
- base->next_expiry = timer->expires;
+ if (time_before(timer->expires, base->clk)) {
+ /*
+ * Prevent forward_timer_base() from moving the base->clk
+ * backward.
+ */
+ base->next_expiry = base->clk;
+ } else {
+ base->next_expiry = timer->expires;
+ }
wake_up_nohz_cpu(base->cpu);
}
@@ -899,10 +908,13 @@ static inline void forward_timer_base(struct timer_base *base)
* If the next expiry value is > jiffies, then we fast forward to
* jiffies otherwise we forward to the next expiry value.
*/
- if (time_after(base->next_expiry, jnow))
+ if (time_after(base->next_expiry, jnow)) {
base->clk = jnow;
- else
+ } else {
+ if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
+ return;
base->clk = base->next_expiry;
+ }
#endif
}
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 5e3de28c7677..e656d1e232da 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -476,7 +476,7 @@ config KPROBE_EVENTS
config KPROBE_EVENTS_ON_NOTRACE
bool "Do NOT protect notrace function from kprobe events"
depends on KPROBE_EVENTS
- depends on KPROBES_ON_FTRACE
+ depends on DYNAMIC_FTRACE
default n
help
This is only for the developers who want to debug ftrace itself
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 2868d85f1fb1..645048bb1e86 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -3,6 +3,9 @@
* Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
@@ -336,6 +339,7 @@ static void put_probe_ref(void)
static void blk_trace_cleanup(struct blk_trace *bt)
{
+ synchronize_rcu();
blk_trace_free(bt);
put_probe_ref();
}
@@ -494,6 +498,16 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
*/
strreplace(buts->name, '/', '_');
+ /*
+ * bdev can be NULL, as with scsi-generic; this is as helpful as
+ * we can be.
+ */
+ if (q->blk_trace) {
+ pr_warn("Concurrent blktraces are not allowed on %s\n",
+ buts->name);
+ return -EBUSY;
+ }
+
bt = kzalloc(sizeof(*bt), GFP_KERNEL);
if (!bt)
return -ENOMEM;
@@ -507,14 +521,34 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
if (!bt->msg_data)
goto err;
- ret = -ENOENT;
-
- dir = debugfs_lookup(buts->name, blk_debugfs_root);
- if (!dir)
+#ifdef CONFIG_BLK_DEBUG_FS
+ /*
+ * When tracing whole make_request drivers (multiqueue) block devices,
+ * reuse the existing debugfs directory created by the block layer on
+ * init. For request-based block devices, all partitions block devices,
+ * and scsi-generic block devices we create a temporary new debugfs
+ * directory that will be removed once the trace ends.
+ */
+ if (q->mq_ops && bdev && bdev == bdev->bd_contains)
+ dir = q->debugfs_dir;
+ else
+#endif
bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
- if (!dir)
- goto err;
+ /*
+ * As blktrace relies on debugfs for its interface the debugfs directory
+ * is required, contrary to the usual mantra of not checking for debugfs
+ * files or directories.
+ */
+ if (IS_ERR_OR_NULL(dir)) {
+ pr_warn("debugfs_dir not present for %s so skipping\n",
+ buts->name);
+ ret = -ENOENT;
+ goto err;
+ }
+
bt->dev = dev;
atomic_set(&bt->dropped, 0);
INIT_LIST_HEAD(&bt->running_list);
@@ -557,8 +591,6 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
ret = 0;
err:
- if (dir && !bt->dir)
- dput(dir);
if (ret)
blk_trace_free(bt);
return ret;
@@ -636,8 +668,10 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
static int __blk_trace_startstop(struct request_queue *q, int start)
{
int ret;
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ bt = rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex));
if (bt == NULL)
return -EINVAL;
@@ -746,8 +780,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
void blk_trace_shutdown(struct request_queue *q)
{
mutex_lock(&q->blk_trace_mutex);
-
- if (q->blk_trace) {
+ if (rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex))) {
__blk_trace_startstop(q, 0);
__blk_trace_remove(q);
}
@@ -759,8 +793,10 @@ void blk_trace_shutdown(struct request_queue *q)
static union kernfs_node_id *
blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ /* We don't use the 'bt' value here except as an optimization... */
+ bt = rcu_dereference_protected(q->blk_trace, 1);
if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
return NULL;
@@ -805,10 +841,14 @@ static void blk_add_trace_rq(struct request *rq, int error,
unsigned int nr_bytes, u32 what,
union kernfs_node_id *cgid)
{
- struct blk_trace *bt = rq->q->blk_trace;
+ struct blk_trace *bt;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(rq->q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
if (blk_rq_is_passthrough(rq))
what |= BLK_TC_ACT(BLK_TC_PC);
@@ -817,6 +857,7 @@ static void blk_add_trace_rq(struct request *rq, int error,
__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
rq->cmd_flags, what, error, 0, NULL, cgid);
+ rcu_read_unlock();
}
static void blk_add_trace_rq_insert(void *ignore,
@@ -862,14 +903,19 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
u32 what, int error)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
bio_op(bio), bio->bi_opf, what, error, 0, NULL,
blk_trace_bio_get_cgid(q, bio));
+ rcu_read_unlock();
}
static void blk_add_trace_bio_bounce(void *ignore,
@@ -914,11 +960,14 @@ static void blk_add_trace_getrq(void *ignore,
if (bio)
blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
else {
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt)
__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
NULL, NULL);
+ rcu_read_unlock();
}
}
@@ -930,27 +979,35 @@ static void blk_add_trace_sleeprq(void *ignore,
if (bio)
blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
else {
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt)
__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
0, 0, NULL, NULL);
+ rcu_read_unlock();
}
}
static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt)
__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
+ rcu_read_unlock();
}
static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
unsigned int depth, bool explicit)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt) {
__be64 rpdu = cpu_to_be64(depth);
u32 what;
@@ -962,22 +1019,28 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
}
+ rcu_read_unlock();
}
static void blk_add_trace_split(void *ignore,
struct request_queue *q, struct bio *bio,
unsigned int pdu)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt) {
__be64 rpdu = cpu_to_be64(pdu);
__blk_add_trace(bt, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
- BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
- &rpdu, blk_trace_bio_get_cgid(q, bio));
+ BLK_TA_SPLIT,
+ blk_status_to_errno(bio->bi_status),
+ sizeof(rpdu), &rpdu,
+ blk_trace_bio_get_cgid(q, bio));
}
+ rcu_read_unlock();
}
/**
@@ -997,19 +1060,25 @@ static void blk_add_trace_bio_remap(void *ignore,
struct request_queue *q, struct bio *bio,
dev_t dev, sector_t from)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
struct blk_io_trace_remap r;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
r.device_from = cpu_to_be32(dev);
r.device_to = cpu_to_be32(bio_dev(bio));
r.sector_from = cpu_to_be64(from);
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
- bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
+ bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
+ blk_status_to_errno(bio->bi_status),
sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
+ rcu_read_unlock();
}
/**
@@ -1030,11 +1099,15 @@ static void blk_add_trace_rq_remap(void *ignore,
struct request *rq, dev_t dev,
sector_t from)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
struct blk_io_trace_remap r;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
r.device_from = cpu_to_be32(dev);
r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
@@ -1043,6 +1116,7 @@ static void blk_add_trace_rq_remap(void *ignore,
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
+ rcu_read_unlock();
}
/**
@@ -1060,14 +1134,19 @@ void blk_add_driver_data(struct request_queue *q,
struct request *rq,
void *data, size_t len)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
BLK_TA_DRV_DATA, 0, len, data,
blk_trace_request_get_cgid(q, rq));
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
@@ -1219,21 +1298,10 @@ static inline __u16 t_error(const struct trace_entry *ent)
static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
- const __u64 *val = pdu_start(ent, has_cg);
+ const __be64 *val = pdu_start(ent, has_cg);
return be64_to_cpu(*val);
}
-static void get_pdu_remap(const struct trace_entry *ent,
- struct blk_io_trace_remap *r, bool has_cg)
-{
- const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
- __u64 sector_from = __r->sector_from;
-
- r->device_from = be32_to_cpu(__r->device_from);
- r->device_to = be32_to_cpu(__r->device_to);
- r->sector_from = be64_to_cpu(sector_from);
-}
-
typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
bool has_cg);
@@ -1359,13 +1427,13 @@ static void blk_log_with_error(struct trace_seq *s,
static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
- struct blk_io_trace_remap r = { .device_from = 0, };
+ const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
- get_pdu_remap(ent, &r, has_cg);
trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
t_sector(ent), t_sec(ent),
- MAJOR(r.device_from), MINOR(r.device_from),
- (unsigned long long)r.sector_from);
+ MAJOR(be32_to_cpu(__r->device_from)),
+ MINOR(be32_to_cpu(__r->device_from)),
+ be64_to_cpu(__r->sector_from));
}
static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
@@ -1594,6 +1662,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
return -EINVAL;
put_probe_ref();
+ synchronize_rcu();
blk_trace_free(bt);
return 0;
}
@@ -1755,6 +1824,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
struct hd_struct *p = dev_to_part(dev);
struct request_queue *q;
struct block_device *bdev;
+ struct blk_trace *bt;
ssize_t ret = -ENXIO;
bdev = bdget(part_devt(p));
@@ -1767,21 +1837,23 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
mutex_lock(&q->blk_trace_mutex);
+ bt = rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex));
if (attr == &dev_attr_enable) {
- ret = sprintf(buf, "%u\n", !!q->blk_trace);
+ ret = sprintf(buf, "%u\n", !!bt);
goto out_unlock_bdev;
}
- if (q->blk_trace == NULL)
+ if (bt == NULL)
ret = sprintf(buf, "disabled\n");
else if (attr == &dev_attr_act_mask)
- ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
+ ret = blk_trace_mask2str(buf, bt->act_mask);
else if (attr == &dev_attr_pid)
- ret = sprintf(buf, "%u\n", q->blk_trace->pid);
+ ret = sprintf(buf, "%u\n", bt->pid);
else if (attr == &dev_attr_start_lba)
- ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
+ ret = sprintf(buf, "%llu\n", bt->start_lba);
else if (attr == &dev_attr_end_lba)
- ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
+ ret = sprintf(buf, "%llu\n", bt->end_lba);
out_unlock_bdev:
mutex_unlock(&q->blk_trace_mutex);
@@ -1798,6 +1870,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
struct block_device *bdev;
struct request_queue *q;
struct hd_struct *p;
+ struct blk_trace *bt;
u64 value;
ssize_t ret = -EINVAL;
@@ -1828,8 +1901,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
mutex_lock(&q->blk_trace_mutex);
+ bt = rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex));
if (attr == &dev_attr_enable) {
- if (!!value == !!q->blk_trace) {
+ if (!!value == !!bt) {
ret = 0;
goto out_unlock_bdev;
}
@@ -1841,18 +1916,21 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
}
ret = 0;
- if (q->blk_trace == NULL)
+ if (bt == NULL) {
ret = blk_trace_setup_queue(q, bdev);
+ bt = rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex));
+ }
if (ret == 0) {
if (attr == &dev_attr_act_mask)
- q->blk_trace->act_mask = value;
+ bt->act_mask = value;
else if (attr == &dev_attr_pid)
- q->blk_trace->pid = value;
+ bt->pid = value;
else if (attr == &dev_attr_start_lba)
- q->blk_trace->start_lba = value;
+ bt->start_lba = value;
else if (attr == &dev_attr_end_lba)
- q->blk_trace->end_lba = value;
+ bt->end_lba = value;
}
out_unlock_bdev:
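
The get_pdu_int() and blk_log_remap() changes above treat the trace PDU fields as big-endian (__be32/__be64) and byte-swap them on read. A userspace sketch of that decode using glibc's <endian.h> helpers; the field names mirror struct blk_io_trace_remap, and the major/minor packing (major in the upper bits, minor in the low 20) follows the kernel's internal dev_t layout purely for illustration:

#include <endian.h>	/* htobe32()/be32toh() and friends (glibc) */
#include <stdint.h>
#include <stdio.h>

/* Remap PDU as stored in the trace stream: every field is big-endian. */
struct remap_pdu {
	uint32_t device_from;
	uint32_t device_to;
	uint64_t sector_from;
};

int main(void)
{
	/* Encode the way the tracepoints do, with cpu_to_be*() equivalents. */
	struct remap_pdu pdu = {
		.device_from = htobe32((8u << 20) | 16),	/* "8:16" */
		.device_to   = htobe32((8u << 20) | 0),
		.sector_from = htobe64(123456789ULL),
	};

	/* Reading the raw fields prints garbage on little-endian hosts... */
	printf("raw:     %u %llu\n", pdu.device_from,
	       (unsigned long long)pdu.sector_from);

	/* ...while be32toh()/be64toh() recover the original values, as the
	 * fixed blk_log_remap()/get_pdu_int() now do with be*_to_cpu(). */
	printf("decoded: %u:%u sector %llu\n",
	       be32toh(pdu.device_from) >> 20,
	       be32toh(pdu.device_from) & 0xfffff,
	       (unsigned long long)be64toh(pdu.sector_from));
	return 0;
}
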
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0c379cd40bea..f8d82b36dd66 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1650,6 +1650,8 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
+ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
+static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
@@ -1787,7 +1789,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
* to it.
*/
if (ftrace_rec_count(rec) == 1 &&
- ftrace_find_tramp_ops_any(rec))
+ ftrace_find_tramp_ops_any_other(rec, ops))
rec->flags |= FTRACE_FL_TRAMP;
else
rec->flags &= ~FTRACE_FL_TRAMP;
@@ -2216,6 +2218,24 @@ ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
}
static struct ftrace_ops *
+ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
+{
+ struct ftrace_ops *op;
+ unsigned long ip = rec->ip;
+
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+ if (op == op_exclude || !op->trampoline)
+ continue;
+
+ if (hash_contains_ip(ip, op->func_hash))
+ return op;
+ } while_for_each_ftrace_op(op);
+
+ return NULL;
+}
+
+static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
struct ftrace_ops *op)
{
@@ -5003,7 +5023,10 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
parser = &iter->parser;
if (trace_parser_loaded(parser)) {
- ftrace_match_records(iter->hash, parser->buffer, parser->idx);
+ int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
+
+ ftrace_process_regex(iter, parser->buffer,
+ parser->idx, enable);
}
trace_parser_put(parser);
@@ -5665,8 +5688,11 @@ static int referenced_filters(struct dyn_ftrace *rec)
int cnt = 0;
for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
- if (ops_references_rec(ops, rec))
- cnt++;
+ if (ops_references_rec(ops, rec)) {
+ cnt++;
+ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
+ rec->flags |= FTRACE_FL_REGS;
+ }
}
return cnt;
@@ -5843,8 +5869,8 @@ void ftrace_module_enable(struct module *mod)
if (ftrace_start_up)
cnt += referenced_filters(rec);
- /* This clears FTRACE_FL_DISABLED */
- rec->flags = cnt;
+ rec->flags &= ~FTRACE_FL_DISABLED;
+ rec->flags += cnt;
if (ftrace_start_up && cnt) {
int failed = __ftrace_replace_code(rec, 1);
@@ -6367,16 +6393,14 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
{
int bit;
- if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
- return;
-
bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
if (bit < 0)
return;
preempt_disable_notrace();
- op->func(ip, parent_ip, op, regs);
+ if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
+ op->func(ip, parent_ip, op, regs);
preempt_enable_notrace();
trace_clear_recursion(bit);
@@ -6447,12 +6471,12 @@ void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
if (enable) {
register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
tr);
- register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
+ register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
tr);
} else {
unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
tr);
- unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
+ unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
tr);
}
}
@@ -6854,7 +6878,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
}
if (t->ret_stack == NULL) {
- atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->curr_ret_stack = -1;
t->curr_ret_depth = -1;
@@ -7067,7 +7090,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
- atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->ftrace_timestamp = 0;
/* make curr_ret_stack visible before we add the ret_stack */
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 805aef83b5cf..360129e47540 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -444,14 +444,16 @@ struct rb_event_info {
/*
* Used for which event context the event is in.
- * NMI = 0
- * IRQ = 1
- * SOFTIRQ = 2
- * NORMAL = 3
+ * TRANSITION = 0
+ * NMI = 1
+ * IRQ = 2
+ * SOFTIRQ = 3
+ * NORMAL = 4
*
* See trace_recursive_lock() comment below for more details.
*/
enum {
+ RB_CTX_TRANSITION,
RB_CTX_NMI,
RB_CTX_IRQ,
RB_CTX_SOFTIRQ,
@@ -1692,18 +1694,18 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long nr_pages;
- int cpu, err = 0;
+ int cpu, err;
/*
* Always succeed at resizing a non-existent buffer:
*/
if (!buffer)
- return size;
+ return 0;
/* Make sure the requested buffer exists */
if (cpu_id != RING_BUFFER_ALL_CPUS &&
!cpumask_test_cpu(cpu_id, buffer->cpumask))
- return size;
+ return 0;
nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
@@ -1843,7 +1845,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
}
mutex_unlock(&buffer->mutex);
- return size;
+ return 0;
out_err:
for_each_buffer_cpu(buffer, cpu) {
@@ -2333,7 +2335,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
if (unlikely(info->add_timestamp)) {
bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
- event = rb_add_time_stamp(event, info->delta, abs);
+ event = rb_add_time_stamp(event, abs ? info->delta : delta, abs);
length -= RB_LEN_TIME_EXTEND;
delta = 0;
}
@@ -2620,10 +2622,10 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
* a bit of overhead in something as critical as function tracing,
* we use a bitmask trick.
*
- * bit 0 = NMI context
- * bit 1 = IRQ context
- * bit 2 = SoftIRQ context
- * bit 3 = normal context.
+ * bit 1 = NMI context
+ * bit 2 = IRQ context
+ * bit 3 = SoftIRQ context
+ * bit 4 = normal context.
*
* This works because this is the order of contexts that can
* preempt other contexts. A SoftIRQ never preempts an IRQ
@@ -2646,6 +2648,30 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
* The least significant bit can be cleared this way, and it
* just so happens that it is the same bit corresponding to
* the current context.
+ *
+ * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
+ * is set when a recursion is detected at the current context, and if
+ * the TRANSITION bit is already set, it will fail the recursion.
+ * This is needed because there's a lag between the changing of
+ * interrupt context and updating the preempt count. In this case,
+ * a false positive will be found. To handle this, one extra recursion
+ * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
+ * bit is already set, then it is considered a recursion and the function
+ * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
+ *
+ * On the trace_recursive_unlock(), the TRANSITION bit will be the first
+ * to be cleared. Even if it wasn't the context that set it. That is,
+ * if an interrupt comes in while NORMAL bit is set and the ring buffer
+ * is called before preempt_count() is updated, since the check will
+ * be on the NORMAL bit, the TRANSITION bit will then be set. If an
+ * NMI then comes in, it will set the NMI bit, but when the NMI code
+ * does the trace_recursive_unlock(), it will clear the TRANSITION bit
+ * and leave the NMI bit set. But this is fine, because the interrupt
+ * code that set the TRANSITION bit will then clear the NMI bit when it
+ * calls trace_recursive_unlock(). If another NMI comes in, it will
+ * set the TRANSITION bit and continue.
+ *
+ * Note: The TRANSITION bit only handles a single transition between context.
*/
static __always_inline int
@@ -2661,8 +2687,16 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
bit = pc & NMI_MASK ? RB_CTX_NMI :
pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
- if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
- return 1;
+ if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
+ /*
+ * It is possible that this was called by transitioning
+ * between interrupt context, and preempt_count() has not
+ * been updated yet. In this case, use the TRANSITION bit.
+ */
+ bit = RB_CTX_TRANSITION;
+ if (val & (1 << (bit + cpu_buffer->nest)))
+ return 1;
+ }
val |= (1 << (bit + cpu_buffer->nest));
cpu_buffer->current_context = val;
@@ -2677,8 +2711,8 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->current_context - (1 << cpu_buffer->nest);
}
-/* The recursive locking above uses 4 bits */
-#define NESTED_BITS 4
+/* The recursive locking above uses 5 bits */
+#define NESTED_BITS 5
/**
* ring_buffer_nest_start - Allow to trace while nested
@@ -4359,6 +4393,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
+ /* prevent another thread from changing buffer sizes */
+ mutex_lock(&buffer->mutex);
atomic_inc(&buffer->resize_disabled);
atomic_inc(&cpu_buffer->record_disabled);
@@ -4382,6 +4418,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&buffer->resize_disabled);
+
+ mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
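
The TRANSITION bit described in the long comment above turns the per-context recursion check into "exactly one extra level allowed", to absorb the window where the interrupt context has changed but the preempt count has not caught up yet. A minimal standalone model of the bitmask trick (the nesting offset and per-CPU state of the real ring buffer are left out):

#include <stdio.h>

/* Context bits in nesting order, mirroring the comment above. */
enum { CTX_TRANSITION, CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

static unsigned int current_context;

/* Returns 0 if tracing may proceed, -1 if this is treated as recursion. */
static int recursive_lock(int ctx)
{
	unsigned int val = current_context;
	int bit = ctx;

	if (val & (1U << bit)) {
		/*
		 * The context bit is already set: either a real recursion or
		 * we raced with a context change before the preempt count was
		 * updated. Allow exactly one extra level via TRANSITION.
		 */
		bit = CTX_TRANSITION;
		if (val & (1U << bit))
			return -1;
	}
	current_context = val | (1U << bit);
	return 0;
}

static void recursive_unlock(void)
{
	/* Clears the lowest set bit, so TRANSITION is released first. */
	current_context &= current_context - 1;
}

int main(void)
{
	printf("normal:     %d\n", recursive_lock(CTX_NORMAL));	/* takes NORMAL     */
	printf("transition: %d\n", recursive_lock(CTX_NORMAL));	/* takes TRANSITION */
	printf("recursion:  %d\n", recursive_lock(CTX_NORMAL));	/* rejected (-1)    */

	recursive_unlock();	/* drops TRANSITION */
	recursive_unlock();	/* drops NORMAL */
	return 0;
}
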
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c41f7d1ab5fa..4eea58a907f0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1935,14 +1935,13 @@ static void tracing_stop_tr(struct trace_array *tr)
static int trace_save_cmdline(struct task_struct *tsk)
{
- unsigned pid, idx;
+ unsigned tpid, idx;
/* treat recording of idle task as a success */
if (!tsk->pid)
return 1;
- if (unlikely(tsk->pid > PID_MAX_DEFAULT))
- return 0;
+ tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
/*
* It's not the end of the world if we don't get
@@ -1953,26 +1952,15 @@ static int trace_save_cmdline(struct task_struct *tsk)
if (!arch_spin_trylock(&trace_cmdline_lock))
return 0;
- idx = savedcmd->map_pid_to_cmdline[tsk->pid];
+ idx = savedcmd->map_pid_to_cmdline[tpid];
if (idx == NO_CMDLINE_MAP) {
idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
- /*
- * Check whether the cmdline buffer at idx has a pid
- * mapped. We are going to overwrite that entry so we
- * need to clear the map_pid_to_cmdline. Otherwise we
- * would read the new comm for the old pid.
- */
- pid = savedcmd->map_cmdline_to_pid[idx];
- if (pid != NO_CMDLINE_MAP)
- savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
-
- savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
- savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
-
+ savedcmd->map_pid_to_cmdline[tpid] = idx;
savedcmd->cmdline_idx = idx;
}
+ savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
set_cmdline(idx, tsk->comm);
arch_spin_unlock(&trace_cmdline_lock);
@@ -1983,6 +1971,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
static void __trace_find_cmdline(int pid, char comm[])
{
unsigned map;
+ int tpid;
if (!pid) {
strcpy(comm, "<idle>");
@@ -1994,16 +1983,16 @@ static void __trace_find_cmdline(int pid, char comm[])
return;
}
- if (pid > PID_MAX_DEFAULT) {
- strcpy(comm, "<...>");
- return;
+ tpid = pid & (PID_MAX_DEFAULT - 1);
+ map = savedcmd->map_pid_to_cmdline[tpid];
+ if (map != NO_CMDLINE_MAP) {
+ tpid = savedcmd->map_cmdline_to_pid[map];
+ if (tpid == pid) {
+ strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
+ return;
+ }
}
-
- map = savedcmd->map_pid_to_cmdline[pid];
- if (map != NO_CMDLINE_MAP)
- strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
- else
- strcpy(comm, "<...>");
+ strcpy(comm, "<...>");
}
void trace_find_cmdline(int pid, char comm[])
@@ -2292,7 +2281,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
(entry = this_cpu_read(trace_buffered_event))) {
/* Try to use the per cpu buffer first */
val = this_cpu_inc_return(trace_buffered_event_cnt);
- if (val == 1) {
+ if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
trace_event_setup(entry, type, flags, pc);
entry->array[0] = len;
return entry;
@@ -2416,7 +2405,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
* two. They are not that meaningful.
*/
ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
- ftrace_trace_userstack(buffer, flags, pc);
+ ftrace_trace_userstack(tr, buffer, flags, pc);
}
/*
@@ -2645,7 +2634,8 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
size *= sizeof(unsigned long);
event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
- sizeof(*entry) + size, flags, pc);
+ (sizeof(*entry) - sizeof(entry->caller)) + size,
+ flags, pc);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
@@ -2736,14 +2726,15 @@ void trace_dump_stack(int skip)
static DEFINE_PER_CPU(int, user_stack_count);
void
-ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
+ftrace_trace_userstack(struct trace_array *tr,
+ struct ring_buffer *buffer, unsigned long flags, int pc)
{
struct trace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
struct userstack_entry *entry;
struct stack_trace trace;
- if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
+ if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
/*
@@ -2819,7 +2810,7 @@ static char *get_trace_buf(void)
/* Interrupts must see nesting incremented before we use the buffer */
barrier();
- return &buffer->buffer[buffer->nesting][0];
+ return &buffer->buffer[buffer->nesting - 1][0];
}
static void put_trace_buf(void)
@@ -3037,6 +3028,9 @@ int trace_array_printk(struct trace_array *tr,
if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
+ if (!tr)
+ return -ENOENT;
+
va_start(ap, fmt);
ret = trace_array_vprintk(tr, ip, fmt, ap);
va_end(ap);
@@ -7750,6 +7744,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
*/
allocate_snapshot = false;
#endif
+
+ /*
+ * Because of some magic with the way alloc_percpu() works on
+ * x86_64, we need to synchronize the pgd of all the tables,
+ * otherwise the trace events that happen in x86_64 page fault
+ * handlers can't cope with accessing the chance that a
+ * alloc_percpu()'d memory might be touched in the page fault trace
+ * event. Oh, and we need to audit all other alloc_percpu() and vmalloc()
+ * calls in tracing, because something might get triggered within a
+ * page fault trace event!
+ */
+ vmalloc_sync_mappings();
+
return 0;
}
@@ -8513,7 +8520,7 @@ __init static int tracer_alloc_buffers(void)
goto out_free_buffer_mask;
/* Only allocate trace_printk buffers if a trace_printk exists */
- if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+ if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
/* Must be called before global_trace.buffer is allocated */
trace_printk_init_buffers();
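
The saved-cmdline hunks above index the table by pid & (PID_MAX_DEFAULT - 1) and then verify ownership through map_cmdline_to_pid, so a PID that collides on the masked index reads back as "<...>" instead of another task's comm. A toy model of that lookup (table sizes shrunk and names borrowed from the kernel arrays; the code itself is only illustrative):

#include <stdio.h>
#include <string.h>

#define PID_MAX_DEFAULT	0x8000		/* power of two, so the mask works */
#define NO_MAP		-1
#define COMM_LEN	16
#define NR_SLOTS	4

static int  map_pid_to_cmdline[PID_MAX_DEFAULT];
static int  map_cmdline_to_pid[NR_SLOTS];
static char saved_comms[NR_SLOTS][COMM_LEN];
static int  cmdline_idx;

static void save_cmdline(int pid, const char *comm)
{
	int tpid = pid & (PID_MAX_DEFAULT - 1);	/* works for pid >= 0x8000 too */
	int idx = map_pid_to_cmdline[tpid];

	if (idx == NO_MAP) {
		idx = cmdline_idx = (cmdline_idx + 1) % NR_SLOTS;
		map_pid_to_cmdline[tpid] = idx;
	}
	map_cmdline_to_pid[idx] = pid;		/* remember the real owner */
	snprintf(saved_comms[idx], COMM_LEN, "%s", comm);
}

static const char *find_cmdline(int pid)
{
	int tpid = pid & (PID_MAX_DEFAULT - 1);
	int idx = map_pid_to_cmdline[tpid];

	/* Only trust the slot if it still belongs to this exact pid. */
	if (idx != NO_MAP && map_cmdline_to_pid[idx] == pid)
		return saved_comms[idx];
	return "<...>";
}

int main(void)
{
	memset(map_pid_to_cmdline, NO_MAP, sizeof(map_pid_to_cmdline));

	save_cmdline(1234, "bash");
	save_cmdline(1234 + PID_MAX_DEFAULT, "sshd");	/* same masked index */

	printf("%d -> %s\n", 1234, find_cmdline(1234));
	printf("%d -> %s\n", 1234 + PID_MAX_DEFAULT,
	       find_cmdline(1234 + PID_MAX_DEFAULT));
	return 0;
}
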
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index ee0c6a313ed1..afaeef6c15b3 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -534,6 +534,12 @@ enum {
TRACE_GRAPH_DEPTH_START_BIT,
TRACE_GRAPH_DEPTH_END_BIT,
+
+ /*
+ * When transitioning between context, the preempt_count() may
+ * not be correct. Allow for a single recursion to cover this case.
+ */
+ TRACE_TRANSITION_BIT,
};
#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
@@ -588,14 +594,27 @@ static __always_inline int trace_test_and_set_recursion(int start, int max)
return 0;
bit = trace_get_context_bit() + start;
- if (unlikely(val & (1 << bit)))
- return -1;
+ if (unlikely(val & (1 << bit))) {
+ /*
+ * It could be that preempt_count has not been updated during
+ * a switch between contexts. Allow for a single recursion.
+ */
+ bit = TRACE_TRANSITION_BIT;
+ if (trace_recursion_test(bit))
+ return -1;
+ trace_recursion_set(bit);
+ barrier();
+ return bit + 1;
+ }
+
+ /* Normal check passed, clear the transition to allow it again */
+ trace_recursion_clear(TRACE_TRANSITION_BIT);
val |= 1 << bit;
current->trace_recursion = val;
barrier();
- return bit;
+ return bit + 1;
}
static __always_inline void trace_clear_recursion(int bit)
@@ -605,6 +624,7 @@ static __always_inline void trace_clear_recursion(int bit)
if (!bit)
return;
+ bit--;
bit = 1 << bit;
val &= ~bit;
@@ -725,13 +745,15 @@ void update_max_tr_single(struct trace_array *tr,
#endif /* CONFIG_TRACER_MAX_TRACE */
#ifdef CONFIG_STACKTRACE
-void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
+void ftrace_trace_userstack(struct trace_array *tr,
+ struct ring_buffer *buffer, unsigned long flags,
int pc);
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc);
#else
-static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
+static inline void ftrace_trace_userstack(struct trace_array *tr,
+ struct ring_buffer *buffer,
unsigned long flags, int pc)
{
}
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index aaf6793ededa..c1637f90c8a3 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -95,33 +95,49 @@ u64 notrace trace_clock_global(void)
{
unsigned long flags;
int this_cpu;
- u64 now;
+ u64 now, prev_time;
raw_local_irq_save(flags);
this_cpu = raw_smp_processor_id();
- now = sched_clock_cpu(this_cpu);
+
/*
- * If in an NMI context then dont risk lockups and return the
- * cpu_clock() time:
+ * The global clock "guarantees" that the events are ordered
+ * between CPUs. But if two events on two different CPUS call
+ * trace_clock_global at roughly the same time, it really does
+ * not matter which one gets the earlier time. Just make sure
+ * that the same CPU will always show a monotonic clock.
+ *
+ * Use a read memory barrier to get the latest written
+ * time that was recorded.
*/
- if (unlikely(in_nmi()))
- goto out;
+ smp_rmb();
+ prev_time = READ_ONCE(trace_clock_struct.prev_time);
+ now = sched_clock_cpu(this_cpu);
- arch_spin_lock(&trace_clock_struct.lock);
+ /* Make sure that now is always greater than prev_time */
+ if ((s64)(now - prev_time) < 0)
+ now = prev_time + 1;
/*
- * TODO: if this happens often then maybe we should reset
- * my_scd->clock to prev_time+1, to make sure
- * we start ticking with the local clock from now on?
+ * If in an NMI context then dont risk lockups and simply return
+ * the current time.
*/
- if ((s64)(now - trace_clock_struct.prev_time) < 0)
- now = trace_clock_struct.prev_time + 1;
+ if (unlikely(in_nmi()))
+ goto out;
- trace_clock_struct.prev_time = now;
+ /* Tracing can cause strange recursion, always use a try lock */
+ if (arch_spin_trylock(&trace_clock_struct.lock)) {
+ /* Reread prev_time in case it was already updated */
+ prev_time = READ_ONCE(trace_clock_struct.prev_time);
+ if ((s64)(now - prev_time) < 0)
+ now = prev_time + 1;
- arch_spin_unlock(&trace_clock_struct.lock);
+ trace_clock_struct.prev_time = now;
+ /* The unlock acts as the wmb for the above rmb */
+ arch_spin_unlock(&trace_clock_struct.lock);
+ }
out:
raw_local_irq_restore(flags);
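
trace_clock_global() above clamps the per-CPU time to the last globally published value and only updates that record under a trylock, so the clock stays monotonic without ever blocking (or deadlocking from NMI context). A userspace sketch of the same idea, with a pthread spinlock and CLOCK_MONOTONIC standing in for sched_clock_cpu(); the memory-barrier details of the real code are glossed over:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static _Atomic unsigned long long prev_time;
static pthread_spinlock_t lock;

static unsigned long long local_clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static unsigned long long global_clock(void)
{
	unsigned long long prev = atomic_load(&prev_time);
	unsigned long long now = local_clock_ns();

	/* Never report a value behind what was already handed out. */
	if ((long long)(now - prev) < 0)
		now = prev + 1;

	/* Only publish under a trylock; if it is contended, just return. */
	if (pthread_spin_trylock(&lock) == 0) {
		/* Re-read in case prev_time moved while we were unlocked. */
		prev = atomic_load(&prev_time);
		if ((long long)(now - prev) < 0)
			now = prev + 1;
		atomic_store(&prev_time, now);
		pthread_spin_unlock(&lock);
	}
	return now;
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);

	unsigned long long a = global_clock();
	unsigned long long b = global_clock();

	printf("monotonic: %s (%llu -> %llu)\n", b >= a ? "yes" : "no", a, b);
	return 0;
}
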
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 06bb2fd9a56c..a97aad105d36 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -179,7 +179,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
F_STRUCT(
__field( int, size )
- __dynamic_array(unsigned long, caller )
+ __array( unsigned long, caller, FTRACE_STACK_ENTRIES )
),
F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index ec340e1cbffc..ea43be6b9cc3 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -534,12 +534,12 @@ void trace_event_follow_fork(struct trace_array *tr, bool enable)
if (enable) {
register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
tr, INT_MIN);
- register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
+ register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
tr, INT_MAX);
} else {
unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
tr);
- unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
+ unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
tr);
}
}
@@ -800,6 +800,8 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
char *event = NULL, *sub = NULL, *match;
int ret;
+ if (!tr)
+ return -ENOENT;
/*
* The buf format can be <subsystem>:<event-name>
* *:<event-name> means any event by that name.
@@ -1111,7 +1113,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
mutex_lock(&event_mutex);
list_for_each_entry(file, &tr->events, list) {
call = file->event_call;
- if (!trace_event_name(call) || !call->class || !call->class->reg)
+ if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
+ !trace_event_name(call) || !call->class || !call->class->reg)
continue;
if (system && strcmp(call->class->system, system->name) != 0)
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index b949c3917c67..9be3d1d1fcb4 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -451,8 +451,10 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
switch (*next) {
case '(': /* #2 */
- if (top - op_stack > nr_parens)
- return ERR_PTR(-EINVAL);
+ if (top - op_stack > nr_parens) {
+ ret = -EINVAL;
+ goto out_free;
+ }
*(++top) = invert;
continue;
case '!': /* #3 */
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index dbd3c97d1501..3ed2d7f7e571 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -4225,7 +4225,6 @@ static int parse_var_defs(struct hist_trigger_data *hist_data)
s = kstrdup(field_str, GFP_KERNEL);
if (!s) {
- kfree(hist_data->attrs->var_defs.name[n_vars]);
ret = -ENOMEM;
goto free;
}
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 9300e8bbf08a..0c3b1551cfca 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -211,11 +211,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
static int trigger_process_regex(struct trace_event_file *file, char *buff)
{
- char *command, *next = buff;
+ char *command, *next;
struct event_command *p;
int ret = -EINVAL;
+ next = buff = skip_spaces(buff);
command = strsep(&next, ": \t");
+ if (next) {
+ next = skip_spaces(next);
+ if (!*next)
+ next = NULL;
+ }
command = (command[0] != '!') ? command : command + 1;
mutex_lock(&trigger_cmd_mutex);
@@ -624,8 +630,14 @@ event_trigger_callback(struct event_command *cmd_ops,
int ret;
/* separate the trigger from the filter (t:n [if filter]) */
- if (param && isdigit(param[0]))
+ if (param && isdigit(param[0])) {
trigger = strsep(&param, " \t");
+ if (param) {
+ param = skip_spaces(param);
+ if (!*param)
+ param = NULL;
+ }
+ }
trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
@@ -1081,14 +1093,10 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
struct event_trigger_data *data,
struct trace_event_file *file)
{
- int ret = register_trigger(glob, ops, data, file);
-
- if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
- unregister_trigger(glob, ops, data, file);
- ret = 0;
- }
+ if (tracing_alloc_snapshot_instance(file->tr) != 0)
+ return 0;
- return ret;
+ return register_trigger(glob, ops, data, file);
}
static int
@@ -1365,6 +1373,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
trigger = strsep(&param, " \t");
if (!trigger)
return -EINVAL;
+ if (param) {
+ param = skip_spaces(param);
+ if (!*param)
+ param = NULL;
+ }
system = strsep(&trigger, ":");
if (!trigger)
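
For reference, a minimal user-space sketch (not part of the patch) of the whitespace handling these trigger-parsing hunks adopt: split with strsep(), then skip leading blanks and treat an all-blank remainder as absent. skip_spaces_demo() is an illustrative stand-in for the kernel's skip_spaces().

#define _DEFAULT_SOURCE
#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Roughly what the kernel's skip_spaces() does: advance past leading blanks. */
static char *skip_spaces_demo(char *s)
{
        while (isspace((unsigned char)*s))
                s++;
        return s;
}

int main(void)
{
        char buf[] = "traceon:5   if pid == 42";
        char *param = buf;
        char *trigger = strsep(&param, " \t");   /* "traceon:5" */

        if (param) {
                param = skip_spaces_demo(param); /* drop the extra blanks */
                if (!*param)
                        param = NULL;            /* nothing but spaces left */
        }
        printf("trigger=%s param=%s\n", trigger, param ? param : "(none)");
        return 0;
}
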
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 8030e24dbf14..ade6c3070c62 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -270,6 +270,7 @@ static bool disable_migrate;
static void move_to_next_cpu(void)
{
struct cpumask *current_mask = &save_cpumask;
+ struct trace_array *tr = hwlat_trace;
int next_cpu;
if (disable_migrate)
@@ -283,7 +284,7 @@ static void move_to_next_cpu(void)
goto disable;
get_online_cpus();
- cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
+ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
next_cpu = cpumask_next(smp_processor_id(), current_mask);
put_online_cpus();
@@ -354,13 +355,13 @@ static int start_kthread(struct trace_array *tr)
struct task_struct *kthread;
int next_cpu;
- if (WARN_ON(hwlat_kthread))
+ if (hwlat_kthread)
return 0;
/* Just pick the first CPU on first iteration */
current_mask = &save_cpumask;
get_online_cpus();
- cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
+ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
put_online_cpus();
next_cpu = cpumask_first(current_mask);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index c61b2b0a99e9..61eff45653f5 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -112,9 +112,9 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
- return kprobe_on_func_entry(tk->rp.kp.addr,
+ return (kprobe_on_func_entry(tk->rp.kp.addr,
tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
- tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
+ tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0);
}
bool trace_kprobe_error_injectable(struct trace_event_call *call)
@@ -517,7 +517,7 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
return ret;
}
-#if defined(CONFIG_KPROBES_ON_FTRACE) && \
+#if defined(CONFIG_DYNAMIC_FTRACE) && \
!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool __within_notrace_func(unsigned long addr)
{
@@ -538,7 +538,7 @@ static bool __within_notrace_func(unsigned long addr)
static bool within_notrace_func(struct trace_kprobe *tk)
{
- unsigned long addr = addr = trace_kprobe_address(tk);
+ unsigned long addr = trace_kprobe_address(tk);
char symname[KSYM_NAME_LEN], *p;
if (!__within_notrace_func(addr))
@@ -975,6 +975,8 @@ static int probes_seq_show(struct seq_file *m, void *v)
int i;
seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
+ if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
+ seq_printf(m, "%d", tk->rp.maxactive);
seq_printf(m, ":%s/%s", tk->tp.call.class->system,
trace_event_name(&tk->tp.call));
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index 71f553cceb3c..0e373cb0106b 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -59,14 +59,14 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+
if (!this_cpu_read(tracing_irq_cpu)) {
this_cpu_write(tracing_irq_cpu, 1);
tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
if (!in_nmi())
trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
}
-
- lockdep_hardirqs_off(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_TRACE_IRQFLAGS */
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 11e9daa4a568..330ba2b081ed 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -492,8 +492,13 @@ trace_selftest_function_recursion(void)
unregister_ftrace_function(&test_rec_probe);
ret = -1;
- if (trace_selftest_recursion_cnt != 1) {
- pr_cont("*callback not called once (%d)* ",
+ /*
+ * Recursion allows for transitions between contexts,
+ * and may call the callback twice.
+ */
+ if (trace_selftest_recursion_cnt != 1 &&
+ trace_selftest_recursion_cnt != 2) {
+ pr_cont("*callback not called once (or twice) (%d)* ",
trace_selftest_recursion_cnt);
goto out;
}
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index a3be42304485..d5ce69231912 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -66,6 +66,12 @@ struct tp_probes {
struct tracepoint_func probes[0];
};
+/* Called in place of a removed func when allocating a new tp_funcs fails */
+static void tp_stub_func(void)
+{
+ return;
+}
+
static inline void *allocate_probes(int count)
{
struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func)
@@ -144,6 +150,7 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
{
struct tracepoint_func *old, *new;
int nr_probes = 0;
+ int stub_funcs = 0;
int pos = -1;
if (WARN_ON(!tp_func->func))
@@ -160,14 +167,34 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
if (old[nr_probes].func == tp_func->func &&
old[nr_probes].data == tp_func->data)
return ERR_PTR(-EEXIST);
+ if (old[nr_probes].func == tp_stub_func)
+ stub_funcs++;
}
}
- /* + 2 : one for new probe, one for NULL func */
- new = allocate_probes(nr_probes + 2);
+ /* + 2 : one for new probe, one for NULL func - stub functions */
+ new = allocate_probes(nr_probes + 2 - stub_funcs);
if (new == NULL)
return ERR_PTR(-ENOMEM);
if (old) {
- if (pos < 0) {
+ if (stub_funcs) {
+ /* Need to copy one at a time to remove stubs */
+ int probes = 0;
+
+ pos = -1;
+ for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
+ if (old[nr_probes].func == tp_stub_func)
+ continue;
+ if (pos < 0 && old[nr_probes].prio < prio)
+ pos = probes++;
+ new[probes++] = old[nr_probes];
+ }
+ nr_probes = probes;
+ if (pos < 0)
+ pos = probes;
+ else
+ nr_probes--; /* Account for insertion */
+
+ } else if (pos < 0) {
pos = nr_probes;
memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
} else {
@@ -201,8 +228,9 @@ static void *func_remove(struct tracepoint_func **funcs,
/* (N -> M), (N > 1, M >= 0) probes */
if (tp_func->func) {
for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
- if (old[nr_probes].func == tp_func->func &&
- old[nr_probes].data == tp_func->data)
+ if ((old[nr_probes].func == tp_func->func &&
+ old[nr_probes].data == tp_func->data) ||
+ old[nr_probes].func == tp_stub_func)
nr_del++;
}
}
@@ -221,14 +249,32 @@ static void *func_remove(struct tracepoint_func **funcs,
/* N -> M, (N > 1, M > 0) */
/* + 1 for NULL */
new = allocate_probes(nr_probes - nr_del + 1);
- if (new == NULL)
- return ERR_PTR(-ENOMEM);
- for (i = 0; old[i].func; i++)
- if (old[i].func != tp_func->func
- || old[i].data != tp_func->data)
- new[j++] = old[i];
- new[nr_probes - nr_del].func = NULL;
- *funcs = new;
+ if (new) {
+ for (i = 0; old[i].func; i++)
+ if ((old[i].func != tp_func->func
+ || old[i].data != tp_func->data)
+ && old[i].func != tp_stub_func)
+ new[j++] = old[i];
+ new[nr_probes - nr_del].func = NULL;
+ *funcs = new;
+ } else {
+ /*
+ * Failed to allocate, replace the old function
+ * with calls to tp_stub_func.
+ */
+ for (i = 0; old[i].func; i++)
+ if (old[i].func == tp_func->func &&
+ old[i].data == tp_func->data) {
+ old[i].func = tp_stub_func;
+ /* Set the prio to the next event. */
+ if (old[i + 1].func)
+ old[i].prio =
+ old[i + 1].prio;
+ else
+ old[i].prio = -1;
+ }
+ *funcs = old;
+ }
}
debug_print_probes(*funcs);
return old;
@@ -284,10 +330,12 @@ static int tracepoint_remove_func(struct tracepoint *tp,
tp_funcs = rcu_dereference_protected(tp->funcs,
lockdep_is_held(&tracepoints_mutex));
old = func_remove(&tp_funcs, func);
- if (IS_ERR(old)) {
- WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
+ if (WARN_ON_ONCE(IS_ERR(old)))
return PTR_ERR(old);
- }
+
+ if (tp_funcs == old)
+ /* Failed allocating new tp_funcs, replaced func with stub */
+ return 0;
if (!tp_funcs) {
/* Removed last function */
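
A standalone sketch (not kernel code) of the fallback these tracepoint hunks add: when the smaller replacement array cannot be allocated, the removed callback's slot in the old array is overwritten with a do-nothing stub so iteration stays safe. The names below (probe_fn, tp_stub, remove_probe_inplace) are invented for the demo, and the priority bookkeeping is omitted.

#include <stdio.h>

typedef void (*probe_fn)(void);

static void probe_a(void) { puts("A"); }
static void probe_b(void) { puts("B"); }
static void tp_stub(void) { /* placeholder for a removed probe */ }

/* On allocation failure, keep the old array but neutralize the entry. */
static void remove_probe_inplace(probe_fn *funcs, int n, probe_fn victim)
{
        for (int i = 0; i < n; i++)
                if (funcs[i] == victim)
                        funcs[i] = tp_stub;
}

int main(void)
{
        probe_fn funcs[] = { probe_a, probe_b };

        remove_probe_inplace(funcs, 2, probe_a);
        for (int i = 0; i < 2; i++)
                funcs[i]();     /* the stub runs silently in slot 0 */
        return 0;
}
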
diff --git a/kernel/umh.c b/kernel/umh.c
index c449858946af..16653319c8ce 100644
--- a/kernel/umh.c
+++ b/kernel/umh.c
@@ -13,6 +13,7 @@
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
+#include <linux/fs_struct.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
@@ -73,6 +74,14 @@ static int call_usermodehelper_exec_async(void *data)
spin_unlock_irq(&current->sighand->siglock);
/*
+ * Initial kernel threads share their FS with init, in order to
+ * get the init root directory. But we've now created a new
+ * thread that is going to execve a user process and has its own
+ * 'struct fs_struct'. Reset umask to the default.
+ */
+ current->fs->umask = 0022;
+
+ /*
* Our parent (unbound workqueue) runs with elevated scheduling
* priority. Avoid propagating that into the userspace child.
*/
@@ -522,6 +531,11 @@ EXPORT_SYMBOL_GPL(fork_usermode_blob);
* Runs a user-space application. The application is started
* asynchronously if wait is not set, and runs as a child of system workqueues.
* (ie. it runs with full root capabilities and optimized affinity).
+ *
+ * Note: successful return value does not guarantee the helper was called at
+ * all. You can't rely on sub_info->{init,cleanup} being called even for
+ * UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers
+ * into a successful no-op.
*/
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
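
The umask reset above is the usual POSIX mechanism; a tiny user-space illustration of what mode 0022 does (nothing here is kernel API, the values are the standard defaults):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

int main(void)
{
        /* 0022 clears the group/other write bits, the historical default. */
        mode_t old = umask(0022);

        printf("previous umask was %03o\n", (unsigned int)old);
        /* A file created with mode 0666 would now end up as 0644. */
        return 0;
}
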
diff --git a/kernel/up.c b/kernel/up.c
index 42c46bf3e0a5..2080f75e0e65 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -23,7 +23,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
}
EXPORT_SYMBOL(smp_call_function_single);
-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
+int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
unsigned long flags;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index eef77c82d2e1..1cc49340b68a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1377,7 +1377,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
*/
lockdep_assert_irqs_disabled();
- debug_work_activate(work);
/* if draining, only works from the same workqueue are allowed */
if (unlikely(wq->flags & __WQ_DRAINING) &&
@@ -1460,6 +1459,7 @@ retry:
worklist = &pwq->delayed_works;
}
+ debug_work_activate(work);
insert_work(pwq, work, worklist, work_flags);
spin_unlock(&pwq->pool->lock);
@@ -1729,12 +1729,6 @@ static void worker_attach_to_pool(struct worker *worker,
mutex_lock(&wq_pool_attach_mutex);
/*
- * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
- * online CPUs. It'll be re-applied when any of the CPUs come up.
- */
- set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
- /*
* The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
* stable across this function. See the comments above the flag
* definition for details.
@@ -1742,6 +1736,9 @@ static void worker_attach_to_pool(struct worker *worker,
if (pool->flags & POOL_DISASSOCIATED)
worker->flags |= WORKER_UNBOUND;
+ if (worker->rescue_wq)
+ set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
+
list_add_tail(&worker->node, &pool->workers);
worker->pool = pool;
@@ -3554,17 +3551,24 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
* is updated and visible.
*/
if (!freezable || !workqueue_freezing) {
+ bool kick = false;
+
pwq->max_active = wq->saved_max_active;
while (!list_empty(&pwq->delayed_works) &&
- pwq->nr_active < pwq->max_active)
+ pwq->nr_active < pwq->max_active) {
pwq_activate_first_delayed(pwq);
+ kick = true;
+ }
/*
* Need to kick a worker after thawed or an unbound wq's
- * max_active is bumped. It's a slow path. Do it always.
+ * max_active is bumped. In realtime scenarios, always kicking a
+ * worker will cause interference on the isolated cpu cores, so
+ * let's kick iff work items were activated.
*/
- wake_up_worker(pwq->pool);
+ if (kick)
+ wake_up_worker(pwq->pool);
} else {
pwq->max_active = 0;
}
diff --git a/lib/Makefile b/lib/Makefile
index 0ab808318202..1d7a705d7207 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -269,6 +269,8 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
obj-$(CONFIG_UBSAN) += ubsan.o
UBSAN_SANITIZE_ubsan.o := n
+KASAN_SANITIZE_ubsan.o := n
+CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
obj-$(CONFIG_SBITMAP) += sbitmap.o
diff --git a/lib/bug.c b/lib/bug.c
index 1077366f496b..f4fcac5dd766 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -155,30 +155,27 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
file = NULL;
line = 0;
- warning = 0;
- if (bug) {
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- file = bug->file;
+ file = bug->file;
#else
- file = (const char *)bug + bug->file_disp;
+ file = (const char *)bug + bug->file_disp;
#endif
- line = bug->line;
+ line = bug->line;
#endif
- warning = (bug->flags & BUGFLAG_WARNING) != 0;
- once = (bug->flags & BUGFLAG_ONCE) != 0;
- done = (bug->flags & BUGFLAG_DONE) != 0;
-
- if (warning && once) {
- if (done)
- return BUG_TRAP_TYPE_WARN;
-
- /*
- * Since this is the only store, concurrency is not an issue.
- */
- bug->flags |= BUGFLAG_DONE;
- }
+ warning = (bug->flags & BUGFLAG_WARNING) != 0;
+ once = (bug->flags & BUGFLAG_ONCE) != 0;
+ done = (bug->flags & BUGFLAG_DONE) != 0;
+
+ if (warning && once) {
+ if (done)
+ return BUG_TRAP_TYPE_WARN;
+
+ /*
+ * Since this is the only store, concurrency is not an issue.
+ */
+ bug->flags |= BUGFLAG_DONE;
}
if (warning) {
diff --git a/lib/crc32.c b/lib/crc32.c
index a6c9afafc8c8..1a5d08470044 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -328,7 +328,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
return crc;
}
-#if CRC_LE_BITS == 1
+#if CRC_BE_BITS == 1
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
diff --git a/lib/crc32test.c b/lib/crc32test.c
index 97d6a57cefcc..61ddce2cff77 100644
--- a/lib/crc32test.c
+++ b/lib/crc32test.c
@@ -683,7 +683,6 @@ static int __init crc32c_test(void)
/* reduce OS noise */
local_irq_save(flags);
- local_irq_disable();
nsec = ktime_get_ns();
for (i = 0; i < 100; i++) {
@@ -694,7 +693,6 @@ static int __init crc32c_test(void)
nsec = ktime_get_ns() - nsec;
local_irq_restore(flags);
- local_irq_enable();
pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
@@ -768,7 +766,6 @@ static int __init crc32_test(void)
/* reduce OS noise */
local_irq_save(flags);
- local_irq_disable();
nsec = ktime_get_ns();
for (i = 0; i < 100; i++) {
@@ -783,7 +780,6 @@ static int __init crc32_test(void)
nsec = ktime_get_ns() - nsec;
local_irq_restore(flags);
- local_irq_enable();
pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
CRC_LE_BITS, CRC_BE_BITS);
diff --git a/lib/devres.c b/lib/devres.c
index aa0f5308ac6b..75ea32d9b661 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -9,6 +9,7 @@
enum devm_ioremap_type {
DEVM_IOREMAP = 0,
DEVM_IOREMAP_NC,
+ DEVM_IOREMAP_UC,
DEVM_IOREMAP_WC,
};
@@ -39,6 +40,9 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
case DEVM_IOREMAP_NC:
addr = ioremap_nocache(offset, size);
break;
+ case DEVM_IOREMAP_UC:
+ addr = ioremap_uc(offset, size);
+ break;
case DEVM_IOREMAP_WC:
addr = ioremap_wc(offset, size);
break;
@@ -69,6 +73,21 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
EXPORT_SYMBOL(devm_ioremap);
/**
+ * devm_ioremap_uc - Managed ioremap_uc()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
+ resource_size_t size)
+{
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
+}
+EXPORT_SYMBOL_GPL(devm_ioremap_uc);
+
+/**
* devm_ioremap_nocache - Managed ioremap_nocache()
* @dev: Generic device to remap IO address for
* @offset: Resource address to map
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index dbf2b457e47e..9305ff43fc15 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -85,22 +85,22 @@ static struct { unsigned flag:8; char opt_char; } opt_array[] = {
{ _DPRINTK_FLAGS_NONE, '_' },
};
+struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; };
+
/* format a string into buf[] which describes the _ddebug's flags */
-static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
- size_t maxlen)
+static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb)
{
- char *p = buf;
+ char *p = fb->buf;
int i;
- BUG_ON(maxlen < 6);
for (i = 0; i < ARRAY_SIZE(opt_array); ++i)
- if (dp->flags & opt_array[i].flag)
+ if (flags & opt_array[i].flag)
*p++ = opt_array[i].opt_char;
- if (p == buf)
+ if (p == fb->buf)
*p++ = '_';
*p = '\0';
- return buf;
+ return fb->buf;
}
#define vpr_info(fmt, ...) \
@@ -142,7 +142,7 @@ static int ddebug_change(const struct ddebug_query *query,
struct ddebug_table *dt;
unsigned int newflags;
unsigned int nfound = 0;
- char flagbuf[10];
+ struct flagsbuf fbuf;
/* search for matching ddebugs */
mutex_lock(&ddebug_lock);
@@ -199,8 +199,7 @@ static int ddebug_change(const struct ddebug_query *query,
vpr_info("changed %s:%d [%s]%s =%s\n",
trim_prefix(dp->filename), dp->lineno,
dt->mod_name, dp->function,
- ddebug_describe_flags(dp, flagbuf,
- sizeof(flagbuf)));
+ ddebug_describe_flags(dp->flags, &fbuf));
}
}
mutex_unlock(&ddebug_lock);
@@ -779,7 +778,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
{
struct ddebug_iter *iter = m->private;
struct _ddebug *dp = p;
- char flagsbuf[10];
+ struct flagsbuf flags;
vpr_info("called m=%p p=%p\n", m, p);
@@ -792,7 +791,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
seq_printf(m, "%s:%u [%s]%s =%s \"",
trim_prefix(dp->filename), dp->lineno,
iter->table->mod_name, dp->function,
- ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
+ ddebug_describe_flags(dp->flags, &flags));
seq_escape(m, dp->format, "\t\r\n\"");
seq_puts(m, "\"\n");
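
A compact sketch of the pattern the dynamic_debug hunks switch to: wrap the flags buffer in a struct whose size is derived from the option table, so the two can never silently fall out of sync. The table contents and describe_flags() below are illustrative, not the kernel's actual values.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Tie the buffer size to the table it describes, as the patch does. */
static const struct { unsigned flag; char opt_char; } opt_array[] = {
        { 0x01, 'p' }, { 0x02, 'f' }, { 0x04, 'l' }, { 0x00, '_' },
};

struct flagsbuf { char buf[ARRAY_SIZE(opt_array) + 1]; };

static char *describe_flags(unsigned flags, struct flagsbuf *fb)
{
        char *p = fb->buf;

        for (size_t i = 0; i < ARRAY_SIZE(opt_array); i++)
                if (flags & opt_array[i].flag)
                        *p++ = opt_array[i].opt_char;
        if (p == fb->buf)
                *p++ = '_';     /* no flags set */
        *p = '\0';
        return fb->buf;
}

int main(void)
{
        struct flagsbuf fb;

        printf("=%s\n", describe_flags(0x05, &fb));  /* prints "=pl" */
        return 0;
}
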
diff --git a/lib/find_bit.c b/lib/find_bit.c
index ee3df93ba69a..8a5492173267 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -153,18 +153,6 @@ EXPORT_SYMBOL(find_last_bit);
#ifdef __BIG_ENDIAN
-/* include/linux/byteorder does not support "unsigned long" type */
-static inline unsigned long ext2_swab(const unsigned long y)
-{
-#if BITS_PER_LONG == 64
- return (unsigned long) __swab64((u64) y);
-#elif BITS_PER_LONG == 32
- return (unsigned long) __swab32((u32) y);
-#else
-#error BITS_PER_LONG not defined
-#endif
-}
-
#if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
const unsigned long *addr2, unsigned long nbits,
@@ -181,7 +169,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
tmp ^= invert;
/* Handle 1st word. */
- tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
+ tmp &= swab(BITMAP_FIRST_WORD_MASK(start));
start = round_down(start, BITS_PER_LONG);
while (!tmp) {
@@ -195,7 +183,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
tmp ^= invert;
}
- return min(start + __ffs(ext2_swab(tmp)), nbits);
+ return min(start + __ffs(swab(tmp)), nbits);
}
#endif
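
The hunk above drops the local ext2_swab() in favour of the generic swab(); both amount to a byte swap of a full unsigned long. A quick user-space equivalent (swab_ul() is a stand-in name, and the mapping to __builtin_bswap* is an assumption about the generic helper):

#include <stdio.h>

/* Byte-swap an unsigned long, as ext2_swab() did and swab() now does. */
static unsigned long swab_ul(unsigned long x)
{
#if __SIZEOF_LONG__ == 8
        return __builtin_bswap64(x);
#else
        return __builtin_bswap32(x);
#endif
}

int main(void)
{
        unsigned long v = 0x12345678UL;

        printf("%#lx -> %#lx\n", v, swab_ul(v));
        return 0;
}
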
diff --git a/lib/fonts/font_10x18.c b/lib/fonts/font_10x18.c
index 532f0ff89a96..e02f9df24d1e 100644
--- a/lib/fonts/font_10x18.c
+++ b/lib/fonts/font_10x18.c
@@ -8,8 +8,8 @@
#define FONTDATAMAX 9216
-static const unsigned char fontdata_10x18[FONTDATAMAX] = {
-
+static const struct font_data fontdata_10x18 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, 0x00, /* 0000000000 */
0x00, 0x00, /* 0000000000 */
@@ -5129,8 +5129,7 @@ static const unsigned char fontdata_10x18[FONTDATAMAX] = {
0x00, 0x00, /* 0000000000 */
0x00, 0x00, /* 0000000000 */
0x00, 0x00, /* 0000000000 */
-
-};
+} };
const struct font_desc font_10x18 = {
@@ -5138,7 +5137,7 @@ const struct font_desc font_10x18 = {
.name = "10x18",
.width = 10,
.height = 18,
- .data = fontdata_10x18,
+ .data = fontdata_10x18.data,
#ifdef __sparc__
.pref = 5,
#else
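
All of the font conversions that follow have the same shape: the bare glyph array becomes a struct with a small header in front, and font_desc's .data now points at the member instead of the array. A loose user-space model (the header layout here is an assumption, and a fixed-size array stands in for the kernel's flexible member):

#include <stdio.h>

/* Roughly the shape the font files switch to: header words, then glyphs. */
struct font_data {
        unsigned int extra[4];          /* { 0, 0, FONTDATAMAX, 0 } in the patch */
        unsigned char data[4];          /* glyph bytes follow the header */
};

static const struct font_data demo_font = {
        { 0, 0, 4, 0 }, { 0x18, 0x3c, 0x7e, 0xff }
};

int main(void)
{
        /* Consumers keep using .data, as the ".data = fontdata_x.data" hunks show. */
        const unsigned char *glyphs = demo_font.data;

        printf("first byte %#x, declared size %u\n",
               (unsigned)glyphs[0], demo_font.extra[2]);
        return 0;
}
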
diff --git a/lib/fonts/font_6x10.c b/lib/fonts/font_6x10.c
index 09b2cc03435b..6e3c4b7691c8 100644
--- a/lib/fonts/font_6x10.c
+++ b/lib/fonts/font_6x10.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/font.h>
-static const unsigned char fontdata_6x10[] = {
+#define FONTDATAMAX 2560
+static const struct font_data fontdata_6x10 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3074,14 +3076,13 @@ static const unsigned char fontdata_6x10[] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
-
-};
+} };
const struct font_desc font_6x10 = {
.idx = FONT6x10_IDX,
.name = "6x10",
.width = 6,
.height = 10,
- .data = fontdata_6x10,
+ .data = fontdata_6x10.data,
.pref = 0,
};
diff --git a/lib/fonts/font_6x11.c b/lib/fonts/font_6x11.c
index d7136c33f1f0..2d22a24e816f 100644
--- a/lib/fonts/font_6x11.c
+++ b/lib/fonts/font_6x11.c
@@ -9,8 +9,8 @@
#define FONTDATAMAX (11*256)
-static const unsigned char fontdata_6x11[FONTDATAMAX] = {
-
+static const struct font_data fontdata_6x11 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3338,8 +3338,7 @@ static const unsigned char fontdata_6x11[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
-
-};
+} };
const struct font_desc font_vga_6x11 = {
@@ -3347,7 +3346,7 @@ const struct font_desc font_vga_6x11 = {
.name = "ProFont6x11",
.width = 6,
.height = 11,
- .data = fontdata_6x11,
+ .data = fontdata_6x11.data,
/* Try avoiding this font if possible unless on MAC */
.pref = -2000,
};
diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c
index 89752d0b23e8..9cc7ae2e03f7 100644
--- a/lib/fonts/font_7x14.c
+++ b/lib/fonts/font_7x14.c
@@ -8,8 +8,8 @@
#define FONTDATAMAX 3584
-static const unsigned char fontdata_7x14[FONTDATAMAX] = {
-
+static const struct font_data fontdata_7x14 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -4105,8 +4105,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
-
-};
+} };
const struct font_desc font_7x14 = {
@@ -4114,6 +4113,6 @@ const struct font_desc font_7x14 = {
.name = "7x14",
.width = 7,
.height = 14,
- .data = fontdata_7x14,
+ .data = fontdata_7x14.data,
.pref = 0,
};
diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c
index b7ab1f5fbdb8..bab25dc59e8d 100644
--- a/lib/fonts/font_8x16.c
+++ b/lib/fonts/font_8x16.c
@@ -10,8 +10,8 @@
#define FONTDATAMAX 4096
-static const unsigned char fontdata_8x16[FONTDATAMAX] = {
-
+static const struct font_data fontdata_8x16 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4619,8 +4619,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
-
-};
+} };
const struct font_desc font_vga_8x16 = {
@@ -4628,7 +4627,7 @@ const struct font_desc font_vga_8x16 = {
.name = "VGA8x16",
.width = 8,
.height = 16,
- .data = fontdata_8x16,
+ .data = fontdata_8x16.data,
.pref = 0,
};
EXPORT_SYMBOL(font_vga_8x16);
diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c
index 2328ebc8bab5..109d0572368f 100644
--- a/lib/fonts/font_8x8.c
+++ b/lib/fonts/font_8x8.c
@@ -9,8 +9,8 @@
#define FONTDATAMAX 2048
-static const unsigned char fontdata_8x8[FONTDATAMAX] = {
-
+static const struct font_data fontdata_8x8 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2570,8 +2570,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
-
-};
+} };
const struct font_desc font_vga_8x8 = {
@@ -2579,6 +2578,6 @@ const struct font_desc font_vga_8x8 = {
.name = "VGA8x8",
.width = 8,
.height = 8,
- .data = fontdata_8x8,
+ .data = fontdata_8x8.data,
.pref = 0,
};
diff --git a/lib/fonts/font_acorn_8x8.c b/lib/fonts/font_acorn_8x8.c
index 0ff0e85d4481..fb395f0d4031 100644
--- a/lib/fonts/font_acorn_8x8.c
+++ b/lib/fonts/font_acorn_8x8.c
@@ -3,7 +3,10 @@
#include <linux/font.h>
-static const unsigned char acorndata_8x8[] = {
+#define FONTDATAMAX 2048
+
+static const struct font_data acorndata_8x8 = {
+{ 0, 0, FONTDATAMAX, 0 }, {
/* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */
/* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */
/* 02 */ 0x7e, 0xff, 0xbd, 0xff, 0xc3, 0xe7, 0xff, 0x7e, /* ^B */
@@ -260,14 +263,14 @@ static const unsigned char acorndata_8x8[] = {
/* FD */ 0x38, 0x04, 0x18, 0x20, 0x3c, 0x00, 0x00, 0x00,
/* FE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* FF */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
+} };
const struct font_desc font_acorn_8x8 = {
.idx = ACORN8x8_IDX,
.name = "Acorn8x8",
.width = 8,
.height = 8,
- .data = acorndata_8x8,
+ .data = acorndata_8x8.data,
#ifdef CONFIG_ARCH_ACORN
.pref = 20,
#else
diff --git a/lib/fonts/font_mini_4x6.c b/lib/fonts/font_mini_4x6.c
index 838caa1cfef7..592774a90917 100644
--- a/lib/fonts/font_mini_4x6.c
+++ b/lib/fonts/font_mini_4x6.c
@@ -43,8 +43,8 @@ __END__;
#define FONTDATAMAX 1536
-static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = {
-
+static const struct font_data fontdata_mini_4x6 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/*{*/
/* Char 0: ' ' */
0xee, /*= [*** ] */
@@ -2145,14 +2145,14 @@ static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = {
0xee, /*= [*** ] */
0x00, /*= [ ] */
/*}*/
-};
+} };
const struct font_desc font_mini_4x6 = {
.idx = MINI4x6_IDX,
.name = "MINI4x6",
.width = 4,
.height = 6,
- .data = fontdata_mini_4x6,
+ .data = fontdata_mini_4x6.data,
.pref = 3,
};
diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c
index b15d3c342c5b..a6f95ebce950 100644
--- a/lib/fonts/font_pearl_8x8.c
+++ b/lib/fonts/font_pearl_8x8.c
@@ -14,8 +14,8 @@
#define FONTDATAMAX 2048
-static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
-
+static const struct font_data fontdata_pearl8x8 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2575,14 +2575,13 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
-
-};
+} };
const struct font_desc font_pearl_8x8 = {
.idx = PEARL8x8_IDX,
.name = "PEARL8x8",
.width = 8,
.height = 8,
- .data = fontdata_pearl8x8,
+ .data = fontdata_pearl8x8.data,
.pref = 2,
};
diff --git a/lib/fonts/font_sun12x22.c b/lib/fonts/font_sun12x22.c
index 955d6eee3959..a5b65bd49604 100644
--- a/lib/fonts/font_sun12x22.c
+++ b/lib/fonts/font_sun12x22.c
@@ -3,8 +3,8 @@
#define FONTDATAMAX 11264
-static const unsigned char fontdata_sun12x22[FONTDATAMAX] = {
-
+static const struct font_data fontdata_sun12x22 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
@@ -6148,8 +6148,7 @@ static const unsigned char fontdata_sun12x22[FONTDATAMAX] = {
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
-
-};
+} };
const struct font_desc font_sun_12x22 = {
@@ -6157,7 +6156,7 @@ const struct font_desc font_sun_12x22 = {
.name = "SUN12x22",
.width = 12,
.height = 22,
- .data = fontdata_sun12x22,
+ .data = fontdata_sun12x22.data,
#ifdef __sparc__
.pref = 5,
#else
diff --git a/lib/fonts/font_sun8x16.c b/lib/fonts/font_sun8x16.c
index 03d71e53954a..e577e76a6a7c 100644
--- a/lib/fonts/font_sun8x16.c
+++ b/lib/fonts/font_sun8x16.c
@@ -3,7 +3,8 @@
#define FONTDATAMAX 4096
-static const unsigned char fontdata_sun8x16[FONTDATAMAX] = {
+static const struct font_data fontdata_sun8x16 = {
+{ 0, 0, FONTDATAMAX, 0 }, {
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x7e,0xff,0xdb,0xff,0xff,0xc3,0xe7,0xff,0xff,0x7e,0x00,0x00,0x00,0x00,
@@ -260,14 +261,14 @@ static const unsigned char fontdata_sun8x16[FONTDATAMAX] = {
/* */ 0x00,0x70,0xd8,0x30,0x60,0xc8,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-};
+} };
const struct font_desc font_sun_8x16 = {
.idx = SUN8x16_IDX,
.name = "SUN8x16",
.width = 8,
.height = 16,
- .data = fontdata_sun8x16,
+ .data = fontdata_sun8x16.data,
#ifdef __sparc__
.pref = 10,
#else
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 7e85d1e37a6e..0b8ee173cf3a 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -83,14 +83,14 @@ static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
* users set the same bit, one user will return remain bits, otherwise
* return 0.
*/
-static int bitmap_set_ll(unsigned long *map, int start, int nr)
+static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
unsigned long *p = map + BIT_WORD(start);
- const int size = start + nr;
+ const unsigned long size = start + nr;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
- while (nr - bits_to_set >= 0) {
+ while (nr >= bits_to_set) {
if (set_bits_ll(p, mask_to_set))
return nr;
nr -= bits_to_set;
@@ -118,14 +118,15 @@ static int bitmap_set_ll(unsigned long *map, int start, int nr)
* users clear the same bit, one user will return remain bits,
* otherwise return 0.
*/
-static int bitmap_clear_ll(unsigned long *map, int start, int nr)
+static unsigned long
+bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
unsigned long *p = map + BIT_WORD(start);
- const int size = start + nr;
+ const unsigned long size = start + nr;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
- while (nr - bits_to_clear >= 0) {
+ while (nr >= bits_to_clear) {
if (clear_bits_ll(p, mask_to_clear))
return nr;
nr -= bits_to_clear;
@@ -184,8 +185,8 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
size_t size, int nid)
{
struct gen_pool_chunk *chunk;
- int nbits = size >> pool->min_alloc_order;
- int nbytes = sizeof(struct gen_pool_chunk) +
+ unsigned long nbits = size >> pool->min_alloc_order;
+ unsigned long nbytes = sizeof(struct gen_pool_chunk) +
BITS_TO_LONGS(nbits) * sizeof(long);
chunk = vzalloc_node(nbytes, nid);
@@ -242,7 +243,7 @@ void gen_pool_destroy(struct gen_pool *pool)
struct list_head *_chunk, *_next_chunk;
struct gen_pool_chunk *chunk;
int order = pool->min_alloc_order;
- int bit, end_bit;
+ unsigned long bit, end_bit;
list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
@@ -293,7 +294,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
struct gen_pool_chunk *chunk;
unsigned long addr = 0;
int order = pool->min_alloc_order;
- int nbits, start_bit, end_bit, remain;
+ unsigned long nbits, start_bit, end_bit, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
@@ -376,7 +377,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
struct gen_pool_chunk *chunk;
int order = pool->min_alloc_order;
- int start_bit, nbits, remain;
+ unsigned long start_bit, nbits, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
@@ -638,7 +639,7 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
index = bitmap_find_next_zero_area(map, size, start, nr, 0);
while (index < size) {
- int next_bit = find_next_bit(map, size, index + nr);
+ unsigned long next_bit = find_next_bit(map, size, index + nr);
if ((next_bit - index) < len) {
len = next_bit - index;
start_bit = index;
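
One motivation for rewriting the loop conditions alongside the int to unsigned long widening: with unsigned operands, the old form "nr - bits_to_set >= 0" is always true because the subtraction wraps. A two-line demonstration:

#include <stdio.h>

int main(void)
{
        unsigned long nr = 3, step = 32;

        /* Unsigned subtraction wraps, so "nr - step >= 0" would always hold. */
        printf("nr - step = %lu\n", nr - step);
        /* The patched comparison stays meaningful: */
        printf("loop body would %srun\n", nr >= step ? "" : "not ");
        return 0;
}
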
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 7761f3294339..26d21339bef2 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -250,12 +250,13 @@ static int kobj_usermode_filter(struct kobject *kobj)
static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
{
+ int buffer_size = sizeof(env->buf) - env->buflen;
int len;
- len = strlcpy(&env->buf[env->buflen], subsystem,
- sizeof(env->buf) - env->buflen);
- if (len >= (sizeof(env->buf) - env->buflen)) {
- WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
+ len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size);
+ if (len >= buffer_size) {
+ pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n",
+ buffer_size, len);
return -ENOMEM;
}
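
The check above keys off strlcpy()'s return value (the length it wanted to write). A portable user-space analogue using snprintf(), whose return value carries the same truncation information; the buffer size and subsystem string are made up for the demo:

#include <stdio.h>

int main(void)
{
        char buf[8];
        int len = snprintf(buf, sizeof(buf), "%s", "a-rather-long-subsystem");

        /* As in the patched check: compare the needed length with the space left. */
        if (len >= (int)sizeof(buf))
                fprintf(stderr, "buffer size of %zu too small, needed %d\n",
                        sizeof(buf), len);
        return 0;
}
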
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index 905027574e5d..774bb02fff10 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -27,6 +27,8 @@ static DEFINE_MUTEX(io_range_mutex);
* @new_range: pointer to the IO range to be registered.
*
* Returns 0 on success, the error code in case of failure.
+ * If the range already exists, -EEXIST will be returned, which should be
+ * considered a success.
*
* Register a new IO range node in the IO range list.
*/
@@ -49,6 +51,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
list_for_each_entry(range, &io_range_list, list) {
if (range->fwnode == new_range->fwnode) {
/* range already there */
+ ret = -EEXIST;
goto end_register;
}
if (range->flags == LOGIC_PIO_CPU_MMIO &&
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 08c60d10747f..6c5229f98c9e 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -671,7 +671,7 @@ do { \
************** MIPS/64 **************
***************************************/
#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC)
/*
* GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
* code below, so we special case MIPS64r6 until the compiler can do better.
@@ -756,22 +756,22 @@ do { \
do { \
if (__builtin_constant_p(bh) && (bh) == 0) \
__asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%r" ((USItype)(ah)), \
"%r" ((USItype)(al)), \
"rI" ((USItype)(bl))); \
else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
__asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%r" ((USItype)(ah)), \
"%r" ((USItype)(al)), \
"rI" ((USItype)(bl))); \
else \
__asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%r" ((USItype)(ah)), \
"r" ((USItype)(bh)), \
"%r" ((USItype)(al)), \
@@ -781,36 +781,36 @@ do { \
do { \
if (__builtin_constant_p(ah) && (ah) == 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(bh)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(bh)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else if (__builtin_constant_p(bh) && (bh) == 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(ah)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(ah)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else \
__asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(ah)), \
"r" ((USItype)(bh)), \
"rI" ((USItype)(al)), \
@@ -821,7 +821,7 @@ do { \
do { \
USItype __m0 = (m0), __m1 = (m1); \
__asm__ ("mulhwu %0,%1,%2" \
- : "=r" ((USItype) ph) \
+ : "=r" (ph) \
: "%r" (__m0), \
"r" (__m1)); \
(pl) = __m0 * __m1; \
diff --git a/lib/nlattr.c b/lib/nlattr.c
index e335bcafa9e4..00bfc6aece05 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -402,7 +402,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str)
int attrlen = nla_len(nla);
int d;
- if (attrlen > 0 && buf[attrlen - 1] == '\0')
+ while (attrlen > 0 && buf[attrlen - 1] == '\0')
attrlen--;
d = attrlen - len;
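
The nla_strcmp() change turns a single trailing-NUL trim into a loop so every padding NUL is stripped before the length comparison. A self-contained illustration of the difference:

#include <stdio.h>

int main(void)
{
        const char attr[] = { 'e', 't', 'h', '0', '\0', '\0' };
        int attrlen = (int)sizeof(attr);

        /* "while" instead of "if": strip all trailing NULs, not just one. */
        while (attrlen > 0 && attr[attrlen - 1] == '\0')
                attrlen--;
        printf("effective length: %d\n", attrlen);      /* 4 */
        return 0;
}
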
diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc
index d5242f544551..b7c68030da4f 100644
--- a/lib/raid6/neon.uc
+++ b/lib/raid6/neon.uc
@@ -28,7 +28,6 @@
typedef uint8x16_t unative_t;
-#define NBYTES(x) ((unative_t){x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
#define NSIZE sizeof(unative_t)
/*
@@ -61,7 +60,7 @@ void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
int d, z, z0;
register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
- const unative_t x1d = NBYTES(0x1d);
+ const unative_t x1d = vdupq_n_u8(0x1d);
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
@@ -92,7 +91,7 @@ void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop,
int d, z, z0;
register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
- const unative_t x1d = NBYTES(0x1d);
+ const unative_t x1d = vdupq_n_u8(0x1d);
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
diff --git a/lib/raid6/recov_neon_inner.c b/lib/raid6/recov_neon_inner.c
index 8cd20c9f834a..7d00c31a6547 100644
--- a/lib/raid6/recov_neon_inner.c
+++ b/lib/raid6/recov_neon_inner.c
@@ -10,11 +10,6 @@
#include <arm_neon.h>
-static const uint8x16_t x0f = {
- 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
- 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
-};
-
#ifdef CONFIG_ARM
/*
* AArch32 does not provide this intrinsic natively because it does not
@@ -41,6 +36,7 @@ void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
uint8x16_t pm1 = vld1q_u8(pbmul + 16);
uint8x16_t qm0 = vld1q_u8(qmul);
uint8x16_t qm1 = vld1q_u8(qmul + 16);
+ uint8x16_t x0f = vdupq_n_u8(0x0f);
/*
* while ( bytes-- ) {
@@ -87,6 +83,7 @@ void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
{
uint8x16_t qm0 = vld1q_u8(qmul);
uint8x16_t qm1 = vld1q_u8(qmul + 16);
+ uint8x16_t x0f = vdupq_n_u8(0x0f);
/*
* while (bytes--) {
diff --git a/lib/random32.c b/lib/random32.c
index 4aaa76404d56..9085b1172015 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -40,16 +40,6 @@
#include <linux/sched.h>
#include <asm/unaligned.h>
-#ifdef CONFIG_RANDOM32_SELFTEST
-static void __init prandom_state_selftest(void);
-#else
-static inline void prandom_state_selftest(void)
-{
-}
-#endif
-
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
-
/**
* prandom_u32_state - seeded pseudo-random number generator.
* @state: pointer to state structure holding seeded state.
@@ -70,25 +60,6 @@ u32 prandom_u32_state(struct rnd_state *state)
EXPORT_SYMBOL(prandom_u32_state);
/**
- * prandom_u32 - pseudo random number generator
- *
- * A 32 bit pseudo-random number is generated using a fast
- * algorithm suitable for simulation. This algorithm is NOT
- * considered safe for cryptographic use.
- */
-u32 prandom_u32(void)
-{
- struct rnd_state *state = &get_cpu_var(net_rand_state);
- u32 res;
-
- res = prandom_u32_state(state);
- put_cpu_var(net_rand_state);
-
- return res;
-}
-EXPORT_SYMBOL(prandom_u32);
-
-/**
* prandom_bytes_state - get the requested number of pseudo-random bytes
*
* @state: pointer to state structure holding seeded state.
@@ -119,20 +90,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
}
EXPORT_SYMBOL(prandom_bytes_state);
-/**
- * prandom_bytes - get the requested number of pseudo-random bytes
- * @buf: where to copy the pseudo-random bytes to
- * @bytes: the requested number of bytes
- */
-void prandom_bytes(void *buf, size_t bytes)
-{
- struct rnd_state *state = &get_cpu_var(net_rand_state);
-
- prandom_bytes_state(state, buf, bytes);
- put_cpu_var(net_rand_state);
-}
-EXPORT_SYMBOL(prandom_bytes);
-
static void prandom_warmup(struct rnd_state *state)
{
/* Calling RNG ten times to satisfy recurrence condition */
@@ -148,96 +105,6 @@ static void prandom_warmup(struct rnd_state *state)
prandom_u32_state(state);
}
-static u32 __extract_hwseed(void)
-{
- unsigned int val = 0;
-
- (void)(arch_get_random_seed_int(&val) ||
- arch_get_random_int(&val));
-
- return val;
-}
-
-static void prandom_seed_early(struct rnd_state *state, u32 seed,
- bool mix_with_hwseed)
-{
-#define LCG(x) ((x) * 69069U) /* super-duper LCG */
-#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
- state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
- state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
- state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
- state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
-}
-
-/**
- * prandom_seed - add entropy to pseudo random number generator
- * @seed: seed value
- *
- * Add some additional seeding to the prandom pool.
- */
-void prandom_seed(u32 entropy)
-{
- int i;
- /*
- * No locking on the CPUs, but then somewhat random results are, well,
- * expected.
- */
- for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state, i);
-
- state->s1 = __seed(state->s1 ^ entropy, 2U);
- prandom_warmup(state);
- }
-}
-EXPORT_SYMBOL(prandom_seed);
-
-/*
- * Generate some initially weak seeding values to allow
- * to start the prandom_u32() engine.
- */
-static int __init prandom_init(void)
-{
- int i;
-
- prandom_state_selftest();
-
- for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state, i);
- u32 weak_seed = (i + jiffies) ^ random_get_entropy();
-
- prandom_seed_early(state, weak_seed, true);
- prandom_warmup(state);
- }
-
- return 0;
-}
-core_initcall(prandom_init);
-
-static void __prandom_timer(struct timer_list *unused);
-
-static DEFINE_TIMER(seed_timer, __prandom_timer);
-
-static void __prandom_timer(struct timer_list *unused)
-{
- u32 entropy;
- unsigned long expires;
-
- get_random_bytes(&entropy, sizeof(entropy));
- prandom_seed(entropy);
-
- /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
- expires = 40 + prandom_u32_max(40);
- seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
-
- add_timer(&seed_timer);
-}
-
-static void __init __prandom_start_seed_timer(void)
-{
- seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
- add_timer(&seed_timer);
-}
-
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
{
int i;
@@ -257,51 +124,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
}
EXPORT_SYMBOL(prandom_seed_full_state);
-/*
- * Generate better values after random number generator
- * is fully initialized.
- */
-static void __prandom_reseed(bool late)
-{
- unsigned long flags;
- static bool latch = false;
- static DEFINE_SPINLOCK(lock);
-
- /* Asking for random bytes might result in bytes getting
- * moved into the nonblocking pool and thus marking it
- * as initialized. In this case we would double back into
- * this function and attempt to do a late reseed.
- * Ignore the pointless attempt to reseed again if we're
- * already waiting for bytes when the nonblocking pool
- * got initialized.
- */
-
- /* only allow initial seeding (late == false) once */
- if (!spin_trylock_irqsave(&lock, flags))
- return;
-
- if (latch && !late)
- goto out;
-
- latch = true;
- prandom_seed_full_state(&net_rand_state);
-out:
- spin_unlock_irqrestore(&lock, flags);
-}
-
-void prandom_reseed_late(void)
-{
- __prandom_reseed(true);
-}
-
-static int __init prandom_reseed(void)
-{
- __prandom_reseed(false);
- __prandom_start_seed_timer();
- return 0;
-}
-late_initcall(prandom_reseed);
-
#ifdef CONFIG_RANDOM32_SELFTEST
static struct prandom_test1 {
u32 seed;
@@ -421,7 +243,28 @@ static struct prandom_test2 {
{ 407983964U, 921U, 728767059U },
};
-static void __init prandom_state_selftest(void)
+static u32 __extract_hwseed(void)
+{
+ unsigned int val = 0;
+
+ (void)(arch_get_random_seed_int(&val) ||
+ arch_get_random_int(&val));
+
+ return val;
+}
+
+static void prandom_seed_early(struct rnd_state *state, u32 seed,
+ bool mix_with_hwseed)
+{
+#define LCG(x) ((x) * 69069U) /* super-duper LCG */
+#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
+ state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
+ state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
+ state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
+ state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
+}
+
+static int __init prandom_state_selftest(void)
{
int i, j, errors = 0, runs = 0;
bool error = false;
@@ -461,5 +304,266 @@ static void __init prandom_state_selftest(void)
pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
else
pr_info("prandom: %d self tests passed\n", runs);
+ return 0;
}
+core_initcall(prandom_state_selftest);
#endif
+
+/*
+ * The prandom_u32() implementation is now completely separate from the
+ * prandom_state() functions, which are retained (for now) for compatibility.
+ *
+ * Because of (ab)use in the networking code for choosing random TCP/UDP port
+ * numbers, which open DoS possibilities if guessable, we want something
+ * stronger than a standard PRNG. But the performance requirements of
+ * the network code do not allow robust crypto for this application.
+ *
+ * So this is a homebrew Junior Spaceman implementation, based on the
+ * lowest-latency trustworthy crypto primitive available, SipHash.
+ * (The authors of SipHash have not been consulted about this abuse of
+ * their work.)
+ *
+ * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
+ * one word of output. This abbreviated version uses 2 rounds per word
+ * of output.
+ */
+
+struct siprand_state {
+ unsigned long v0;
+ unsigned long v1;
+ unsigned long v2;
+ unsigned long v3;
+};
+
+static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
+
+/*
+ * This is the core CPRNG function. As "pseudorandom", this is not used
+ * for truly valuable things, just intended to be a PITA to guess.
+ * For maximum speed, we do just two SipHash rounds per word. This is
+ * the same rate as 4 rounds per 64 bits that SipHash normally uses,
+ * so hopefully it's reasonably secure.
+ *
+ * There are two changes from the official SipHash finalization:
+ * - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
+ * they are there only to make the output rounds distinct from the input
+ * rounds, and this application has no input rounds.
+ * - Rather than returning v0^v1^v2^v3, return v1+v3.
+ * If you look at the SipHash round, the last operation on v3 is
+ * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
+ * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but
+ * it still cancels out half of the bits in v2 for no benefit.)
+ * Second, since the last combining operation was xor, continue the
+ * pattern of alternating xor/add for a tiny bit of extra non-linearity.
+ */
+static inline u32 siprand_u32(struct siprand_state *s)
+{
+ unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
+
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
+ return v1 + v3;
+}
+
+
+/**
+ * prandom_u32 - pseudo random number generator
+ *
+ * A 32 bit pseudo-random number is generated using a fast
+ * algorithm suitable for simulation. This algorithm is NOT
+ * considered safe for cryptographic use.
+ */
+u32 prandom_u32(void)
+{
+ struct siprand_state *state = get_cpu_ptr(&net_rand_state);
+ u32 res = siprand_u32(state);
+
+ put_cpu_ptr(&net_rand_state);
+ return res;
+}
+EXPORT_SYMBOL(prandom_u32);
+
+/**
+ * prandom_bytes - get the requested number of pseudo-random bytes
+ * @buf: where to copy the pseudo-random bytes to
+ * @bytes: the requested number of bytes
+ */
+void prandom_bytes(void *buf, size_t bytes)
+{
+ struct siprand_state *state = get_cpu_ptr(&net_rand_state);
+ u8 *ptr = buf;
+
+ while (bytes >= sizeof(u32)) {
+ put_unaligned(siprand_u32(state), (u32 *)ptr);
+ ptr += sizeof(u32);
+ bytes -= sizeof(u32);
+ }
+
+ if (bytes > 0) {
+ u32 rem = siprand_u32(state);
+
+ do {
+ *ptr++ = (u8)rem;
+ rem >>= BITS_PER_BYTE;
+ } while (--bytes > 0);
+ }
+ put_cpu_ptr(&net_rand_state);
+}
+EXPORT_SYMBOL(prandom_bytes);
+
+/**
+ * prandom_seed - add entropy to pseudo random number generator
+ * @entropy: entropy value
+ *
+ * Add some additional seed material to the prandom pool.
+ * The "entropy" is actually our IP address (the only caller is
+ * the network code), not for unpredictability, but to ensure that
+ * different machines are initialized differently.
+ */
+void prandom_seed(u32 entropy)
+{
+ int i;
+
+ add_device_randomness(&entropy, sizeof(entropy));
+
+ for_each_possible_cpu(i) {
+ struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
+ unsigned long v0 = state->v0, v1 = state->v1;
+ unsigned long v2 = state->v2, v3 = state->v3;
+
+ do {
+ v3 ^= entropy;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= entropy;
+ } while (unlikely(!v0 || !v1 || !v2 || !v3));
+
+ WRITE_ONCE(state->v0, v0);
+ WRITE_ONCE(state->v1, v1);
+ WRITE_ONCE(state->v2, v2);
+ WRITE_ONCE(state->v3, v3);
+ }
+}
+EXPORT_SYMBOL(prandom_seed);
+
+/*
+ * Generate some initially weak seeding values to allow
+ * the prandom_u32() engine to be started.
+ */
+static int __init prandom_init_early(void)
+{
+ int i;
+ unsigned long v0, v1, v2, v3;
+
+ if (!arch_get_random_long(&v0))
+ v0 = jiffies;
+ if (!arch_get_random_long(&v1))
+ v1 = random_get_entropy();
+ v2 = v0 ^ PRND_K0;
+ v3 = v1 ^ PRND_K1;
+
+ for_each_possible_cpu(i) {
+ struct siprand_state *state;
+
+ v3 ^= i;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= i;
+
+ state = per_cpu_ptr(&net_rand_state, i);
+ state->v0 = v0; state->v1 = v1;
+ state->v2 = v2; state->v3 = v3;
+ }
+
+ return 0;
+}
+core_initcall(prandom_init_early);
+
+
+/* Stronger reseeding when available, and periodically thereafter. */
+static void prandom_reseed(struct timer_list *unused);
+
+static DEFINE_TIMER(seed_timer, prandom_reseed);
+
+static void prandom_reseed(struct timer_list *unused)
+{
+ unsigned long expires;
+ int i;
+
+ /*
+ * Reinitialize each CPU's PRNG with 128 bits of key.
+ * No locking on the CPUs, but then somewhat random results are,
+ * well, expected.
+ */
+ for_each_possible_cpu(i) {
+ struct siprand_state *state;
+ unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
+ unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
+#if BITS_PER_LONG == 32
+ int j;
+
+ /*
+ * On 32-bit machines, hash in two extra words to
+ * approximate 128-bit key length. Not that the hash
+ * has that much security, but this prevents a trivial
+ * 64-bit brute force.
+ */
+ for (j = 0; j < 2; j++) {
+ unsigned long m = get_random_long();
+
+ v3 ^= m;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= m;
+ }
+#endif
+ /*
+ * Probably impossible in practice, but there is a
+ * theoretical risk that a race between this reseeding
+ * and the target CPU writing its state back could
+ * create the all-zero SipHash fixed point.
+ *
+ * To ensure that never happens, ensure the state
+ * we write contains no zero words.
+ */
+ state = per_cpu_ptr(&net_rand_state, i);
+ WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
+ WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
+ WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
+ WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
+ }
+
+ /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
+ expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
+ mod_timer(&seed_timer, expires);
+}
+
+/*
+ * The random ready callback can be called from almost any interrupt.
+ * To avoid worrying about whether it's safe to delay that interrupt
+ * long enough to seed all CPUs, just schedule an immediate timer event.
+ */
+static void prandom_timer_start(struct random_ready_callback *unused)
+{
+ mod_timer(&seed_timer, jiffies);
+}
+
+/*
+ * Start periodic full reseeding as soon as strong
+ * random numbers are available.
+ */
+static int __init prandom_init_late(void)
+{
+ static struct random_ready_callback random_ready = {
+ .func = prandom_timer_start
+ };
+ int ret = add_random_ready_callback(&random_ready);
+
+ if (ret == -EALREADY) {
+ prandom_timer_start(&random_ready);
+ ret = 0;
+ }
+ return ret;
+}
+late_initcall(prandom_init_late);
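
To make the new generator concrete, here is a self-contained sketch of the siprand_u32() idea: two SipHash rounds per 32-bit output, returning v1 + v3. PRND_SIPROUND and the K0/K1 constants live in a header outside this hunk, so the SIPROUND macro below is the standard round from the SipHash paper and is only assumed to match the kernel's 64-bit variant.

#include <stdint.h>
#include <stdio.h>

#define ROL64(x, r) (((x) << (r)) | ((x) >> (64 - (r))))

/* One standard 64-bit SipHash round (assumed equivalent to PRND_SIPROUND). */
#define SIPROUND(v0, v1, v2, v3) do {                                   \
        v0 += v1; v1 = ROL64(v1, 13); v1 ^= v0; v0 = ROL64(v0, 32);     \
        v2 += v3; v3 = ROL64(v3, 16); v3 ^= v2;                         \
        v0 += v3; v3 = ROL64(v3, 21); v3 ^= v0;                         \
        v2 += v1; v1 = ROL64(v1, 17); v1 ^= v2; v2 = ROL64(v2, 32);     \
} while (0)

struct siprand_state { uint64_t v0, v1, v2, v3; };

/* Two rounds per output word, returning v1 + v3, as the patch describes. */
static uint32_t siprand_u32(struct siprand_state *s)
{
        uint64_t v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;

        SIPROUND(v0, v1, v2, v3);
        SIPROUND(v0, v1, v2, v3);
        s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
        return (uint32_t)(v1 + v3);
}

int main(void)
{
        struct siprand_state s = { 1, 2, 3, 4 };

        for (int i = 0; i < 4; i++)
                printf("%#010x\n", (unsigned)siprand_u32(&s));
        return 0;
}
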
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 60e7eca2f4be..3b859201f84c 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -506,7 +506,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
elem_len = min_t(u64, length, PAGE_SIZE << order);
page = alloc_pages(gfp, order);
if (!page) {
- sgl_free(sgl);
+ sgl_free_order(sgl, order);
return NULL;
}
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 3376a3291186..d0f1b7d0ce2e 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -78,7 +78,7 @@ static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
-static DEFINE_SPINLOCK(depot_lock);
+static DEFINE_RAW_SPINLOCK(depot_lock);
static bool init_stack_slab(void **prealloc)
{
@@ -266,7 +266,7 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
prealloc = page_address(page);
}
- spin_lock_irqsave(&depot_lock, flags);
+ raw_spin_lock_irqsave(&depot_lock, flags);
found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
if (!found) {
@@ -290,7 +290,7 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
WARN_ON(!init_stack_slab(&prealloc));
}
- spin_unlock_irqrestore(&depot_lock, flags);
+ raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
if (prealloc) {
/* Nobody used this memory, ok to free it. */
diff --git a/lib/string.c b/lib/string.c
index 72125fd5b4a6..f7f7770444bf 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -158,11 +158,9 @@ EXPORT_SYMBOL(strlcpy);
* @src: Where to copy the string from
* @count: Size of destination buffer
*
- * Copy the string, or as much of it as fits, into the dest buffer.
- * The routine returns the number of characters copied (not including
- * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough.
- * The behavior is undefined if the string buffers overlap.
- * The destination buffer is always NUL terminated, unless it's zero-sized.
+ * Copy the string, or as much of it as fits, into the dest buffer. The
+ * behavior is undefined if the string buffers overlap. The destination
+ * buffer is always NUL terminated, unless it's zero-sized.
*
* Preferred to strlcpy() since the API doesn't require reading memory
* from the src string beyond the specified "count" bytes, and since
@@ -172,8 +170,10 @@ EXPORT_SYMBOL(strlcpy);
*
* Preferred to strncpy() since it always returns a valid string, and
* doesn't unnecessarily force the tail of the destination buffer to be
- * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy()
- * with an overflow test, then just memset() the tail of the dest buffer.
+ * zeroed. If zeroing is desired please use strscpy_pad().
+ *
+ * Return: The number of characters copied (not including the trailing
+ * %NUL) or -E2BIG if the destination buffer wasn't big enough.
*/
ssize_t strscpy(char *dest, const char *src, size_t count)
{
@@ -236,6 +236,63 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
EXPORT_SYMBOL(strscpy);
#endif
+/**
+ * stpcpy - copy a string from src to dest returning a pointer to the new end
+ * of dest, including src's %NUL-terminator. May overrun dest.
+ * @dest: pointer to end of string being copied into. Must be large enough
+ * to receive copy.
+ * @src: pointer to the beginning of string being copied from. Must not overlap
+ * dest.
+ *
+ * stpcpy differs from strcpy in a key way: the return value is a pointer
+ * to the new %NUL-terminating character in @dest. (For strcpy, the return
+ * value is a pointer to the start of @dest). This interface is considered
+ * unsafe as it doesn't perform bounds checking of the inputs. As such it's
+ * not recommended for use. Instead, its definition is provided in case
+ * the compiler lowers other libcalls to stpcpy.
+ */
+char *stpcpy(char *__restrict__ dest, const char *__restrict__ src);
+char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
+{
+ while ((*dest++ = *src++) != '\0')
+ /* nothing */;
+ return --dest;
+}
+EXPORT_SYMBOL(stpcpy);
+
+/**
+ * strscpy_pad() - Copy a C-string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @count: Size of destination buffer
+ *
+ * Copy the string, or as much of it as fits, into the dest buffer. The
+ * behavior is undefined if the string buffers overlap. The destination
+ * buffer is always %NUL terminated, unless it's zero-sized.
+ *
+ * If the source string is shorter than the destination buffer, zeros
+ * the tail of the destination buffer.
+ *
+ * For full explanation of why you may want to consider using the
+ * 'strscpy' functions please see the function docstring for strscpy().
+ *
+ * Return: The number of characters copied (not including the trailing
+ * %NUL) or -E2BIG if the destination buffer wasn't big enough.
+ */
+ssize_t strscpy_pad(char *dest, const char *src, size_t count)
+{
+ ssize_t written;
+
+ written = strscpy(dest, src, count);
+ if (written < 0 || written == count - 1)
+ return written;
+
+ memset(dest + written + 1, 0, count - written - 1);
+
+ return written;
+}
+EXPORT_SYMBOL(strscpy_pad);
+
#ifndef __HAVE_ARCH_STRCAT
/**
* strcat - Append one %NUL-terminated string to another
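The strscpy()/strscpy_pad() kernel-doc above boils down to a simple rule: use strscpy_pad() where the old strncpy()-plus-manual-zeroing idiom was used, and treat -E2BIG as truncation. A minimal, hedged usage sketch follows; the struct, field and function names are invented for illustration, only strscpy_pad() and its return convention come from the hunks above.

/* Hedged usage sketch -- "struct example_req" and example_fill_req() are invented. */
struct example_req {
	char name[16];			/* fixed-size field that must not leak stale bytes */
};

static int example_fill_req(struct example_req *req, const char *src)
{
	ssize_t len;

	/* Copies src, NUL-terminates, and zero-fills the rest of req->name. */
	len = strscpy_pad(req->name, src, sizeof(req->name));
	if (len == -E2BIG)
		return -EINVAL;		/* src did not fit; req->name is truncated but NUL-terminated */

	return 0;
}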
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index e304b54c9c7d..fc5b1e2d997d 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -29,13 +29,6 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src,
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
unsigned long res = 0;
- /*
- * Truncate 'max' to the user-specified limit, so that
- * we only have one limit we need to check in the loop
- */
- if (max > count)
- max = count;
-
if (IS_UNALIGNED(src, dst))
goto byte_at_a_time;
@@ -113,12 +106,20 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
unsigned long max = max_addr - src_addr;
long retval;
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
kasan_check_write(dst, count);
check_object_size(dst, count, false);
- user_access_begin();
- retval = do_strncpy_from_user(dst, src, count, max);
- user_access_end();
- return retval;
+ if (user_access_begin(VERIFY_READ, src, max)) {
+ retval = do_strncpy_from_user(dst, src, count, max);
+ user_access_end();
+ return retval;
+ }
}
return -EFAULT;
}
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 184f80f7bacf..0bf7c06ebdad 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -32,13 +32,6 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
unsigned long c;
/*
- * Truncate 'max' to the user-specified limit, so that
- * we only have one limit we need to check in the loop
- */
- if (max > count)
- max = count;
-
- /*
* Do everything aligned. But that means that we
* need to also expand the maximum..
*/
@@ -114,10 +107,18 @@ long strnlen_user(const char __user *str, long count)
unsigned long max = max_addr - src_addr;
long retval;
- user_access_begin();
- retval = do_strnlen_user(str, count, max);
- user_access_end();
- return retval;
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
+ if (user_access_begin(VERIFY_READ, str, max)) {
+ retval = do_strnlen_user(str, count, max);
+ user_access_end();
+ return retval;
+ }
}
return 0;
}
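Both the strncpy_from_user() and strnlen_user() hunks above move the truncation of 'max' to the caller so that user_access_begin(VERIFY_READ, ..., max) opens a user-access window covering exactly the bytes that may be touched, and bails out cleanly when the check fails. The snippet below is a hedged illustration of the same guarded pattern for a hypothetical one-byte probe; user_access_begin(), user_access_end() and unsafe_get_user() are existing primitives, the wrapper itself is invented.

/* Hedged illustration -- example_peek_user_byte() is invented. */
static long example_peek_user_byte(const char __user *p, char *out)
{
	long ret = -EFAULT;

	if (user_access_begin(VERIFY_READ, p, 1)) {
		/* Jumps to the label below if the access faults. */
		unsafe_get_user(*out, p, out_end);
		ret = 0;
out_end:
		user_access_end();
	}
	return ret;
}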
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 9cf77628fc91..87a0cc750ea2 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -745,7 +745,7 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
break;
case TEST_KMOD_FS_TYPE:
kfree_const(config->test_fs);
- config->test_driver = NULL;
+ config->test_fs = NULL;
copied = config_copy_test_fs(config, test_str,
strlen(test_str));
break;
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 2c13ecc5bb2c..ed1f3df27260 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -10,17 +10,6 @@
#ifndef ASMINF
-/* Allow machine dependent optimization for post-increment or pre-increment.
- Based on testing to date,
- Pre-increment preferred for:
- - PowerPC G3 (Adler)
- - MIPS R5000 (Randers-Pehrson)
- Post-increment preferred for:
- - none
- No measurable difference:
- - Pentium III (Anderson)
- - M68060 (Nikl)
- */
union uu {
unsigned short us;
unsigned char b[2];
@@ -38,16 +27,6 @@ get_unaligned16(const unsigned short *p)
return mm.us;
}
-#ifdef POSTINC
-# define OFF 0
-# define PUP(a) *(a)++
-# define UP_UNALIGNED(a) get_unaligned16((a)++)
-#else
-# define OFF 1
-# define PUP(a) *++(a)
-# define UP_UNALIGNED(a) get_unaligned16(++(a))
-#endif
-
/*
Decode literal, length, and distance codes and write out the resulting
literal and match bytes until either not enough input or output is
@@ -115,9 +94,9 @@ void inflate_fast(z_streamp strm, unsigned start)
/* copy state to local variables */
state = (struct inflate_state *)strm->state;
- in = strm->next_in - OFF;
+ in = strm->next_in;
last = in + (strm->avail_in - 5);
- out = strm->next_out - OFF;
+ out = strm->next_out;
beg = out - (start - strm->avail_out);
end = out + (strm->avail_out - 257);
#ifdef INFLATE_STRICT
@@ -138,9 +117,9 @@ void inflate_fast(z_streamp strm, unsigned start)
input data or output space */
do {
if (bits < 15) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
this = lcode[hold & lmask];
@@ -150,14 +129,14 @@ void inflate_fast(z_streamp strm, unsigned start)
bits -= op;
op = (unsigned)(this.op);
if (op == 0) { /* literal */
- PUP(out) = (unsigned char)(this.val);
+ *out++ = (unsigned char)(this.val);
}
else if (op & 16) { /* length base */
len = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
len += (unsigned)hold & ((1U << op) - 1);
@@ -165,9 +144,9 @@ void inflate_fast(z_streamp strm, unsigned start)
bits -= op;
}
if (bits < 15) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
this = dcode[hold & dmask];
@@ -180,10 +159,10 @@ void inflate_fast(z_streamp strm, unsigned start)
dist = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
}
@@ -205,13 +184,13 @@ void inflate_fast(z_streamp strm, unsigned start)
state->mode = BAD;
break;
}
- from = window - OFF;
+ from = window;
if (write == 0) { /* very common case */
from += wsize - op;
if (op < len) { /* some from window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
@@ -222,14 +201,14 @@ void inflate_fast(z_streamp strm, unsigned start)
if (op < len) { /* some from end of window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
- from = window - OFF;
+ from = window;
if (write < len) { /* some from start of window */
op = write;
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
@@ -240,21 +219,21 @@ void inflate_fast(z_streamp strm, unsigned start)
if (op < len) { /* some from window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
}
while (len > 2) {
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
+ *out++ = *from++;
+ *out++ = *from++;
+ *out++ = *from++;
len -= 3;
}
if (len) {
- PUP(out) = PUP(from);
+ *out++ = *from++;
if (len > 1)
- PUP(out) = PUP(from);
+ *out++ = *from++;
}
}
else {
@@ -264,29 +243,29 @@ void inflate_fast(z_streamp strm, unsigned start)
from = out - dist; /* copy direct from output */
/* minimum length is three */
/* Align out addr */
- if (!((long)(out - 1 + OFF) & 1)) {
- PUP(out) = PUP(from);
+ if (!((long)(out - 1) & 1)) {
+ *out++ = *from++;
len--;
}
- sout = (unsigned short *)(out - OFF);
+ sout = (unsigned short *)(out);
if (dist > 2) {
unsigned short *sfrom;
- sfrom = (unsigned short *)(from - OFF);
+ sfrom = (unsigned short *)(from);
loops = len >> 1;
do
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- PUP(sout) = PUP(sfrom);
+ *sout++ = *sfrom++;
#else
- PUP(sout) = UP_UNALIGNED(sfrom);
+ *sout++ = get_unaligned16(sfrom++);
#endif
while (--loops);
- out = (unsigned char *)sout + OFF;
- from = (unsigned char *)sfrom + OFF;
+ out = (unsigned char *)sout;
+ from = (unsigned char *)sfrom;
} else { /* dist == 1 or dist == 2 */
unsigned short pat16;
- pat16 = *(sout-1+OFF);
+ pat16 = *(sout-1);
if (dist == 1) {
union uu mm;
/* copy one char pattern to both bytes */
@@ -296,12 +275,12 @@ void inflate_fast(z_streamp strm, unsigned start)
}
loops = len >> 1;
do
- PUP(sout) = pat16;
+ *sout++ = pat16;
while (--loops);
- out = (unsigned char *)sout + OFF;
+ out = (unsigned char *)sout;
}
if (len & 1)
- PUP(out) = PUP(from);
+ *out++ = *from++;
}
}
else if ((op & 64) == 0) { /* 2nd level distance code */
@@ -336,8 +315,8 @@ void inflate_fast(z_streamp strm, unsigned start)
hold &= (1U << bits) - 1;
/* update state and return */
- strm->next_in = in + OFF;
- strm->next_out = out + OFF;
+ strm->next_in = in;
+ strm->next_out = out;
strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last));
strm->avail_out = (unsigned)(out < end ?
257 + (end - out) : 257 - (out - end));
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 72e6d0c55cfa..0a970be24a28 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -19,6 +19,7 @@ struct backing_dev_info noop_backing_dev_info = {
EXPORT_SYMBOL_GPL(noop_backing_dev_info);
static struct class *bdi_class;
+const char *bdi_unknown_name = "(unknown)";
/*
* bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
diff --git a/mm/filemap.c b/mm/filemap.c
index 45f1c6d73b5b..f2e777003b90 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2889,6 +2889,14 @@ filler:
unlock_page(page);
goto out;
}
+
+ /*
+ * A previous I/O error may have been due to a temporary
+ * failure. Clear the page error before the actual read;
+ * PG_error will be set again if the read fails.
+ */
+ ClearPageError(page);
goto filler;
out:
diff --git a/mm/gup.c b/mm/gup.c
index f3088d25bd92..44569927f0ea 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -61,13 +61,22 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
}
/*
- * FOLL_FORCE can write to even unwritable pte's, but only
- * after we've gone through a COW cycle and they are dirty.
+ * FOLL_FORCE or a forced COW break can write even to unwritable pte's,
+ * but only after we've gone through a COW cycle and they are dirty.
*/
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
- return pte_write(pte) ||
- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+ return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
+}
+
+/*
+ * A (separate) COW fault might break the page the other way and
+ * get_user_pages() would return the page from what is now the wrong
+ * VM. So we need to force a COW break at GUP time even for reads.
+ */
+static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
+{
+ return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET);
}
static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -710,12 +719,18 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (!vma || check_vma_flags(vma, gup_flags))
return i ? : -EFAULT;
if (is_vm_hugetlb_page(vma)) {
+ if (should_force_cow_break(vma, foll_flags))
+ foll_flags |= FOLL_WRITE;
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i,
- gup_flags, nonblocking);
+ foll_flags, nonblocking);
continue;
}
}
+
+ if (should_force_cow_break(vma, foll_flags))
+ foll_flags |= FOLL_WRITE;
+
retry:
/*
* If we have a pending SIGKILL, don't keep faulting pages and
@@ -1804,6 +1819,10 @@ bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
* the regular GUP.
* Note a difference with get_user_pages_fast: this always returns the
* number of pages pinned, 0 if no pages were pinned.
+ *
+ * Careful, careful! COW breaking can go either way, so a non-write
+ * access can get ambiguous page results. If you call this function without
+ * 'write' set, you'd better be sure that you're ok with that ambiguity.
*/
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
@@ -1831,6 +1850,12 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
*
* We do not adopt an rcu_read_lock(.) here as we also want to
* block IPIs that come from THPs splitting.
+ *
+ * NOTE! We allow read-only gup_fast() here, but you'd better be
+ * careful about possible COW pages. You'll get _a_ COW page, but
+ * not necessarily the one you intended to get depending on what
+ * COW event happens after this. COW may break the page copy in a
+ * random direction.
*/
if (gup_fast_permitted(start, nr_pages, write)) {
@@ -1876,9 +1901,16 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
(void __user *)start, len)))
return -EFAULT;
+ /*
+ * The FAST_GUP case requires FOLL_WRITE even for pure reads,
+ * because get_user_pages() may need to cause an early COW in
+ * order to avoid confusing the normal COW routines. So only
+ * targets that are already writable are safe to do by just
+ * looking at the page tables.
+ */
if (gup_fast_permitted(start, nr_pages, write)) {
local_irq_disable();
- gup_pgd_range(addr, end, write, pages, &nr);
+ gup_pgd_range(addr, end, 1, pages, &nr);
local_irq_enable();
ret = nr;
}
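The comments added to mm/gup.c above amount to a caller-side rule: anything that pins user pages and lets hardware or another context write into them must request write access up front, so any needed COW break happens before the pin; only purely read-only consumers may pass write=0, and they must tolerate receiving "a" COW page rather than a particular one. A hedged caller sketch, with an invented wrapper name, only get_user_pages_fast() and its signature come from the context above.

/* Hedged caller sketch -- example_pin_for_dma() is invented for illustration. */
static int example_pin_for_dma(unsigned long uaddr, int nr_pages,
			       struct page **pages, bool device_writes)
{
	/* write=1 forces any needed COW break before the pages are pinned. */
	return get_user_pages_fast(uaddr, nr_pages, device_writes ? 1 : 0,
				   pages);
}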
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 280b0e71b783..7c374c0fcf0c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -694,7 +694,6 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
transparent_hugepage_use_zero_page()) {
pgtable_t pgtable;
struct page *zero_page;
- bool set;
vm_fault_t ret;
pgtable = pte_alloc_one(vma->vm_mm, haddr);
if (unlikely(!pgtable))
@@ -707,25 +706,25 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
}
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
ret = 0;
- set = false;
if (pmd_none(*vmf->pmd)) {
ret = check_stable_address_space(vma->vm_mm);
if (ret) {
spin_unlock(vmf->ptl);
+ pte_free(vma->vm_mm, pgtable);
} else if (userfaultfd_missing(vma)) {
spin_unlock(vmf->ptl);
+ pte_free(vma->vm_mm, pgtable);
ret = handle_userfault(vmf, VM_UFFD_MISSING);
VM_BUG_ON(ret & VM_FAULT_FALLBACK);
} else {
set_huge_zero_page(pgtable, vma->vm_mm, vma,
haddr, vmf->pmd, zero_page);
spin_unlock(vmf->ptl);
- set = true;
}
- } else
+ } else {
spin_unlock(vmf->ptl);
- if (!set)
pte_free(vma->vm_mm, pgtable);
+ }
return ret;
}
gfp = alloc_hugepage_direct_gfpmask(vma);
@@ -1433,13 +1432,12 @@ out_unlock:
}
/*
- * FOLL_FORCE can write to even unwritable pmd's, but only
- * after we've gone through a COW cycle and they are dirty.
+ * FOLL_FORCE or a forced COW break can write even to unwritable pmd's,
+ * but only after we've gone through a COW cycle and they are dirty.
*/
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
- return pmd_write(pmd) ||
- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+ return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd));
}
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
@@ -2145,7 +2143,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
put_page(page);
add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
return;
- } else if (is_huge_zero_pmd(*pmd)) {
+ } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
/*
* FIXME: Do we want to invalidate secondary mmu by calling
* mmu_notifier_invalidate_range() see comments below inside
@@ -2233,27 +2231,33 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
pte = pte_offset_map(&_pmd, addr);
BUG_ON(!pte_none(*pte));
set_pte_at(mm, addr, pte, entry);
- atomic_inc(&page[i]._mapcount);
- pte_unmap(pte);
- }
-
- /*
- * Set PG_double_map before dropping compound_mapcount to avoid
- * false-negative page_mapped().
- */
- if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
- for (i = 0; i < HPAGE_PMD_NR; i++)
+ if (!pmd_migration)
atomic_inc(&page[i]._mapcount);
+ pte_unmap(pte);
}
- if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
- /* Last compound_mapcount is gone. */
- __dec_node_page_state(page, NR_ANON_THPS);
- if (TestClearPageDoubleMap(page)) {
- /* No need in mapcount reference anymore */
+ if (!pmd_migration) {
+ /*
+ * Set PG_double_map before dropping compound_mapcount to avoid
+ * false-negative page_mapped().
+ */
+ if (compound_mapcount(page) > 1 &&
+ !TestSetPageDoubleMap(page)) {
for (i = 0; i < HPAGE_PMD_NR; i++)
- atomic_dec(&page[i]._mapcount);
+ atomic_inc(&page[i]._mapcount);
}
+
+ lock_page_memcg(page);
+ if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
+ /* Last compound_mapcount is gone. */
+ __dec_lruvec_page_state(page, NR_ANON_THPS);
+ if (TestClearPageDoubleMap(page)) {
+ /* No need in mapcount reference anymore */
+ for (i = 0; i < HPAGE_PMD_NR; i++)
+ atomic_dec(&page[i]._mapcount);
+ }
+ }
+ unlock_page_memcg(page);
}
smp_wmb(); /* make pte visible before pmd */
@@ -2273,6 +2277,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
spinlock_t *ptl;
struct mm_struct *mm = vma->vm_mm;
unsigned long haddr = address & HPAGE_PMD_MASK;
+ bool do_unlock_page = false;
+ pmd_t _pmd;
mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
ptl = pmd_lock(mm, pmd);
@@ -2282,11 +2288,41 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
* pmd against. Otherwise we can end up replacing wrong page.
*/
VM_BUG_ON(freeze && !page);
- if (page && page != pmd_page(*pmd))
- goto out;
+ if (page) {
+ VM_WARN_ON_ONCE(!PageLocked(page));
+ if (page != pmd_page(*pmd))
+ goto out;
+ }
+repeat:
if (pmd_trans_huge(*pmd)) {
- page = pmd_page(*pmd);
+ if (!page) {
+ page = pmd_page(*pmd);
+ /*
+ * An anonymous page must be locked, to ensure that a
+ * concurrent reuse_swap_page() sees stable mapcount;
+ * but reuse_swap_page() is not used on shmem or file,
+ * and page lock must not be taken when zap_pmd_range()
+ * calls __split_huge_pmd() while i_mmap_lock is held.
+ */
+ if (PageAnon(page)) {
+ if (unlikely(!trylock_page(page))) {
+ get_page(page);
+ _pmd = *pmd;
+ spin_unlock(ptl);
+ lock_page(page);
+ spin_lock(ptl);
+ if (unlikely(!pmd_same(*pmd, _pmd))) {
+ unlock_page(page);
+ put_page(page);
+ page = NULL;
+ goto repeat;
+ }
+ put_page(page);
+ }
+ do_unlock_page = true;
+ }
+ }
if (PageMlocked(page))
clear_page_mlock(page);
} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
@@ -2294,6 +2330,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
__split_huge_pmd_locked(vma, pmd, haddr, freeze);
out:
spin_unlock(ptl);
+ if (do_unlock_page)
+ unlock_page(page);
/*
* No need to double call mmu_notifier->invalidate_range() callback.
* They are 3 cases to consider inside __split_huge_pmd_locked():
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6f4ce9547658..2f769a661568 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -68,6 +68,21 @@ DEFINE_SPINLOCK(hugetlb_lock);
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
+static inline bool PageHugeFreed(struct page *head)
+{
+ return page_private(head + 4) == -1UL;
+}
+
+static inline void SetPageHugeFreed(struct page *head)
+{
+ set_page_private(head + 4, -1UL);
+}
+
+static inline void ClearPageHugeFreed(struct page *head)
+{
+ set_page_private(head + 4, 0);
+}
+
/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
@@ -573,13 +588,20 @@ void hugetlb_fix_reserve_counts(struct inode *inode)
{
struct hugepage_subpool *spool = subpool_inode(inode);
long rsv_adjust;
+ bool reserved = false;
rsv_adjust = hugepage_subpool_get_pages(spool, 1);
- if (rsv_adjust) {
+ if (rsv_adjust > 0) {
struct hstate *h = hstate_inode(inode);
- hugetlb_acct_memory(h, 1);
+ if (!hugetlb_acct_memory(h, 1))
+ reserved = true;
+ } else if (!rsv_adjust) {
+ reserved = true;
}
+
+ if (!reserved)
+ pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}
/*
@@ -858,6 +880,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
list_move(&page->lru, &h->hugepage_freelists[nid]);
h->free_huge_pages++;
h->free_huge_pages_node[nid]++;
+ SetPageHugeFreed(page);
}
static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -875,6 +898,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
return NULL;
list_move(&page->lru, &h->hugepage_activelist);
set_page_refcounted(page);
+ ClearPageHugeFreed(page);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
return page;
@@ -1154,14 +1178,16 @@ static inline void destroy_compound_gigantic_page(struct page *page,
static void update_and_free_page(struct hstate *h, struct page *page)
{
int i;
+ struct page *subpage = page;
if (hstate_is_gigantic(h) && !gigantic_page_supported())
return;
h->nr_huge_pages--;
h->nr_huge_pages_node[page_to_nid(page)]--;
- for (i = 0; i < pages_per_huge_page(h); i++) {
- page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+ for (i = 0; i < pages_per_huge_page(h);
+ i++, subpage = mem_map_next(subpage, page, i)) {
+ subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
1 << PG_referenced | 1 << PG_dirty |
1 << PG_active | 1 << PG_private |
1 << PG_writeback);
@@ -1196,12 +1222,11 @@ struct hstate *size_to_hstate(unsigned long size)
*/
bool page_huge_active(struct page *page)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
- return PageHead(page) && PagePrivate(&page[1]);
+ return PageHeadHuge(page) && PagePrivate(&page[1]);
}
/* never called for tail page */
-static void set_page_huge_active(struct page *page)
+void set_page_huge_active(struct page *page)
{
VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
SetPagePrivate(&page[1]);
@@ -1305,6 +1330,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
set_hugetlb_cgroup(page, NULL);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
+ ClearPageHugeFreed(page);
spin_unlock(&hugetlb_lock);
}
@@ -1500,6 +1526,7 @@ int dissolve_free_huge_page(struct page *page)
{
int rc = -EBUSY;
+retry:
/* Not to disrupt normal path by vainly holding hugetlb_lock */
if (!PageHuge(page))
return 0;
@@ -1516,6 +1543,26 @@ int dissolve_free_huge_page(struct page *page)
int nid = page_to_nid(head);
if (h->free_huge_pages - h->resv_huge_pages == 0)
goto out;
+
+ /*
+ * We should make sure that the page is already on the free list
+ * when it is dissolved.
+ */
+ if (unlikely(!PageHugeFreed(head))) {
+ spin_unlock(&hugetlb_lock);
+ cond_resched();
+
+ /*
+ * Theoretically, we should return -EBUSY when we
+ * encounter this race. In fact, we have a chance to
+ * dissolve the page successfully if we retry, because
+ * the race window is quite small. Seizing this
+ * opportunity is an optimization that increases the
+ * success rate of dissolving the page.
+ */
+ goto retry;
+ }
+
/*
* Move PageHWPoison flag from head page to the raw error page,
* which makes any subpages rather than the error page reusable.
@@ -2610,8 +2657,10 @@ static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
return -ENOMEM;
retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
- if (retval)
+ if (retval) {
kobject_put(hstate_kobjs[hi]);
+ hstate_kobjs[hi] = NULL;
+ }
return retval;
}
@@ -2918,6 +2967,22 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
}
#ifdef CONFIG_SYSCTL
+static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
+ void *buffer, size_t *length,
+ loff_t *ppos, unsigned long *out)
+{
+ struct ctl_table dup_table;
+
+ /*
+ * In order to avoid races with __do_proc_doulongvec_minmax(), we
+ * duplicate @table and alter the duplicate instead.
+ */
+ dup_table = *table;
+ dup_table.data = out;
+
+ return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
+}
+
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
@@ -2929,9 +2994,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
if (!hugepages_supported())
return -EOPNOTSUPP;
- table->data = &tmp;
- table->maxlen = sizeof(unsigned long);
- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
+ &tmp);
if (ret)
goto out;
@@ -2975,9 +3039,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
if (write && hstate_is_gigantic(h))
return -EINVAL;
- table->data = &tmp;
- table->maxlen = sizeof(unsigned long);
- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
+ &tmp);
if (ret)
goto out;
@@ -3799,7 +3862,7 @@ retry:
* handling userfault. Reacquire after handling
* fault to make calling code simpler.
*/
- hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
+ hash = hugetlb_fault_mutex_hash(h, mapping, idx);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
ret = handle_userfault(&vmf, VM_UFFD_MISSING);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -3838,7 +3901,7 @@ retry:
* So we need to block hugepage fault by PG_hwpoison bit check.
*/
if (unlikely(PageHWPoison(page))) {
- ret = VM_FAULT_HWPOISON |
+ ret = VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(hstate_index(h));
goto backout_unlocked;
}
@@ -3908,7 +3971,7 @@ backout_unlocked:
#ifdef CONFIG_SMP
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
- pgoff_t idx, unsigned long address)
+ pgoff_t idx)
{
unsigned long key[2];
u32 hash;
@@ -3916,7 +3979,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
key[0] = (unsigned long) mapping;
key[1] = idx;
- hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
+ hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
return hash & (num_fault_mutexes - 1);
}
@@ -3926,7 +3989,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
* return 0 and avoid the hashing overhead.
*/
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
- pgoff_t idx, unsigned long address)
+ pgoff_t idx)
{
return 0;
}
@@ -3970,7 +4033,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* get spurious allocation failures if two CPUs race to instantiate
* the same page in the page cache.
*/
- hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
+ hash = hugetlb_fault_mutex_hash(h, mapping, idx);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
entry = huge_ptep_get(ptep);
@@ -4650,25 +4713,23 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
{
- unsigned long check_addr = *start;
+ unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
+ v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
- if (!(vma->vm_flags & VM_MAYSHARE))
+ /*
+ * The vma needs to span at least one aligned PUD size, and the start,end
+ * range must at least partially overlap it.
+ */
+ if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
+ (*end <= v_start) || (*start >= v_end))
return;
- for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
- unsigned long a_start = check_addr & PUD_MASK;
- unsigned long a_end = a_start + PUD_SIZE;
+ /* Extend the range to be PUD aligned for a worst case scenario */
+ if (*start > v_start)
+ *start = ALIGN_DOWN(*start, PUD_SIZE);
- /*
- * If sharing is possible, adjust start/end if necessary.
- */
- if (range_in_vma(vma, a_start, a_end)) {
- if (a_start < *start)
- *start = a_start;
- if (a_end > *end)
- *end = a_end;
- }
- }
+ if (*end < v_end)
+ *end = ALIGN(*end, PUD_SIZE);
}
/*
@@ -4820,8 +4881,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
{
pgd_t *pgd;
p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
+ pud_t *pud, pud_entry;
+ pmd_t *pmd, pmd_entry;
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
@@ -4831,17 +4892,19 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return NULL;
pud = pud_offset(p4d, addr);
- if (sz != PUD_SIZE && pud_none(*pud))
+ pud_entry = READ_ONCE(*pud);
+ if (sz != PUD_SIZE && pud_none(pud_entry))
return NULL;
/* hugepage or swap? */
- if (pud_huge(*pud) || !pud_present(*pud))
+ if (pud_huge(pud_entry) || !pud_present(pud_entry))
return (pte_t *)pud;
pmd = pmd_offset(pud, addr);
- if (sz != PMD_SIZE && pmd_none(*pmd))
+ pmd_entry = READ_ONCE(*pmd);
+ if (sz != PMD_SIZE && pmd_none(pmd_entry))
return NULL;
/* hugepage or swap? */
- if (pmd_huge(*pmd) || !pmd_present(*pmd))
+ if (pmd_huge(pmd_entry) || !pmd_present(pmd_entry))
return (pte_t *)pmd;
return NULL;
@@ -4928,9 +4991,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
{
bool ret = true;
- VM_BUG_ON_PAGE(!PageHead(page), page);
spin_lock(&hugetlb_lock);
- if (!page_huge_active(page) || !get_page_unless_zero(page)) {
+ if (!PageHeadHuge(page) || !page_huge_active(page) ||
+ !get_page_unless_zero(page)) {
ret = false;
goto unlock;
}
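As a worked example of the reworked adjust_range_if_pmd_sharing_possible() above: assume PUD_SIZE is 1 GiB and a VM_MAYSHARE vma covers [1.5 GiB, 4.25 GiB), so v_start is 2 GiB and v_end is 4 GiB. An unmap range of [2.1 GiB, 2.3 GiB) overlaps [v_start, v_end) and is widened to the PUD-aligned [2 GiB, 3 GiB), covering the worst case for shared PMDs, while a range of [1.6 GiB, 1.9 GiB) ends at or before v_start and is left untouched.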
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
index 7a2a2f13f86f..7a731c74be7d 100644
--- a/mm/kasan/kasan_init.c
+++ b/mm/kasan/kasan_init.c
@@ -372,9 +372,10 @@ static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
if (kasan_pte_table(*pmd)) {
if (IS_ALIGNED(addr, PMD_SIZE) &&
- IS_ALIGNED(next, PMD_SIZE))
+ IS_ALIGNED(next, PMD_SIZE)) {
pmd_clear(pmd);
- continue;
+ continue;
+ }
}
pte = pte_offset_kernel(pmd, addr);
kasan_remove_pte_table(pte, addr, next);
@@ -397,9 +398,10 @@ static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
if (kasan_pmd_table(*pud)) {
if (IS_ALIGNED(addr, PUD_SIZE) &&
- IS_ALIGNED(next, PUD_SIZE))
+ IS_ALIGNED(next, PUD_SIZE)) {
pud_clear(pud);
- continue;
+ continue;
+ }
}
pmd = pmd_offset(pud, addr);
pmd_base = pmd_offset(pud, 0);
@@ -423,9 +425,10 @@ static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
if (kasan_pud_table(*p4d)) {
if (IS_ALIGNED(addr, P4D_SIZE) &&
- IS_ALIGNED(next, P4D_SIZE))
+ IS_ALIGNED(next, P4D_SIZE)) {
p4d_clear(p4d);
- continue;
+ continue;
+ }
}
pud = pud_offset(p4d, addr);
kasan_remove_pud_table(pud, addr, next);
@@ -457,9 +460,10 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
if (kasan_p4d_table(*pgd)) {
if (IS_ALIGNED(addr, PGDIR_SIZE) &&
- IS_ALIGNED(next, PGDIR_SIZE))
+ IS_ALIGNED(next, PGDIR_SIZE)) {
pgd_clear(pgd);
- continue;
+ continue;
+ }
}
p4d = p4d_offset(pgd, addr);
@@ -483,7 +487,6 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
ret = kasan_populate_zero_shadow(shadow_start, shadow_end);
if (ret)
- kasan_remove_zero_shadow(shadow_start,
- size >> KASAN_SHADOW_SCALE_SHIFT);
+ kasan_remove_zero_shadow(start, size);
return ret;
}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index ecefdba4b0dd..5dd14ef2e1de 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -53,6 +53,9 @@ enum scan_result {
#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>
+static struct task_struct *khugepaged_thread __read_mostly;
+static DEFINE_MUTEX(khugepaged_mutex);
+
/* default scan 8*512 pte (or vmas) every 30 second */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
@@ -394,7 +397,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
- return atomic_read(&mm->mm_users) == 0;
+ return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
}
static bool hugepage_vma_check(struct vm_area_struct *vma,
@@ -427,7 +430,7 @@ int __khugepaged_enter(struct mm_struct *mm)
return -ENOMEM;
/* __khugepaged_exit() must not run from under us */
- VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
+ VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
free_mm_slot(mm_slot);
return 0;
@@ -613,17 +616,17 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
mmu_notifier_test_young(vma->vm_mm, address))
referenced++;
}
- if (likely(writable)) {
- if (likely(referenced)) {
- result = SCAN_SUCCEED;
- trace_mm_collapse_huge_page_isolate(page, none_or_zero,
- referenced, writable, result);
- return 1;
- }
- } else {
+
+ if (unlikely(!writable)) {
result = SCAN_PAGE_RO;
+ } else if (unlikely(!referenced)) {
+ result = SCAN_LACK_REFERENCED_PAGE;
+ } else {
+ result = SCAN_SUCCEED;
+ trace_mm_collapse_huge_page_isolate(page, none_or_zero,
+ referenced, writable, result);
+ return 1;
}
-
out:
release_pte_pages(pte, _pte);
trace_mm_collapse_huge_page_isolate(page, none_or_zero,
@@ -820,6 +823,18 @@ static struct page *khugepaged_alloc_hugepage(bool *wait)
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
+ /*
+ * If the hpage allocated earlier was briefly exposed in page cache
+ * before collapse_file() failed, it is possible that racing lookups
+ * have not yet completed, and would then be unpleasantly surprised by
+ * finding the hpage reused for the same mapping at a different offset.
+ * Just release the previous allocation if there is any danger of that.
+ */
+ if (*hpage && page_count(*hpage) > 1) {
+ put_page(*hpage);
+ *hpage = NULL;
+ }
+
if (!*hpage)
*hpage = khugepaged_alloc_hugepage(wait);
@@ -1005,9 +1020,6 @@ static void collapse_huge_page(struct mm_struct *mm,
* handled by the anon_vma lock + PG_lock.
*/
down_write(&mm->mmap_sem);
- result = SCAN_ANY_PROCESS;
- if (!mmget_still_valid(mm))
- goto out;
result = hugepage_vma_revalidate(mm, address, &vma);
if (result)
goto out;
@@ -1251,6 +1263,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
struct vm_area_struct *vma;
+ struct mm_struct *mm;
unsigned long addr;
pmd_t *pmd, _pmd;
@@ -1264,7 +1277,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
continue;
if (vma->vm_end < addr + HPAGE_PMD_SIZE)
continue;
- pmd = mm_find_pmd(vma->vm_mm, addr);
+ mm = vma->vm_mm;
+ pmd = mm_find_pmd(mm, addr);
if (!pmd)
continue;
/*
@@ -1273,14 +1287,16 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* re-fault. Not ideal, but it's more important to not disturb
* the system too much.
*/
- if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
- spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
- /* assume page table is clear */
- _pmd = pmdp_collapse_flush(vma, addr, pmd);
- spin_unlock(ptl);
- up_write(&vma->vm_mm->mmap_sem);
- mm_dec_nr_ptes(vma->vm_mm);
- pte_free(vma->vm_mm, pmd_pgtable(_pmd));
+ if (down_write_trylock(&mm->mmap_sem)) {
+ if (!khugepaged_test_exit(mm)) {
+ spinlock_t *ptl = pmd_lock(mm, pmd);
+ /* assume page table is clear */
+ _pmd = pmdp_collapse_flush(vma, addr, pmd);
+ spin_unlock(ptl);
+ mm_dec_nr_ptes(mm);
+ pte_free(mm, pmd_pgtable(_pmd));
+ }
+ up_write(&mm->mmap_sem);
}
}
i_mmap_unlock_write(mapping);
@@ -1939,8 +1955,6 @@ static void set_recommended_min_free_kbytes(void)
int start_stop_khugepaged(void)
{
- static struct task_struct *khugepaged_thread __read_mostly;
- static DEFINE_MUTEX(khugepaged_mutex);
int err = 0;
mutex_lock(&khugepaged_mutex);
@@ -1967,3 +1981,11 @@ fail:
mutex_unlock(&khugepaged_mutex);
return err;
}
+
+void khugepaged_min_free_kbytes_update(void)
+{
+ mutex_lock(&khugepaged_mutex);
+ if (khugepaged_enabled() && khugepaged_thread)
+ set_recommended_min_free_kbytes();
+ mutex_unlock(&khugepaged_mutex);
+}
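khugepaged_min_free_kbytes_update() only gains a definition in this excerpt; its call site is not shown. Presumably it is meant to be invoked from whatever path recomputes the zone watermarks, so khugepaged can re-derive its min_free_kbytes recommendation. A hedged, invented caller sketch; only khugepaged_min_free_kbytes_update() comes from the hunk above.

/* Hedged caller sketch -- the wrapper name is invented. */
static void example_after_watermarks_recalculated(void)
{
	/* Let khugepaged re-apply its min_free_kbytes recommendation. */
	khugepaged_min_free_kbytes_update();
}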
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 5eeabece0c17..f54734abf946 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -2039,7 +2039,7 @@ void __init kmemleak_init(void)
create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
KMEMLEAK_GREY, GFP_ATOMIC);
/* only register .data..ro_after_init if not within .data */
- if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
+ if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
create_object((unsigned long)__start_ro_after_init,
__end_ro_after_init - __start_ro_after_init,
KMEMLEAK_GREY, GFP_ATOMIC);
diff --git a/mm/ksm.c b/mm/ksm.c
index b3ea0f0316eb..87a541ab1474 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -778,6 +778,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
stable_node->rmap_hlist_len--;
put_anon_vma(rmap_item->anon_vma);
+ rmap_item->head = NULL;
rmap_item->address &= PAGE_MASK;
} else if (rmap_item->address & UNSTABLE_FLAG) {
@@ -2106,8 +2107,16 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
down_read(&mm->mmap_sem);
vma = find_mergeable_vma(mm, rmap_item->address);
- err = try_to_merge_one_page(vma, page,
- ZERO_PAGE(rmap_item->address));
+ if (vma) {
+ err = try_to_merge_one_page(vma, page,
+ ZERO_PAGE(rmap_item->address));
+ } else {
+ /*
+ * If the vma is out of date, we do not need to
+ * continue.
+ */
+ err = 0;
+ }
up_read(&mm->mmap_sem);
/*
* In case of failure, the page was not really empty, so we
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 758653dd1443..f7d2ffc209fb 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -542,7 +542,6 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
struct list_lru_node *nlru = &lru->node[nid];
int dst_idx = dst_memcg->kmemcg_id;
struct list_lru_one *src, *dst;
- bool set;
/*
* Since list_lru_{add,del} may be called under an IRQ-safe lock,
@@ -554,11 +553,12 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
dst = list_lru_from_memcg_idx(nlru, dst_idx);
list_splice_init(&src->list, &dst->list);
- set = (!dst->nr_items && src->nr_items);
- dst->nr_items += src->nr_items;
- if (set)
+
+ if (src->nr_items) {
+ dst->nr_items += src->nr_items;
memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
- src->nr_items = 0;
+ src->nr_items = 0;
+ }
spin_unlock_irq(&nlru->lock);
}
diff --git a/mm/maccess.c b/mm/maccess.c
index ec00be51a24f..6e41ba452e5e 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -5,8 +5,32 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
+static __always_inline long
+probe_read_common(void *dst, const void __user *src, size_t size)
+{
+ long ret;
+
+ pagefault_disable();
+ ret = __copy_from_user_inatomic(dst, src, size);
+ pagefault_enable();
+
+ return ret ? -EFAULT : 0;
+}
+
+static __always_inline long
+probe_write_common(void __user *dst, const void *src, size_t size)
+{
+ long ret;
+
+ pagefault_disable();
+ ret = __copy_to_user_inatomic(dst, src, size);
+ pagefault_enable();
+
+ return ret ? -EFAULT : 0;
+}
+
/**
- * probe_kernel_read(): safely attempt to read from a location
+ * probe_kernel_read(): safely attempt to read from a kernel-space location
* @dst: pointer to the buffer that shall take the data
* @src: address to read from
* @size: size of the data chunk
@@ -29,17 +53,41 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
- pagefault_disable();
- ret = __copy_from_user_inatomic(dst,
- (__force const void __user *)src, size);
- pagefault_enable();
+ ret = probe_read_common(dst, (__force const void __user *)src, size);
set_fs(old_fs);
- return ret ? -EFAULT : 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(probe_kernel_read);
/**
+ * probe_user_read(): safely attempt to read from a user-space location
+ * @dst: pointer to the buffer that shall take the data
+ * @src: address to read from. This must be a user address.
+ * @size: size of the data chunk
+ *
+ * Safely read from user address @src to the buffer at @dst. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+
+long __weak probe_user_read(void *dst, const void __user *src, size_t size)
+ __attribute__((alias("__probe_user_read")));
+
+long __probe_user_read(void *dst, const void __user *src, size_t size)
+{
+ long ret = -EFAULT;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(USER_DS);
+ if (access_ok(VERIFY_READ, src, size))
+ ret = probe_read_common(dst, src, size);
+ set_fs(old_fs);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(probe_user_read);
+
+/**
* probe_kernel_write(): safely attempt to write to a location
* @dst: address to write to
* @src: pointer to the data that shall be written
@@ -48,6 +96,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
+
long __weak probe_kernel_write(void *dst, const void *src, size_t size)
__attribute__((alias("__probe_kernel_write")));
@@ -57,16 +106,41 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
- pagefault_disable();
- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
- pagefault_enable();
+ ret = probe_write_common((__force void __user *)dst, src, size);
set_fs(old_fs);
- return ret ? -EFAULT : 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(probe_kernel_write);
/**
+ * probe_user_write(): safely attempt to write to a user-space location
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+
+long __weak probe_user_write(void __user *dst, const void *src, size_t size)
+ __attribute__((alias("__probe_user_write")));
+
+long __probe_user_write(void __user *dst, const void *src, size_t size)
+{
+ long ret = -EFAULT;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(USER_DS);
+ if (access_ok(VERIFY_WRITE, dst, size))
+ ret = probe_write_common(dst, src, size);
+ set_fs(old_fs);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(probe_user_write);
+
+/**
* strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
* @dst: Destination address, in kernel space. This buffer must be at
* least @count bytes long.
@@ -105,3 +179,76 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
return ret ? -EFAULT : src - unsafe_addr;
}
+
+/**
+ * strncpy_from_unsafe_user: - Copy a NUL terminated string from unsafe user
+ * address.
+ * @dst: Destination address, in kernel space. This buffer must be at
+ * least @count bytes long.
+ * @unsafe_addr: Unsafe user address.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from unsafe user address to kernel buffer.
+ *
+ * On success, returns the length of the string INCLUDING the trailing NUL.
+ *
+ * If access fails, returns -EFAULT (some data may have been copied
+ * and the trailing NUL added).
+ *
+ * If @count is smaller than the length of the string, copies @count-1 bytes,
+ * sets the last byte of @dst buffer to NUL and returns @count.
+ */
+long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
+ long count)
+{
+ mm_segment_t old_fs = get_fs();
+ long ret;
+
+ if (unlikely(count <= 0))
+ return 0;
+
+ set_fs(USER_DS);
+ pagefault_disable();
+ ret = strncpy_from_user(dst, unsafe_addr, count);
+ pagefault_enable();
+ set_fs(old_fs);
+
+ if (ret >= count) {
+ ret = count;
+ dst[ret - 1] = '\0';
+ } else if (ret > 0) {
+ ret++;
+ }
+
+ return ret;
+}
+
+/**
+ * strnlen_unsafe_user: - Get the size of a user string INCLUDING final NUL.
+ * @unsafe_addr: The string to measure.
+ * @count: Maximum count (including NUL)
+ *
+ * Get the size of a NUL-terminated string in user space without pagefault.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ *
+ * If the string is too long, returns a number larger than @count. The
+ * caller has to check the return value against "> count".
+ * On exception (or invalid count), returns 0.
+ *
+ * Unlike strnlen_user, this can be used from IRQ handler etc. because
+ * it disables pagefaults.
+ */
+long strnlen_unsafe_user(const void __user *unsafe_addr, long count)
+{
+ mm_segment_t old_fs = get_fs();
+ int ret;
+
+ set_fs(USER_DS);
+ pagefault_disable();
+ ret = strnlen_user(unsafe_addr, count);
+ pagefault_enable();
+ set_fs(old_fs);
+
+ return ret;
+}
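The four helpers added to mm/maccess.c above share one property: they never sleep or fault in pages, so they are usable from atomic and tracing contexts, returning -EFAULT (or 0 for strnlen_unsafe_user()) when the user memory is not accessible. A hedged usage sketch follows; only the helper signatures come from the code above, the wrapper, its buffer size and the pr_debug() message are invented.

/* Hedged usage sketch -- example_dump_user_string() is invented. */
static void example_dump_user_string(const char __user *uptr)
{
	char buf[64];
	long len;

	/* Does not fault in pages; safe from atomic/tracing context. */
	len = strncpy_from_unsafe_user(buf, uptr, sizeof(buf));
	if (len < 0)
		return;		/* -EFAULT: user memory not resident or invalid */

	pr_debug("user string (%ld bytes incl. NUL): %s\n", len, buf);
}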
diff --git a/mm/memblock.c b/mm/memblock.c
index bb4e32c6b19e..4f7c5c3c442c 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -234,14 +234,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
*
* Find @size free area aligned to @align in the specified range and node.
*
- * When allocation direction is bottom-up, the @start should be greater
- * than the end of the kernel image. Otherwise, it will be trimmed. The
- * reason is that we want the bottom-up allocation just near the kernel
- * image so it is highly likely that the allocated memory and the kernel
- * will reside in the same node.
- *
- * If bottom-up allocation failed, will try to allocate memory top-down.
- *
* Return:
* Found address on success, 0 on failure.
*/
@@ -250,8 +242,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
phys_addr_t end, int nid,
enum memblock_flags flags)
{
- phys_addr_t kernel_end, ret;
-
/* pump up @end */
if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
end = memblock.current_limit;
@@ -259,40 +249,13 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
/* avoid allocating the first page */
start = max_t(phys_addr_t, start, PAGE_SIZE);
end = max(start, end);
- kernel_end = __pa_symbol(_end);
-
- /*
- * try bottom-up allocation only when bottom-up mode
- * is set and @end is above the kernel image.
- */
- if (memblock_bottom_up() && end > kernel_end) {
- phys_addr_t bottom_up_start;
-
- /* make sure we will allocate above the kernel */
- bottom_up_start = max(start, kernel_end);
- /* ok, try bottom-up allocation first */
- ret = __memblock_find_range_bottom_up(bottom_up_start, end,
- size, align, nid, flags);
- if (ret)
- return ret;
-
- /*
- * we always limit bottom-up allocation above the kernel,
- * but top-down allocation doesn't have the limit, so
- * retrying top-down allocation may succeed when bottom-up
- * allocation failed.
- *
- * bottom-up allocation is expected to be fail very rarely,
- * so we use WARN_ONCE() here to see the stack trace if
- * fail happens.
- */
- WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
- "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
- }
-
- return __memblock_find_range_top_down(start, end, size, align, nid,
- flags);
+ if (memblock_bottom_up())
+ return __memblock_find_range_bottom_up(start, end, size, align,
+ nid, flags);
+ else
+ return __memblock_find_range_top_down(start, end, size, align,
+ nid, flags);
}
/**
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3b78b6af353b..87cd5bf1b487 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4780,7 +4780,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
struct page *page = NULL;
swp_entry_t ent = pte_to_swp_entry(ptent);
- if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
+ if (!(mc.flags & MOVE_ANON))
return NULL;
/*
@@ -4799,6 +4799,9 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
return page;
}
+ if (non_swap_entry(ent))
+ return NULL;
+
/*
* Because lookup_swap_cache() updates some statistics counter,
* we call find_get_page() with swapper_space directly.
@@ -5147,7 +5150,6 @@ static void __mem_cgroup_clear_mc(void)
if (!mem_cgroup_is_root(mc.to))
page_counter_uncharge(&mc.to->memory, mc.moved_swap);
- mem_cgroup_id_get_many(mc.to, mc.moved_swap);
css_put_many(&mc.to->css, mc.moved_swap);
mc.moved_swap = 0;
@@ -5338,7 +5340,8 @@ put: /* get_mctgt_type() gets the page */
ent = target.ent;
if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
mc.precharge--;
- /* we fixup refcnts and charges later. */
+ mem_cgroup_id_get_many(mc.to, 1);
+ /* we fixup other refcnts and charges later. */
mc.moved_swap++;
}
break;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 148fdd929a19..034607a68ccb 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1220,7 +1220,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
* communicated in siginfo, see kill_proc()
*/
start = (page->index << PAGE_SHIFT) & ~(size - 1);
- unmap_mapping_range(page->mapping, start, start + size, 0);
+ unmap_mapping_range(page->mapping, start, size, 0);
}
kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
rc = 0;
diff --git a/mm/memory.c b/mm/memory.c
index bbf0cc4066c8..c2011c51f15d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -116,6 +116,18 @@ int randomize_va_space __read_mostly =
2;
#endif
+#ifndef arch_faults_on_old_pte
+static inline bool arch_faults_on_old_pte(void)
+{
+ /*
+ * Those arches which don't have hw access flag feature need to
+ * implement their own helper. By default, "true" means pagefault
+ * will be hit on old pte.
+ */
+ return true;
+}
+#endif
+
static int __init disable_randmaps(char *s)
{
randomize_va_space = 0;
@@ -136,7 +148,7 @@ static int __init init_zero_pfn(void)
zero_pfn = page_to_pfn(ZERO_PAGE(0));
return 0;
}
-core_initcall(init_zero_pfn);
+early_initcall(init_zero_pfn);
#if defined(SPLIT_RSS_COUNTING)
@@ -1983,11 +1995,11 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
- pte_t *pte;
+ pte_t *pte, *mapped_pte;
spinlock_t *ptl;
int err = 0;
- pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+ mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
arch_enter_lazy_mmu_mode();
@@ -2001,7 +2013,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(pte - 1, ptl);
+ pte_unmap_unlock(mapped_pte, ptl);
return err;
}
@@ -2335,32 +2347,101 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
return same;
}
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
+static inline bool cow_user_page(struct page *dst, struct page *src,
+ struct vm_fault *vmf)
{
+ bool ret;
+ void *kaddr;
+ void __user *uaddr;
+ bool locked = false;
+ struct vm_area_struct *vma = vmf->vma;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long addr = vmf->address;
+
debug_dma_assert_idle(src);
+ if (likely(src)) {
+ copy_user_highpage(dst, src, addr, vma);
+ return true;
+ }
+
/*
* If the source page was a PFN mapping, we don't have
* a "struct page" for it. We do a best-effort copy by
* just copying from the original user address. If that
* fails, we just zero-fill it. Live with it.
*/
- if (unlikely(!src)) {
- void *kaddr = kmap_atomic(dst);
- void __user *uaddr = (void __user *)(va & PAGE_MASK);
+ kaddr = kmap_atomic(dst);
+ uaddr = (void __user *)(addr & PAGE_MASK);
+
+ /*
+ * On architectures with software "accessed" bits, we would
+ * take a double page fault, so mark it accessed here.
+ */
+ if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
+ pte_t entry;
+
+ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
+ locked = true;
+ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
+ /*
+ * Other thread has already handled the fault
+ * and we don't need to do anything. If it's
+ * not the case, the fault will be triggered
+ * again on the same address.
+ */
+ ret = false;
+ goto pte_unlock;
+ }
+
+ entry = pte_mkyoung(vmf->orig_pte);
+ if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
+ update_mmu_cache(vma, addr, vmf->pte);
+ }
+
+ /*
+ * This really shouldn't fail, because the page is there
+ * in the page tables. But it might just be unreadable,
+ * in which case we just give up and fill the result with
+ * zeroes.
+ */
+ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
+ if (locked)
+ goto warn;
+
+ /* Re-validate under PTL if the page is still mapped */
+ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
+ locked = true;
+ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
+ /* The PTE changed under us. Retry page fault. */
+ ret = false;
+ goto pte_unlock;
+ }
/*
- * This really shouldn't fail, because the page is there
- * in the page tables. But it might just be unreadable,
- * in which case we just give up and fill the result with
- * zeroes.
+ * The same page can have been mapped back since the last copy attempt.
+ * Try to copy again under PTL.
*/
- if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
+ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
+ /*
+ * Warn in case there is some obscure
+ * use-case.
+ */
+warn:
+ WARN_ON_ONCE(1);
clear_page(kaddr);
- kunmap_atomic(kaddr);
- flush_dcache_page(dst);
- } else
- copy_user_highpage(dst, src, va, vma);
+ }
+ }
+
+ ret = true;
+
+pte_unlock:
+ if (locked)
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ kunmap_atomic(kaddr);
+ flush_dcache_page(dst);
+
+ return ret;
}
static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
@@ -2514,7 +2595,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
vmf->address);
if (!new_page)
goto oom;
- cow_user_page(new_page, old_page, vmf->address, vma);
+
+ if (!cow_user_page(new_page, old_page, vmf)) {
+ /*
+ * COW failed, if the fault was solved by other,
+ * it's fine. If not, userspace would re-fault on
+ * the same address and we will handle the fault
+ * from the second attempt.
+ */
+ put_page(new_page);
+ if (old_page)
+ put_page(old_page);
+ return 0;
+ }
}
if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
@@ -4792,17 +4885,19 @@ long copy_huge_page_from_user(struct page *dst_page,
void *page_kaddr;
unsigned long i, rc = 0;
unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
+ struct page *subpage = dst_page;
- for (i = 0; i < pages_per_huge_page; i++) {
+ for (i = 0; i < pages_per_huge_page;
+ i++, subpage = mem_map_next(subpage, dst_page, i)) {
if (allow_pagefault)
- page_kaddr = kmap(dst_page + i);
+ page_kaddr = kmap(subpage);
else
- page_kaddr = kmap_atomic(dst_page + i);
+ page_kaddr = kmap_atomic(subpage);
rc = copy_from_user(page_kaddr,
(const void __user *)(src + i * PAGE_SIZE),
PAGE_SIZE);
if (allow_pagefault)
- kunmap(dst_page + i);
+ kunmap(subpage);
else
kunmap_atomic(page_kaddr);
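The arch_faults_on_old_pte() default added near the top of mm/memory.c exists so that cow_user_page() can mark the pte young before copying on architectures where an old pte would otherwise trigger a second fault. A hedged sketch of how an architecture with a hardware "accessed" bit might override the default, following the #ifndef/#define override convention used above; the placement in an arch header is an assumption.

/*
 * Hedged arch-header sketch -- not part of this diff. Only the hook name
 * and the override-by-#define convention come from the mm/memory.c hunk.
 */
#define arch_faults_on_old_pte arch_faults_on_old_pte
static inline bool arch_faults_on_old_pte(void)
{
	/* Hardware sets the accessed bit, so an old pte does not fault. */
	return false;
}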
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index aae7ff485671..e60e28131f67 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -733,7 +733,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
* are reserved so nobody should be touching them so we should be safe
*/
memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
- MEMMAP_HOTPLUG, altmap);
+ MEMINIT_HOTPLUG, altmap);
set_zone_contiguous(zone);
}
@@ -1102,7 +1102,8 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
}
/* link memory sections under this node.*/
- ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1));
+ ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1),
+ MEMINIT_HOTPLUG);
BUG_ON(ret);
/* create new memmap entry */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 68c46da82aac..3cd27c1c729f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -496,7 +496,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long flags = qp->flags;
int ret;
bool has_unmovable = false;
- pte_t *pte;
+ pte_t *pte, *mapped_pte;
spinlock_t *ptl;
ptl = pmd_trans_huge_lock(pmd, vma);
@@ -510,7 +510,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
if (pmd_trans_unstable(pmd))
return 0;
- pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
if (!pte_present(*pte))
continue;
@@ -542,7 +542,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
} else
break;
}
- pte_unmap_unlock(pte - 1, ptl);
+ pte_unmap_unlock(mapped_pte, ptl);
cond_resched();
if (has_unmovable)
diff --git a/mm/mmap.c b/mm/mmap.c
index a98f09b83019..f875386e7acd 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2077,6 +2077,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = 0;
+ info.align_offset = 0;
return vm_unmapped_area(&info);
}
#endif
@@ -2118,6 +2119,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base;
info.align_mask = 0;
+ info.align_offset = 0;
addr = vm_unmapped_area(&info);
/*
@@ -3100,6 +3102,7 @@ void exit_mmap(struct mm_struct *mm)
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
vma = remove_vma(vma);
+ cond_resched();
}
vm_unacct_memory(nr_accounted);
}
diff --git a/mm/mremap.c b/mm/mremap.c
index a9617e72e6b7..33d8bbe24ddd 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -221,7 +221,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
if (!new_pmd)
break;
- if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
+ if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) {
if (extent == HPAGE_PMD_SIZE) {
bool moved;
/* See comment in move_ptes() */
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a581fe2a2f1f..928b3b5e24e6 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -62,6 +62,8 @@ int sysctl_oom_dump_tasks = 1;
* and mark_oom_victim
*/
DEFINE_MUTEX(oom_lock);
+/* Serializes oom_score_adj and oom_score_adj_min updates */
+DEFINE_MUTEX(oom_adj_mutex);
#ifdef CONFIG_NUMA
/**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e5c610d711f3..4446a523e684 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -66,6 +66,7 @@
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
+#include <linux/khugepaged.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -1115,6 +1116,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
struct page *page, *tmp;
LIST_HEAD(head);
+ /*
+ * Clamp count to the number of pages on the pcp lists; otherwise
+ * the while (list_empty(list)) loop below could spin forever.
+ */
+ count = min(pcp->count, count);
while (count) {
struct list_head *list;
@@ -1422,6 +1428,7 @@ void set_zone_contiguous(struct zone *zone)
if (!__pageblock_pfn_to_page(block_start_pfn,
block_end_pfn, zone))
return;
+ cond_resched();
}
/* We confirm that there is no hole */
@@ -1585,6 +1592,13 @@ static int __init deferred_init_memmap(void *data)
BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
pgdat->first_deferred_pfn = ULONG_MAX;
+ /*
+ * Once we unlock here, the zone can no longer be grown, so if an
+ * interrupt thread must allocate this early in boot, the zone must
+ * be pre-grown before deferred page initialization starts.
+ */
+ pgdat_resize_unlock(pgdat, &flags);
+
/* Only the highest zone is deferred so find it */
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
zone = pgdat->node_zones + zid;
@@ -1609,7 +1623,6 @@ static int __init deferred_init_memmap(void *data)
epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
deferred_free_pages(nid, zid, spfn, epfn);
}
- pgdat_resize_unlock(pgdat, &flags);
/* Sanity check that the next zone really is unpopulated */
WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
@@ -1656,17 +1669,6 @@ deferred_grow_zone(struct zone *zone, unsigned int order)
pgdat_resize_lock(pgdat, &flags);
/*
- * If deferred pages have been initialized while we were waiting for
- * the lock, return true, as the zone was grown. The caller will retry
- * this zone. We won't return to this function since the caller also
- * has this static branch.
- */
- if (!static_branch_unlikely(&deferred_pages)) {
- pgdat_resize_unlock(pgdat, &flags);
- return true;
- }
-
- /*
* If someone grew this zone while we were waiting for spinlock, return
* true, as there might be enough pages already.
*/
@@ -4537,11 +4539,11 @@ refill:
/* Even if we own the page, we do not use atomic_set().
* This would break get_page_unless_zero() users.
*/
- page_ref_add(page, size);
+ page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
/* reset page count bias and offset to start of new frag */
nc->pfmemalloc = page_is_pfmemalloc(page);
- nc->pagecnt_bias = size + 1;
+ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
nc->offset = size;
}
@@ -4552,15 +4554,20 @@ refill:
if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
goto refill;
+ if (unlikely(nc->pfmemalloc)) {
+ free_the_page(page, compound_order(page));
+ goto refill;
+ }
+
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
/* if size can vary use size else just use PAGE_SIZE */
size = nc->size;
#endif
/* OK, page count is 0, we can safely set it */
- set_page_count(page, size + 1);
+ set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
/* reset page count bias and offset to start of new frag */
- nc->pagecnt_bias = size + 1;
+ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
offset = size - fragsz;
}
@@ -5479,7 +5486,7 @@ void __ref build_all_zonelists(pg_data_t *pgdat)
* done. Non-atomic initialization, single-pass.
*/
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn, enum memmap_context context,
+ unsigned long start_pfn, enum meminit_context context,
struct vmem_altmap *altmap)
{
unsigned long end_pfn = start_pfn + size;
@@ -5506,7 +5513,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
* There can be holes in boot-time mem_map[]s handed to this
* function. They do not exist on hotplugged memory.
*/
- if (context != MEMMAP_EARLY)
+ if (context != MEMINIT_EARLY)
goto not_early;
if (!early_pfn_valid(pfn))
@@ -5541,7 +5548,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
not_early:
page = pfn_to_page(pfn);
__init_single_page(page, pfn, zone, nid);
- if (context == MEMMAP_HOTPLUG)
+ if (context == MEMINIT_HOTPLUG)
SetPageReserved(page);
/*
@@ -5556,7 +5563,7 @@ not_early:
* check here not to call set_pageblock_migratetype() against
* pfn out of zone.
*
- * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
+ * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
* because this is done early in sparse_add_one_section
*/
if (!(pfn & (pageblock_nr_pages - 1))) {
@@ -5577,7 +5584,8 @@ static void __meminit zone_init_free_lists(struct zone *zone)
#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
- memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL)
+ memmap_init_zone((size), (nid), (zone), (start_pfn), \
+ MEMINIT_EARLY, NULL)
#endif
static int zone_batchsize(struct zone *zone)
@@ -7397,9 +7405,11 @@ int __meminit init_per_zone_wmark_min(void)
setup_min_slab_ratio();
#endif
+ khugepaged_min_free_kbytes_update();
+
return 0;
}
-core_initcall(init_per_zone_wmark_min)
+postcore_initcall(init_per_zone_wmark_min)
/*
* min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
diff --git a/mm/page_counter.c b/mm/page_counter.c
index de31470655f6..147ff99187b8 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -77,7 +77,7 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
long new;
new = atomic_long_add_return(nr_pages, &c->usage);
- propagate_protected_usage(counter, new);
+ propagate_protected_usage(c, new);
/*
* This is indeed racy, but we can live with some
* inaccuracy in the watermark.
@@ -121,7 +121,7 @@ bool page_counter_try_charge(struct page_counter *counter,
new = atomic_long_add_return(nr_pages, &c->usage);
if (new > c->max) {
atomic_long_sub(nr_pages, &c->usage);
- propagate_protected_usage(counter, new);
+ propagate_protected_usage(c, new);
/*
* This is racy, but we can live with some
* inaccuracy in the failcnt.
@@ -130,7 +130,7 @@ bool page_counter_try_charge(struct page_counter *counter,
*fail = c;
goto failed;
}
- propagate_protected_usage(counter, new);
+ propagate_protected_usage(c, new);
/*
* Just like with failcnt, we can live with some
* inaccuracy in the watermark.
diff --git a/mm/page_io.c b/mm/page_io.c
index 08d2eae58fce..9b646f07f47f 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -38,7 +38,6 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
bio_set_dev(bio, bdev);
- bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
bio->bi_end_io = end_io;
for (i = 0; i < nr; i++)
@@ -262,11 +261,6 @@ out:
return ret;
}
-static sector_t swap_page_sector(struct page *page)
-{
- return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
-}
-
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -325,7 +319,8 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
return ret;
}
- ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
+ ret = bdev_write_page(sis->bdev, map_swap_page(page, &sis->bdev),
+ page, wbc);
if (!ret) {
count_swpout_vm_event(page);
return 0;
@@ -376,7 +371,7 @@ int swap_readpage(struct page *page, bool synchronous)
return ret;
}
- ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
+ ret = bdev_read_page(sis->bdev, map_swap_page(page, &sis->bdev), page);
if (!ret) {
if (trylock_page(page)) {
swap_slot_free_notify(page);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index c3084ff2569d..3c0930d94a29 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -15,9 +15,9 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
if (err)
break;
- addr += PAGE_SIZE;
- if (addr == end)
+ if (addr >= end - PAGE_SIZE)
break;
+ addr += PAGE_SIZE;
pte++;
}
diff --git a/mm/percpu.c b/mm/percpu.c
index ff76fa0b7528..0151f276ae68 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1103,7 +1103,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
/* allocate chunk */
chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) +
- BITS_TO_LONGS(region_size >> PAGE_SHIFT),
+ BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(unsigned long),
0);
INIT_LIST_HEAD(&chunk->list);
diff --git a/mm/shmem.c b/mm/shmem.c
index a6fc82a1ab6b..9fd0e72757cf 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2149,7 +2149,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
struct shmem_inode_info *info = SHMEM_I(inode);
int retval = -ENOMEM;
- spin_lock_irq(&info->lock);
+ /*
+ * What serializes the accesses to info->flags?
+ * ipc_lock_object() when called from shmctl_do_lock(),
+ * no serialization needed when called from shm_destroy().
+ */
if (lock && !(info->flags & VM_LOCKED)) {
if (!user_shm_lock(inode->i_size, user))
goto out_nomem;
@@ -2164,7 +2168,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
retval = 0;
out_nomem:
- spin_unlock_irq(&info->lock);
return retval;
}
@@ -2268,8 +2271,18 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
pgoff_t offset, max_off;
ret = -ENOMEM;
- if (!shmem_inode_acct_block(inode, 1))
+ if (!shmem_inode_acct_block(inode, 1)) {
+ /*
+ * We may have got a page, returned -ENOENT triggering a retry,
+ * and now we find ourselves with -ENOMEM. Release the page, to
+ * avoid a BUG_ON in our caller.
+ */
+ if (unlikely(*pagep)) {
+ put_page(*pagep);
+ *pagep = NULL;
+ }
goto out;
+ }
if (!*pagep) {
page = shmem_alloc_page(gfp, info, pgoff);
@@ -2350,11 +2363,11 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
lru_cache_add_anon(page);
- spin_lock(&info->lock);
+ spin_lock_irq(&info->lock);
info->alloced++;
inode->i_blocks += BLOCKS_PER_PAGE;
shmem_recalc_inode(inode);
- spin_unlock(&info->lock);
+ spin_unlock_irq(&info->lock);
inc_mm_counter(dst_mm, mm_counter_file(page));
page_add_file_rmap(page, false);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 39e382acb0b8..a94b9981eb17 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -130,6 +130,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
#ifdef CONFIG_MEMCG_KMEM
LIST_HEAD(slab_root_caches);
+static DEFINE_SPINLOCK(memcg_kmem_wq_lock);
void slab_init_memcg_params(struct kmem_cache *s)
{
@@ -310,6 +311,14 @@ int slab_unmergeable(struct kmem_cache *s)
if (s->refcount < 0)
return 1;
+#ifdef CONFIG_MEMCG_KMEM
+ /*
+ * Skip the dying kmem_cache.
+ */
+ if (s->memcg_params.dying)
+ return 1;
+#endif
+
return 0;
}
@@ -717,14 +726,22 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
WARN_ON_ONCE(s->memcg_params.deact_fn))
return;
+ /*
+ * memcg_kmem_wq_lock is used to synchronize access to the
+ * memcg_params.dying flag and to make sure that no new kmem_cache
+ * deactivation tasks are queued (see flush_memcg_workqueue()).
+ */
+ spin_lock_irq(&memcg_kmem_wq_lock);
if (s->memcg_params.root_cache->memcg_params.dying)
- return;
+ goto unlock;
/* pin memcg so that @s doesn't get destroyed in the middle */
css_get(&s->memcg_params.memcg->css);
s->memcg_params.deact_fn = deact_fn;
call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
+unlock:
+ spin_unlock_irq(&memcg_kmem_wq_lock);
}
void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
@@ -832,12 +849,15 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
return 0;
}
-static void flush_memcg_workqueue(struct kmem_cache *s)
+static void memcg_set_kmem_cache_dying(struct kmem_cache *s)
{
- mutex_lock(&slab_mutex);
+ spin_lock_irq(&memcg_kmem_wq_lock);
s->memcg_params.dying = true;
- mutex_unlock(&slab_mutex);
+ spin_unlock_irq(&memcg_kmem_wq_lock);
+}
+static void flush_memcg_workqueue(struct kmem_cache *s)
+{
/*
* SLUB deactivates the kmem_caches through call_rcu_sched. Make
* sure all registered rcu callbacks have been invoked.
@@ -858,10 +878,6 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s)
{
return 0;
}
-
-static inline void flush_memcg_workqueue(struct kmem_cache *s)
-{
-}
#endif /* CONFIG_MEMCG_KMEM */
void slab_kmem_cache_release(struct kmem_cache *s)
@@ -879,8 +895,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
if (unlikely(!s))
return;
- flush_memcg_workqueue(s);
-
get_online_cpus();
get_online_mems();
@@ -890,6 +904,22 @@ void kmem_cache_destroy(struct kmem_cache *s)
if (s->refcount)
goto out_unlock;
+#ifdef CONFIG_MEMCG_KMEM
+ memcg_set_kmem_cache_dying(s);
+
+ mutex_unlock(&slab_mutex);
+
+ put_online_mems();
+ put_online_cpus();
+
+ flush_memcg_workqueue(s);
+
+ get_online_cpus();
+ get_online_mems();
+
+ mutex_lock(&slab_mutex);
+#endif
+
err = shutdown_memcg_caches(s);
if (!err)
err = shutdown_cache(s);
@@ -1540,7 +1570,7 @@ void kzfree(const void *p)
if (unlikely(ZERO_OR_NULL_PTR(mem)))
return;
ks = ksize(mem);
- memset(mem, 0, ks);
+ memzero_explicit(mem, ks);
kfree(mem);
}
EXPORT_SYMBOL(kzfree);
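A minimal sketch (not from the patch above) of why kzfree() switches to memzero_explicit(): a plain memset() immediately before kfree() is a dead store the compiler may legally drop, while memzero_explicit() contains a barrier that keeps the clear. wipe_and_free() is a made-up example helper.

#include <linux/slab.h>
#include <linux/string.h>

static void wipe_and_free(void *buf, size_t len)
{
	/* memset(buf, 0, len) here could be elided as a dead store */
	memzero_explicit(buf, len);	/* the barrier keeps the clear */
	kfree(buf);
}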
diff --git a/mm/slub.c b/mm/slub.c
index 9b7b989273d4..da141e5974f2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -249,7 +249,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
- return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
+ return (void *)((unsigned long)ptr ^ s->random ^ swab(ptr_addr));
#else
return ptr;
#endif
@@ -645,6 +645,20 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
va_end(args);
}
+static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
+ void **freelist, void *nextfree)
+{
+ if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
+ !check_valid_pointer(s, page, nextfree) && freelist) {
+ object_err(s, page, *freelist, "Freechain corrupt");
+ *freelist = NULL;
+ slab_fix(s, "Isolate corrupted freechain");
+ return true;
+ }
+
+ return false;
+}
+
static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
unsigned int off; /* Offset of last byte */
@@ -1328,6 +1342,11 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {}
+static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
+ void **freelist, void *nextfree)
+{
+ return false;
+}
#endif /* CONFIG_SLUB_DEBUG */
/*
@@ -2013,6 +2032,14 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
void *prior;
unsigned long counters;
+ /*
+ * If 'nextfree' is invalid, it is possible that the object at
+ * 'freelist' is already corrupted. So isolate all objects
+ * starting at 'freelist'.
+ */
+ if (freelist_corrupted(s, page, &freelist, nextfree))
+ break;
+
do {
prior = page->freelist;
counters = page->counters;
@@ -5583,7 +5610,8 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
*/
if (buffer)
buf = buffer;
- else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
+ else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) &&
+ !IS_ENABLED(CONFIG_SLUB_STATS))
buf = mbuf;
else {
buffer = (char *) get_zeroed_page(GFP_KERNEL);
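A stand-alone sketch (not from the patch above) of the hardened freelist encoding after the swab() change; encode_free_ptr() is an invented name, but the arithmetic mirrors freelist_ptr(). Byte-swapping the slot address diffuses its varying low bits, and since XOR is its own inverse the same helper both encodes and decodes.

#include <linux/swab.h>
#include <linux/types.h>

static inline void *encode_free_ptr(void *ptr, unsigned long slot_addr,
				    unsigned long cache_random)
{
	/* XOR with the per-cache random value and the swabbed slot address */
	return (void *)((unsigned long)ptr ^ cache_random ^ swab(slot_addr));
}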
diff --git a/mm/sparse.c b/mm/sparse.c
index 3b24ba903d9e..ed60f0a375fe 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -467,6 +467,7 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
__func__, nid);
pnum_begin = pnum;
+ sparse_buffer_fini();
goto failed;
}
check_usemap_section_nr(nid, usemap);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ecee9c6c4cc1..3febffe0fca4 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -23,6 +23,7 @@
#include <linux/huge_mm.h>
#include <asm/pgtable.h>
+#include "internal.h"
/*
* swapper_space is a fiction, retained to simplify the path through
@@ -416,7 +417,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
/*
* call radix_tree_preload() while we can wait.
*/
- err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
+ err = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
if (err)
break;
@@ -536,10 +537,11 @@ static unsigned long swapin_nr_pages(unsigned long offset)
return 1;
hits = atomic_xchg(&swapin_readahead_hits, 0);
- pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
+ pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
+ max_pages,
atomic_read(&last_readahead_pages));
if (!hits)
- prev_offset = offset;
+ WRITE_ONCE(prev_offset, offset);
atomic_set(&last_readahead_pages, pages);
return pages;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0047dcaf9369..057e6907bf7b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -998,7 +998,7 @@ start_over:
goto nextsi;
}
if (size == SWAPFILE_CLUSTER) {
- if (!(si->flags & SWP_FILE))
+ if (si->flags & SWP_BLKDEV)
n_ret = swap_alloc_cluster(si, swp_entries);
} else
n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
@@ -2305,7 +2305,7 @@ sector_t map_swap_page(struct page *page, struct block_device **bdev)
{
swp_entry_t entry;
entry.val = page_private(page);
- return map_swap_entry(entry, bdev);
+ return map_swap_entry(entry, bdev) << (PAGE_SHIFT - 9);
}
/*
@@ -2738,10 +2738,10 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
else
type = si->type + 1;
+ ++(*pos);
for (; (si = swap_type_to_swap_info(type)); type++) {
if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
- ++*pos;
return si;
}
@@ -2825,6 +2825,7 @@ late_initcall(max_swapfiles_check);
static struct swap_info_struct *alloc_swap_info(void)
{
struct swap_info_struct *p;
+ struct swap_info_struct *defer = NULL;
unsigned int type;
int i;
int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node);
@@ -2854,7 +2855,7 @@ static struct swap_info_struct *alloc_swap_info(void)
smp_wmb();
WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
} else {
- kvfree(p);
+ defer = p;
p = swap_info[type];
/*
* Do not memset this entry: a racing procfs swap_next()
@@ -2867,6 +2868,7 @@ static struct swap_info_struct *alloc_swap_info(void)
plist_node_init(&p->avail_lists[i], 0);
p->flags = SWP_USED;
spin_unlock(&swap_lock);
+ kvfree(defer);
spin_lock_init(&p->lock);
spin_lock_init(&p->cont_lock);
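A minimal sketch (not from the patch above) of the seq_file contract the swap_next() change restores, using a made-up example_next(): the ->next() callback must advance *pos on every call, even when it skips or runs out of entries, or seq_read() can re-read the same record.

#include <linux/seq_file.h>

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
	++(*pos);	/* always advance, even when skipping entries */

	/* ... look up and return the element for the new position,
	 * or NULL once the table is exhausted ...
	 */
	return NULL;
}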
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 7529d3fcc899..93a12cc107c9 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -271,7 +271,7 @@ retry:
*/
idx = linear_page_index(dst_vma, dst_addr);
mapping = dst_vma->vm_file->f_mapping;
- hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr);
+ hash = hugetlb_fault_mutex_hash(h, mapping, idx);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
err = -ENOMEM;
diff --git a/mm/util.c b/mm/util.c
index 6a24a1025d77..621afcea2bfa 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -453,6 +453,24 @@ void kvfree(const void *addr)
}
EXPORT_SYMBOL(kvfree);
+/**
+ * kvfree_sensitive - Free a data object containing sensitive information.
+ * @addr: address of the data object to be freed.
+ * @len: length of the data object.
+ *
+ * Use the special memzero_explicit() function to clear the content of a
+ * kvmalloc'ed object containing sensitive data to make sure that the
+ * compiler won't optimize out the data clearing.
+ */
+void kvfree_sensitive(const void *addr, size_t len)
+{
+ if (likely(!ZERO_OR_NULL_PTR(addr))) {
+ memzero_explicit((void *)addr, len);
+ kvfree(addr);
+ }
+}
+EXPORT_SYMBOL(kvfree_sensitive);
+
static inline void *__page_rmapping(struct page *page)
{
unsigned long mapping;
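A hedged usage sketch for the new kvfree_sensitive() helper; example_handle_key() and KEY_LEN are invented names, not kernel APIs. The point is simply that the buffer is cleared with memzero_explicit() before it goes back to the allocator.

#include <linux/mm.h>
#include <linux/string.h>

#define KEY_LEN 64	/* hypothetical key size for the example */

static int example_handle_key(const u8 *key)
{
	u8 *copy = kvmalloc(KEY_LEN, GFP_KERNEL);

	if (!copy)
		return -ENOMEM;

	memcpy(copy, key, KEY_LEN);
	/* ... use the temporary copy ... */
	kvfree_sensitive(copy, KEY_LEN);	/* zeroes, then kvfree()s */
	return 0;
}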
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 958d6ba9ee2d..1817871b0239 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -31,6 +31,7 @@
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
+#include <linux/overflow.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>
@@ -1509,7 +1510,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
addr))
return;
- area = find_vmap_area((unsigned long)addr)->vm;
+ area = find_vm_area(addr);
if (unlikely(!area)) {
WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
addr);
@@ -1668,7 +1669,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
- area->nr_pages = nr_pages;
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
@@ -1676,13 +1676,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
} else {
pages = kmalloc_node(array_size, nested_gfp, node);
}
- area->pages = pages;
- if (!area->pages) {
+
+ if (!pages) {
remove_vm_area(area->addr);
kfree(area);
return NULL;
}
+ area->pages = pages;
+ area->nr_pages = nr_pages;
+
for (i = 0; i < area->nr_pages; i++) {
struct page *page;
@@ -2226,6 +2229,7 @@ finished:
* @vma: vma to cover
* @uaddr: target user address to start at
* @kaddr: virtual address of vmalloc kernel memory
+ * @pgoff: offset from @kaddr to start at
* @size: size of map area
*
* Returns: 0 for success, -Exxx on failure
@@ -2238,9 +2242,15 @@ finished:
* Similar to remap_pfn_range() (see mm/memory.c)
*/
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
- void *kaddr, unsigned long size)
+ void *kaddr, unsigned long pgoff,
+ unsigned long size)
{
struct vm_struct *area;
+ unsigned long off;
+ unsigned long end_index;
+
+ if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
+ return -EINVAL;
size = PAGE_ALIGN(size);
@@ -2254,8 +2264,10 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
if (!(area->flags & VM_USERMAP))
return -EINVAL;
- if (kaddr + size > area->addr + get_vm_area_size(area))
+ if (check_add_overflow(size, off, &end_index) ||
+ end_index > get_vm_area_size(area))
return -EINVAL;
+ kaddr += off;
do {
struct page *page = vmalloc_to_page(kaddr);
@@ -2294,7 +2306,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff)
{
return remap_vmalloc_range_partial(vma, vma->vm_start,
- addr + (pgoff << PAGE_SHIFT),
+ addr, pgoff,
vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
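A small sketch (not from the patch above) of the overflow helpers the new bounds checks rely on; example_off_check() is a made-up wrapper. Both helpers write the computed value through the last argument and return true only when the arithmetic would overflow, which is why the callers above bail out with -EINVAL.

#include <linux/mm.h>
#include <linux/overflow.h>

static int example_off_check(unsigned long pgoff, unsigned long size)
{
	unsigned long off, end;

	/* off = pgoff << PAGE_SHIFT, rejected if the shift overflows */
	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
		return -EINVAL;
	/* end = size + off, rejected if the sum overflows */
	if (check_add_overflow(size, off, &end))
		return -EINVAL;
	return 0;
}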
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bc2ecd43251a..b7d7f6d65bd5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2708,6 +2708,14 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
unsigned long reclaimed;
unsigned long scanned;
+ /*
+ * This loop can become CPU-bound when target memcgs
+ * aren't eligible for reclaim - either because they
+ * don't have any reclaimable pages, or because their
+ * memory is explicitly protected. Avoid soft lockups.
+ */
+ cond_resched();
+
switch (mem_cgroup_protected(root, memcg)) {
case MEMCG_PROT_MIN:
/*
@@ -3101,8 +3109,9 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
/* kswapd must be awake if processes are being throttled */
if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
- pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
- (enum zone_type)ZONE_NORMAL);
+ if (READ_ONCE(pgdat->kswapd_classzone_idx) > ZONE_NORMAL)
+ WRITE_ONCE(pgdat->kswapd_classzone_idx, ZONE_NORMAL);
+
wake_up_interruptible(&pgdat->kswapd_wait);
}
@@ -3618,9 +3627,9 @@ out:
static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
enum zone_type prev_classzone_idx)
{
- if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
- return prev_classzone_idx;
- return pgdat->kswapd_classzone_idx;
+ enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
+
+ return curr_idx == MAX_NR_ZONES ? prev_classzone_idx : curr_idx;
}
static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
@@ -3664,8 +3673,11 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o
* the previous request that slept prematurely.
*/
if (remaining) {
- pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
- pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
+ WRITE_ONCE(pgdat->kswapd_classzone_idx,
+ kswapd_classzone_idx(pgdat, classzone_idx));
+
+ if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
+ WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
}
finish_wait(&pgdat->kswapd_wait, &wait);
@@ -3747,12 +3759,12 @@ static int kswapd(void *p)
tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
set_freezable();
- pgdat->kswapd_order = 0;
- pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
+ WRITE_ONCE(pgdat->kswapd_order, 0);
+ WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
for ( ; ; ) {
bool ret;
- alloc_order = reclaim_order = pgdat->kswapd_order;
+ alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
kswapd_try_sleep:
@@ -3760,10 +3772,10 @@ kswapd_try_sleep:
classzone_idx);
/* Read the new order and classzone_idx */
- alloc_order = reclaim_order = pgdat->kswapd_order;
+ alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
- pgdat->kswapd_order = 0;
- pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
+ WRITE_ONCE(pgdat->kswapd_order, 0);
+ WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
ret = try_to_freeze();
if (kthread_should_stop())
@@ -3808,20 +3820,23 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
enum zone_type classzone_idx)
{
pg_data_t *pgdat;
+ enum zone_type curr_idx;
if (!managed_zone(zone))
return;
if (!cpuset_zone_allowed(zone, gfp_flags))
return;
+
pgdat = zone->zone_pgdat;
+ curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
+
+ if (curr_idx == MAX_NR_ZONES || curr_idx < classzone_idx)
+ WRITE_ONCE(pgdat->kswapd_classzone_idx, classzone_idx);
+
+ if (READ_ONCE(pgdat->kswapd_order) < order)
+ WRITE_ONCE(pgdat->kswapd_order, order);
- if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
- pgdat->kswapd_classzone_idx = classzone_idx;
- else
- pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
- classzone_idx);
- pgdat->kswapd_order = max(pgdat->kswapd_order, order);
if (!waitqueue_active(&pgdat->kswapd_wait))
return;
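A minimal sketch (not from the patch above) of the annotation pattern used throughout the kswapd changes; example_flag and the two helpers are made-up names. READ_ONCE()/WRITE_ONCE() force single, untorn accesses on fields that wakers and kswapd touch without sharing a lock, so the compiler cannot tear, fuse, or re-load them.

#include <linux/compiler.h>

static int example_flag;

static void example_writer(int v)
{
	WRITE_ONCE(example_flag, v);	/* one untorn store */
}

static int example_reader(void)
{
	return READ_ONCE(example_flag);	/* one untorn load */
}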
diff --git a/mm/vmstat.c b/mm/vmstat.c
index ce81b0a7d018..21e07e71ea2d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1384,6 +1384,9 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
list_for_each(curr, &area->free_list[mtype])
freecount++;
seq_printf(m, "%6lu ", freecount);
+ spin_unlock_irq(&zone->lock);
+ cond_resched();
+ spin_lock_irq(&zone->lock);
}
seq_putc(m, '\n');
}
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 85cc29c93d93..d52c005a060f 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -2285,11 +2285,13 @@ static unsigned long zs_can_compact(struct size_class *class)
return obj_wasted * class->pages_per_zspage;
}
-static void __zs_compact(struct zs_pool *pool, struct size_class *class)
+static unsigned long __zs_compact(struct zs_pool *pool,
+ struct size_class *class)
{
struct zs_compact_control cc;
struct zspage *src_zspage;
struct zspage *dst_zspage = NULL;
+ unsigned long pages_freed = 0;
spin_lock(&class->lock);
while ((src_zspage = isolate_zspage(class, true))) {
@@ -2319,7 +2321,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
putback_zspage(class, dst_zspage);
if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
free_zspage(pool, class, src_zspage);
- pool->stats.pages_compacted += class->pages_per_zspage;
+ pages_freed += class->pages_per_zspage;
}
spin_unlock(&class->lock);
cond_resched();
@@ -2330,12 +2332,15 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
putback_zspage(class, src_zspage);
spin_unlock(&class->lock);
+
+ return pages_freed;
}
unsigned long zs_compact(struct zs_pool *pool)
{
int i;
struct size_class *class;
+ unsigned long pages_freed = 0;
for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
class = pool->size_class[i];
@@ -2343,10 +2348,11 @@ unsigned long zs_compact(struct zs_pool *pool)
continue;
if (class->index != i)
continue;
- __zs_compact(pool, class);
+ pages_freed += __zs_compact(pool, class);
}
+ atomic_long_add(pages_freed, &pool->stats.pages_compacted);
- return pool->stats.pages_compacted;
+ return pages_freed;
}
EXPORT_SYMBOL_GPL(zs_compact);
@@ -2363,13 +2369,12 @@ static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
struct zs_pool *pool = container_of(shrinker, struct zs_pool,
shrinker);
- pages_freed = pool->stats.pages_compacted;
/*
* Compact classes and calculate compaction delta.
* Can run concurrently with a manually triggered
* (by user) compaction.
*/
- pages_freed = zs_compact(pool) - pages_freed;
+ pages_freed = zs_compact(pool);
return pages_freed ? pages_freed : SHRINK_STOP;
}
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 5e9950453955..512ada90657b 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
return 0;
out_free_newdev:
- if (new_dev->reg_state == NETREG_UNINITIALIZED)
+ if (new_dev->reg_state == NETREG_UNINITIALIZED ||
+ new_dev->reg_state == NETREG_UNREGISTERED)
free_netdev(new_dev);
return err;
}
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index f868cf6fba79..9268f808afc0 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -377,6 +377,10 @@ static void p9_read_work(struct work_struct *work)
if (m->rreq->status == REQ_STATUS_SENT) {
list_del(&m->rreq->req_list);
p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD);
+ } else if (m->rreq->status == REQ_STATUS_FLSHD) {
+ /* Ignore replies associated with a cancelled request. */
+ p9_debug(P9_DEBUG_TRANS,
+ "Ignore replies associated with a cancelled request\n");
} else {
spin_unlock(&m->client->lock);
p9_debug(P9_DEBUG_ERROR,
@@ -718,11 +722,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
{
p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
+ spin_lock(&client->lock);
+	/* Ignore the cancel: the reply was already received
+	 * before we took the lock.
+ */
+ if (req->status == REQ_STATUS_RCVD) {
+ spin_unlock(&client->lock);
+ return 0;
+ }
+
/* we haven't received a response for oldreq,
* remove it from the list.
*/
- spin_lock(&client->lock);
list_del(&req->req_list);
+ req->status = REQ_STATUS_FLSHD;
spin_unlock(&client->lock);
p9_req_put(req);
@@ -818,20 +831,28 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
return -ENOMEM;
ts->rd = fget(rfd);
+ if (!ts->rd)
+ goto out_free_ts;
+ if (!(ts->rd->f_mode & FMODE_READ))
+ goto out_put_rd;
ts->wr = fget(wfd);
- if (!ts->rd || !ts->wr) {
- if (ts->rd)
- fput(ts->rd);
- if (ts->wr)
- fput(ts->wr);
- kfree(ts);
- return -EIO;
- }
+ if (!ts->wr)
+ goto out_put_rd;
+ if (!(ts->wr->f_mode & FMODE_WRITE))
+ goto out_put_wr;
client->trans = ts;
client->status = Connected;
return 0;
+
+out_put_wr:
+ fput(ts->wr);
+out_put_rd:
+ fput(ts->rd);
+out_free_ts:
+ kfree(ts);
+ return -EIO;
}
static int p9_socket_open(struct p9_client *client, struct socket *csocket)
@@ -1017,7 +1038,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
csocket = NULL;
- if (addr == NULL)
+ if (!addr || !strlen(addr))
return -EINVAL;
if (strlen(addr) >= UNIX_PATH_MAX) {
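A generic sketch (not from the patch above) of the error-unwind shape the reworked p9_fd_open() follows; struct example and example_open() are invented for illustration. References are taken in order and, on failure, released in reverse, so each label undoes exactly what was acquired before it.

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>

struct example {
	struct file *rd;
	struct file *wr;
};

static int example_open(struct example **out, int rfd, int wfd)
{
	struct example *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	e->rd = fget(rfd);
	if (!e->rd)
		goto out_free;
	e->wr = fget(wfd);
	if (!e->wr)
		goto out_put_rd;

	*out = e;	/* success: caller now owns both references */
	return 0;

out_put_rd:
	fput(e->rd);
out_free:
	kfree(e);
	return -EIO;
}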
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 2880ac470379..20ec8e7f9423 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1573,8 +1573,8 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
struct sk_buff *skb;
struct net_device *dev;
struct ddpehdr *ddp;
- int size;
- struct atalk_route *rt;
+ int size, hard_header_len;
+ struct atalk_route *rt, *rt_lo = NULL;
int err;
if (flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
@@ -1637,7 +1637,22 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n",
sk, size, dev->name);
- size += dev->hard_header_len;
+ hard_header_len = dev->hard_header_len;
+ /* Leave room for loopback hardware header if necessary */
+ if (usat->sat_addr.s_node == ATADDR_BCAST &&
+ (dev->flags & IFF_LOOPBACK || !(rt->flags & RTF_GATEWAY))) {
+ struct atalk_addr at_lo;
+
+ at_lo.s_node = 0;
+ at_lo.s_net = 0;
+
+ rt_lo = atrtr_find(&at_lo);
+
+ if (rt_lo && rt_lo->dev->hard_header_len > hard_header_len)
+ hard_header_len = rt_lo->dev->hard_header_len;
+ }
+
+ size += hard_header_len;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err);
lock_sock(sk);
@@ -1645,7 +1660,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
goto out;
skb_reserve(skb, ddp_dl->header_length);
- skb_reserve(skb, dev->hard_header_len);
+ skb_reserve(skb, hard_header_len);
skb->dev = dev;
SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk);
@@ -1696,18 +1711,12 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
/* loop back */
skb_orphan(skb);
if (ddp->deh_dnode == ATADDR_BCAST) {
- struct atalk_addr at_lo;
-
- at_lo.s_node = 0;
- at_lo.s_net = 0;
-
- rt = atrtr_find(&at_lo);
- if (!rt) {
+ if (!rt_lo) {
kfree_skb(skb);
err = -ENETUNREACH;
goto out;
}
- dev = rt->dev;
+ dev = rt_lo->dev;
skb->dev = dev;
}
ddp_dl->request(ddp_dl, skb, dev->dev_addr);
diff --git a/net/atm/lec.c b/net/atm/lec.c
index ad4f829193f0..5a6186b80987 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1270,6 +1270,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
entry->vcc = NULL;
}
if (entry->recv_vcc) {
+ struct atm_vcc *vcc = entry->recv_vcc;
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+
+ kfree(vpriv);
+ vcc->user_back = NULL;
+
entry->recv_vcc->push = entry->old_recv_push;
vcc_release_async(entry->recv_vcc, -EPIPE);
entry->recv_vcc = NULL;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 44ec492f3dc2..a45db78eaf00 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -638,8 +638,10 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
case SO_BINDTODEVICE:
- if (optlen > IFNAMSIZ)
- optlen = IFNAMSIZ;
+ if (optlen > IFNAMSIZ - 1)
+ optlen = IFNAMSIZ - 1;
+
+ memset(devname, 0, sizeof(devname));
if (copy_from_user(devname, optval, optlen)) {
res = -EFAULT;
@@ -1188,7 +1190,10 @@ static int __must_check ax25_connect(struct socket *sock,
if (addr_len > sizeof(struct sockaddr_ax25) &&
fsa->fsa_ax25.sax25_ndigis != 0) {
/* Valid number of digipeaters ? */
- if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) {
+ if (fsa->fsa_ax25.sax25_ndigis < 1 ||
+ fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS ||
+ addr_len < sizeof(struct sockaddr_ax25) +
+ sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) {
err = -EINVAL;
goto out_release;
}
@@ -1508,7 +1513,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;
/* Valid number of digipeaters ? */
- if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) {
+ if (usax->sax25_ndigis < 1 ||
+ usax->sax25_ndigis > AX25_MAX_DIGIS ||
+ addr_len < sizeof(struct sockaddr_ax25) +
+ sizeof(ax25_address) * usax->sax25_ndigis) {
err = -EINVAL;
goto out;
}
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 5da183b2f4c9..af3da6cdfc79 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -132,20 +132,7 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
rtnl_lock();
ret = __ethtool_get_link_ksettings(hard_iface->net_dev, &link_settings);
rtnl_unlock();
-
- /* Virtual interface drivers such as tun / tap interfaces, VLAN, etc
- * tend to initialize the interface throughput with some value for the
- * sake of having a throughput number to export via ethtool. This
- * exported throughput leaves batman-adv to conclude the interface
- * throughput is genuine (reflecting reality), thus no measurements
- * are necessary.
- *
- * Based on the observation that those interface types also tend to set
- * the link auto-negotiation to 'off', batman-adv shall check this
- * setting to differentiate between genuine link throughput information
- * and placeholders installed by virtual interfaces.
- */
- if (ret == 0 && link_settings.base.autoneg == AUTONEG_ENABLE) {
+ if (ret == 0) {
/* link characteristics might change over time */
if (link_settings.base.duplex == DUPLEX_FULL)
hard_iface->bat_v.flags |= BATADV_FULL_DUPLEX;
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index bf9ea404abe7..04a620fd1301 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -716,6 +716,12 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
ogm_packet->version, ntohs(ogm_packet->tvlv_len));
+ if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet from ourself\n");
+ return;
+ }
+
/* If the throughput metric is 0, immediately drop the packet. No need
* to create orig_node / neigh_node for an unusable route.
*/
@@ -735,7 +741,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig);
if (!orig_node)
- return;
+ goto out;
neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
ethhdr->h_source);
@@ -843,11 +849,6 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
goto free_skb;
- ogm_packet = (struct batadv_ogm2_packet *)skb->data;
-
- if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
- goto free_skb;
-
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
skb->len + ETH_HLEN);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 85faf25c2912..1401031f4bb4 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -37,6 +37,7 @@
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
+#include <linux/preempt.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
@@ -96,11 +97,12 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
*/
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
{
- const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
+ const struct batadv_bla_backbone_gw *gw;
u32 hash = 0;
- hash = jhash(&claim->addr, sizeof(claim->addr), hash);
- hash = jhash(&claim->vid, sizeof(claim->vid), hash);
+ gw = (struct batadv_bla_backbone_gw *)data;
+ hash = jhash(&gw->orig, sizeof(gw->orig), hash);
+ hash = jhash(&gw->vid, sizeof(gw->vid), hash);
return hash % size;
}
@@ -450,7 +452,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
skb->len + ETH_HLEN);
- netif_rx(skb);
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
out:
if (primary_if)
batadv_hardif_put(primary_if);
@@ -1589,13 +1594,16 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
}
/**
- * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
+ * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup.
* @bat_priv: the bat priv with all the soft interface information
- * @skb: contains the bcast_packet to be checked
+ * @skb: contains the multicast packet to be checked
+ * @payload_ptr: pointer to position inside the head buffer of the skb
+ * marking the start of the data to be CRC'ed
+ * @orig: originator mac address, NULL if unknown
*
- * check if it is on our broadcast list. Another gateway might
- * have sent the same packet because it is connected to the same backbone,
- * so we have to remove this duplicate.
+ * Check if it is on our broadcast list. Another gateway might have sent the
+ * same packet because it is connected to the same backbone, so we have to
+ * remove this duplicate.
*
* This is performed by checking the CRC, which will tell us
* with a good chance that it is the same packet. If it is furthermore
@@ -1604,19 +1612,17 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
*
* Return: true if a packet is in the duplicate list, false otherwise.
*/
-bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
- struct sk_buff *skb)
+static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, u8 *payload_ptr,
+ const u8 *orig)
{
- int i, curr;
- __be32 crc;
- struct batadv_bcast_packet *bcast_packet;
struct batadv_bcast_duplist_entry *entry;
bool ret = false;
-
- bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ int i, curr;
+ __be32 crc;
/* calculate the crc ... */
- crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
+ crc = batadv_skb_crc32(skb, payload_ptr);
spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
@@ -1635,8 +1641,21 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
if (entry->crc != crc)
continue;
- if (batadv_compare_eth(entry->orig, bcast_packet->orig))
- continue;
+ /* are the originators both known and not anonymous? */
+ if (orig && !is_zero_ether_addr(orig) &&
+ !is_zero_ether_addr(entry->orig)) {
+ /* If known, check if the new frame came from
+ * the same originator:
+ * We are safe to take identical frames from the
+ * same orig, if known, as multiplications in
+ * the mesh are detected via the (orig, seqno) pair.
+ * So we can be a bit more liberal here and allow
+ * identical frames from the same orig which the source
+ * host might have sent multiple times on purpose.
+ */
+ if (batadv_compare_eth(entry->orig, orig))
+ continue;
+ }
/* this entry seems to match: same crc, not too old,
* and from another gw. therefore return true to forbid it.
@@ -1652,7 +1671,14 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
entry = &bat_priv->bla.bcast_duplist[curr];
entry->crc = crc;
entry->entrytime = jiffies;
- ether_addr_copy(entry->orig, bcast_packet->orig);
+
+ /* known originator */
+ if (orig)
+ ether_addr_copy(entry->orig, orig);
+ /* anonymous originator */
+ else
+ eth_zero_addr(entry->orig);
+
bat_priv->bla.bcast_duplist_curr = curr;
out:
@@ -1662,6 +1688,48 @@ out:
}
/**
+ * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup.
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: contains the multicast packet to be checked, decapsulated from a
+ * unicast_packet
+ *
+ * Check if it is on our broadcast list. Another gateway might have sent the
+ * same packet because it is connected to the same backbone, so we have to
+ * remove this duplicate.
+ *
+ * Return: true if a packet is in the duplicate list, false otherwise.
+ */
+static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL);
+}
+
+/**
+ * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: contains the bcast_packet to be checked
+ *
+ * Check if it is on our broadcast list. Another gateway might have sent the
+ * same packet because it is connected to the same backbone, so we have to
+ * remove this duplicate.
+ *
+ * Return: true if a packet is in the duplicate list, false otherwise.
+ */
+bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ struct batadv_bcast_packet *bcast_packet;
+ u8 *payload_ptr;
+
+ bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ payload_ptr = (u8 *)(bcast_packet + 1);
+
+ return batadv_bla_check_duplist(bat_priv, skb, payload_ptr,
+ bcast_packet->orig);
+}
+
+/**
* batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
* the VLAN identified by vid.
* @bat_priv: the bat priv with all the soft interface information
@@ -1822,7 +1890,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
* @vid: the VLAN ID of the frame
- * @is_bcast: the packet came in a broadcast packet type.
+ * @packet_type: the batman packet type this frame came in
*
* batadv_bla_rx avoidance checks if:
* * we have to race for a claim
@@ -1834,7 +1902,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
* further process the skb.
*/
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
- unsigned short vid, bool is_bcast)
+ unsigned short vid, int packet_type)
{
struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
@@ -1856,9 +1924,32 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
goto handled;
if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
- /* don't allow broadcasts while requests are in flight */
- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
- goto handled;
+ /* don't allow multicast packets while requests are in flight */
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ /* Both broadcast flooding and multicast-via-unicasts
+ * delivery might send to multiple backbone gateways
+ * sharing the same LAN and therefore need to coordinate
+ * which backbone gateway forwards into the LAN,
+ * by claiming the payload source address.
+ *
+ * Broadcast flooding and multicast-via-unicasts
+ * delivery use the following two batman packet types.
+ * Note: explicitly exclude BATADV_UNICAST_4ADDR,
+ * as the DHCP gateway feature will send explicitly
+ * to only one BLA gateway, so the claiming process
+ * should be avoided there.
+ */
+ if (packet_type == BATADV_BCAST ||
+ packet_type == BATADV_UNICAST)
+ goto handled;
+
+ /* potential duplicates from foreign BLA backbone gateways via
+ * multicast-in-unicast packets
+ */
+ if (is_multicast_ether_addr(ethhdr->h_dest) &&
+ packet_type == BATADV_UNICAST &&
+ batadv_bla_check_ucast_duplist(bat_priv, skb))
+ goto handled;
ether_addr_copy(search_claim.addr, ethhdr->h_source);
search_claim.vid = vid;
@@ -1893,13 +1984,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
goto allow;
}
- /* if it is a broadcast ... */
- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+ /* if it is a multicast ... */
+ if (is_multicast_ether_addr(ethhdr->h_dest) &&
+ (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) {
/* ... drop it. the responsible gateway is in charge.
*
- * We need to check is_bcast because with the gateway
+ * We need to check packet type because with the gateway
* feature, broadcasts (like DHCP requests) may be sent
- * using a unicast packet type.
+ * using a unicast 4 address packet type. See comment above.
*/
goto handled;
} else {
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 71f95a3e4d3f..af28fdb01467 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -48,7 +48,7 @@ static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac)
#ifdef CONFIG_BATMAN_ADV_BLA
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
- unsigned short vid, bool is_bcast);
+ unsigned short vid, int packet_type);
bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid);
bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
@@ -79,7 +79,7 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
static inline bool batadv_bla_rx(struct batadv_priv *bat_priv,
struct sk_buff *skb, unsigned short vid,
- bool is_bcast)
+ int packet_type)
{
return false;
}
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 140c61a3f1ec..0c59fefc1371 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -714,8 +714,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET;
/* store the client address if the message is going to a client */
- if (ret == BATADV_DHCP_TO_CLIENT &&
- pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) {
+ if (ret == BATADV_DHCP_TO_CLIENT) {
+ if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN))
+ return BATADV_DHCP_NO;
+
/* check if the DHCP packet carries an Ethernet DHCP */
p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
if (*p != BATADV_DHCP_HTYPE_ETHERNET)
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index 853773e45f79..837f67c9fad3 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -205,6 +205,7 @@ static const struct file_operations batadv_log_fops = {
.read = batadv_log_read,
.poll = batadv_log_poll,
.llseek = no_llseek,
+ .owner = THIS_MODULE,
};
/**
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 34caf129a9bf..7f1be5a28757 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1021,15 +1021,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
*/
static u8 batadv_nc_random_weight_tq(u8 tq)
{
- u8 rand_val, rand_tq;
-
- get_random_bytes(&rand_val, sizeof(rand_val));
-
/* randomize the estimated packet loss (max TQ - estimated TQ) */
- rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq);
-
- /* normalize the randomized packet loss */
- rand_tq /= BATADV_TQ_MAX_VALUE;
+ u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);
/* convert to (randomized) estimated tq again */
return BATADV_TQ_MAX_VALUE - rand_tq;
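A short note on the helper the rewritten weighting uses, with a toy roll_die() example (a made-up name): prandom_u32_max(n) returns a pseudo-random value in [0, n), which is why a single call can replace the old multiply-and-divide on a random byte.

#include <linux/random.h>
#include <linux/types.h>

static u8 roll_die(void)
{
	/* pseudo-random face value in 0..5 */
	return prandom_u32_max(6);
}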
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index cc3ed93a6d51..98af41e3810d 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -838,6 +838,10 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
vid = batadv_get_vid(skb, hdr_len);
ethhdr = (struct ethhdr *)(skb->data + hdr_len);
+ /* do not reroute multicast frames in a unicast header */
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ return true;
+
/* check if the destination client was served by this node and it is now
* roaming. In this case, it means that the node has got a ROAM_ADV
* message and that it knows the new destination in the mesh to re-route
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index a2976adeeedc..6ff78080ec7f 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -426,10 +426,10 @@ void batadv_interface_rx(struct net_device *soft_iface,
struct vlan_ethhdr *vhdr;
struct ethhdr *ethhdr;
unsigned short vid;
- bool is_bcast;
+ int packet_type;
batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
- is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
+ packet_type = batadv_bcast_packet->packet_type;
skb_pull_rcsum(skb, hdr_size);
skb_reset_mac_header(skb);
@@ -472,7 +472,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
/* Let the bridge loop avoidance check the packet. If will
* not handle it, we can safely push it up.
*/
- if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
+ if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
goto out;
if (orig_node)
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 09427fc6494a..976b038e53bf 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -1093,7 +1093,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
ret = batadv_parse_throughput(net_dev, buff, "throughput_override",
&tp_override);
if (!ret)
- return count;
+ goto out;
old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
if (old_tp_override == tp_override)
@@ -1126,6 +1126,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj,
tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
+ batadv_hardif_put(hard_iface);
return sprintf(buff, "%u.%u MBit\n", tp_override / 10,
tp_override % 10);
}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 9fa5389ea244..cc350ab4de0a 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -904,6 +904,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
tt_vlan->vid = htons(vlan->vid);
tt_vlan->crc = htonl(vlan->tt.crc);
+ tt_vlan->reserved = 0;
tt_vlan++;
}
@@ -987,6 +988,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
tt_vlan->vid = htons(vlan->vid);
tt_vlan->crc = htonl(vlan->tt.crc);
+ tt_vlan->reserved = 0;
tt_vlan++;
}
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 357475cceec6..9a75f9b00b51 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -57,6 +57,7 @@ static bool enable_6lowpan;
/* We are listening incoming connections via this channel
*/
static struct l2cap_chan *listen_chan;
+static DEFINE_MUTEX(set_lock);
struct lowpan_peer {
struct list_head list;
@@ -1082,12 +1083,14 @@ static void do_enable_set(struct work_struct *work)
enable_6lowpan = set_enable->flag;
+ mutex_lock(&set_lock);
if (listen_chan) {
l2cap_chan_close(listen_chan, 0);
l2cap_chan_put(listen_chan);
}
listen_chan = bt_6lowpan_listen();
+ mutex_unlock(&set_lock);
kfree(set_enable);
}
@@ -1139,11 +1142,13 @@ static ssize_t lowpan_control_write(struct file *fp,
if (ret == -EINVAL)
return ret;
+ mutex_lock(&set_lock);
if (listen_chan) {
l2cap_chan_close(listen_chan, 0);
l2cap_chan_put(listen_chan);
listen_chan = NULL;
}
+ mutex_unlock(&set_lock);
if (conn) {
struct lowpan_peer *peer;
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 51c2cf2d8923..e09ea78356c3 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -233,6 +233,9 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_info_req req;
found = true;
+
+ memset(&req, 0, sizeof(req));
+
req.id = cl->id;
a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr),
sizeof(req), &req);
@@ -312,6 +315,8 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
if (!hdev || hdev->dev_type != HCI_AMP) {
struct a2mp_info_rsp rsp;
+ memset(&rsp, 0, sizeof(rsp));
+
rsp.id = req->id;
rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
@@ -355,6 +360,8 @@ static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
if (!ctrl)
return -ENOMEM;
+ memset(&req, 0, sizeof(req));
+
req.id = rsp->id;
a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req),
&req);
@@ -381,6 +388,8 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
hdev = hci_dev_get(req->id);
if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
struct a2mp_amp_assoc_rsp rsp;
+
+ memset(&rsp, 0, sizeof(rsp));
rsp.id = req->id;
if (tmp) {
@@ -471,7 +480,6 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cmd *hdr)
{
struct a2mp_physlink_req *req = (void *) skb->data;
-
struct a2mp_physlink_rsp rsp;
struct hci_dev *hdev;
struct hci_conn *hcon;
@@ -482,6 +490,8 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
+ memset(&rsp, 0, sizeof(rsp));
+
rsp.local_id = req->remote_id;
rsp.remote_id = req->local_id;
@@ -509,6 +519,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
if (!assoc) {
amp_ctrl_put(ctrl);
+ hci_dev_put(hdev);
return -ENOMEM;
}
@@ -560,6 +571,8 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
+ memset(&rsp, 0, sizeof(rsp));
+
rsp.local_id = req->remote_id;
rsp.remote_id = req->local_id;
rsp.status = A2MP_STATUS_SUCCESS;
@@ -682,6 +695,8 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
if (err) {
struct a2mp_cmd_rej rej;
+ memset(&rej, 0, sizeof(rej));
+
rej.reason = cpu_to_le16(0);
hdr = (void *) skb->data;
@@ -905,6 +920,8 @@ void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
BT_DBG("%s mgr %p", hdev->name, mgr);
+ memset(&rsp, 0, sizeof(rsp));
+
rsp.id = hdev->id;
rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
@@ -1002,6 +1019,8 @@ void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status)
if (!mgr)
return;
+ memset(&rsp, 0, sizeof(rsp));
+
hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT);
if (!hs_hcon) {
rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
@@ -1034,6 +1053,8 @@ void a2mp_discover_amp(struct l2cap_chan *chan)
mgr->bredr_chan = chan;
+ memset(&req, 0, sizeof(req));
+
req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
req.ext_feat = 0;
a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req);
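Side note on the repeated memset() additions in a2mp.c above: every on-stack request/response struct is zeroed before it is handed to a2mp_send(), so padding bytes and any field that is never assigned cannot carry stale kernel stack contents onto the wire. A small userspace illustration with a hypothetical two-field struct (the 1-byte hole after id is what the memset covers):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct demo_rsp {
	uint8_t  id;      /* offset 0 */
	                  /* 1 byte of implicit padding */
	uint16_t status;  /* offset 2 */
};

static void fill_rsp(struct demo_rsp *rsp)
{
	/* Zero the whole struct first; padding and forgotten fields then
	 * serialize as zeroes instead of whatever was on the stack.
	 */
	memset(rsp, 0, sizeof(*rsp));
	rsp->id = 1;
	rsp->status = 0;
}

int main(void)
{
	struct demo_rsp rsp;
	const unsigned char *p = (const unsigned char *)&rsp;

	fill_rsp(&rsp);
	for (size_t i = 0; i < sizeof(rsp); i++)
		printf("%02x ", p[i]);
	printf("\n");
	return 0;
}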
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index 78bec8df8525..72ef967c5663 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -305,6 +305,9 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
struct hci_request req;
int err;
+ if (!mgr)
+ return;
+
cp.phy_handle = hcon->handle;
cp.len_so_far = cpu_to_le16(0);
cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 7f26a5a19ff6..9873684a9d8f 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -391,6 +391,11 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
if (!(session->flags & BIT(CMTP_LOOPBACK))) {
err = cmtp_attach_device(session);
if (err < 0) {
+ /* Caller will call fput in case of failure, and so
+ * will cmtp_session kthread.
+ */
+ get_file(session->sock->file);
+
atomic_inc(&session->terminate);
wake_up_interruptible(sk_sleep(session->sock->sk));
up_write(&cmtp_session_sem);
diff --git a/net/bluetooth/ecdh_helper.h b/net/bluetooth/ecdh_helper.h
index a6f8d03d4aaf..830723971cf8 100644
--- a/net/bluetooth/ecdh_helper.h
+++ b/net/bluetooth/ecdh_helper.h
@@ -25,6 +25,6 @@
int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 pair_public_key[64],
u8 secret[32]);
-int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 *private_key);
+int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]);
int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]);
int generate_ecdh_keys(struct crypto_kpp *tfm, u8 public_key[64]);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index db735d0d931e..1b50e4ef2c68 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1282,6 +1282,23 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
return 0;
}
+ /* AES encryption is required for Level 4:
+ *
+ * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
+ * page 1319:
+ *
+ * 128-bit equivalent strength for link and encryption keys
+ * required using FIPS approved algorithms (E0 not allowed,
+ * SAFER+ not allowed, and P-192 not allowed; encryption key
+ * not shortened)
+ */
+ if (conn->sec_level == BT_SECURITY_FIPS &&
+ !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
+ bt_dev_err(conn->hdev,
+ "Invalid security: Missing AES-CCM usage");
+ return 0;
+ }
+
if (hci_conn_ssp_enabled(conn) &&
!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
return 0;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index e03faca84919..04d6f50798c9 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1316,8 +1316,10 @@ int hci_inquiry(void __user *arg)
* cleared). If it is interrupted by a signal, return -EINTR.
*/
if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
- TASK_INTERRUPTIBLE))
- return -EINTR;
+ TASK_INTERRUPTIBLE)) {
+ err = -EINTR;
+ goto done;
+ }
}
/* for unlimited number of responses we will use buffer with
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 3e7badb3ac2d..2e2cad58b6cc 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -41,12 +41,27 @@
/* Handle HCI Event packets */
-static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
+ u8 *new_status)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ /* It is possible that we receive the Inquiry Complete event right
+ * before the Inquiry Cancel Command Complete event, in which case
+ * the latter event will have a status of Command Disallowed (0x0c).
+ * This should not be treated as an error, since Inquiry Cancel still
+ * achieves its goal, which is to end the last Inquiry session.
+ */
+ if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
+ bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
+ status = 0x00;
+ }
+
+ *new_status = status;
+
if (status)
return;
@@ -1229,6 +1244,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
{
struct discovery_state *d = &hdev->discovery;
+ if (len > HCI_MAX_AD_LENGTH)
+ return;
+
bacpy(&d->last_adv_addr, bdaddr);
d->last_adv_addr_type = bdaddr_type;
d->last_adv_rssi = rssi;
@@ -2357,7 +2375,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
- if (!num_rsp)
+ if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
return;
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
@@ -2738,7 +2756,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
&cp);
} else {
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
- hci_encrypt_cfm(conn, ev->status, 0x00);
+ hci_encrypt_cfm(conn, ev->status);
}
}
@@ -2823,22 +2841,7 @@ static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
conn->enc_key_size = rp->key_size;
}
- if (conn->state == BT_CONFIG) {
- conn->state = BT_CONNECTED;
- hci_connect_cfm(conn, 0);
- hci_conn_drop(conn);
- } else {
- u8 encrypt;
-
- if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
- encrypt = 0x00;
- else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
- encrypt = 0x02;
- else
- encrypt = 0x01;
-
- hci_encrypt_cfm(conn, 0, encrypt);
- }
+ hci_encrypt_cfm(conn, 0);
unlock:
hci_dev_unlock(hdev);
@@ -2887,27 +2890,23 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+ /* Check link security requirements are met */
+ if (!hci_conn_check_link_mode(conn))
+ ev->status = HCI_ERROR_AUTH_FAILURE;
+
if (ev->status && conn->state == BT_CONNECTED) {
if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
+ /* Notify upper layers so they can clean up before
+ * disconnecting.
+ */
+ hci_encrypt_cfm(conn, ev->status);
hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
hci_conn_drop(conn);
goto unlock;
}
- /* In Secure Connections Only mode, do not allow any connections
- * that are not encrypted with AES-CCM using a P-256 authenticated
- * combination key.
- */
- if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
- (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
- conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
- hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
- hci_conn_drop(conn);
- goto unlock;
- }
-
/* Try reading the encryption key size for encrypted ACL links */
if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
struct hci_cp_read_enc_key_size cp;
@@ -2937,14 +2936,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
notify:
- if (conn->state == BT_CONFIG) {
- if (!ev->status)
- conn->state = BT_CONNECTED;
-
- hci_connect_cfm(conn, ev->status);
- hci_conn_drop(conn);
- } else
- hci_encrypt_cfm(conn, ev->status, ev->encrypt);
+ hci_encrypt_cfm(conn, ev->status);
unlock:
hci_dev_unlock(hdev);
@@ -3036,7 +3028,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
switch (*opcode) {
case HCI_OP_INQUIRY_CANCEL:
- hci_cc_inquiry_cancel(hdev, skb);
+ hci_cc_inquiry_cancel(hdev, skb, status);
break;
case HCI_OP_PERIODIC_INQ:
@@ -3945,6 +3937,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
struct inquiry_info_with_rssi_and_pscan_mode *info;
info = (void *) (skb->data + 1);
+ if (skb->len < num_rsp * sizeof(*info) + 1)
+ goto unlock;
+
for (; num_rsp; num_rsp--, info++) {
u32 flags;
@@ -3966,6 +3961,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
} else {
struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
+ if (skb->len < num_rsp * sizeof(*info) + 1)
+ goto unlock;
+
for (; num_rsp; num_rsp--, info++) {
u32 flags;
@@ -3986,6 +3984,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
}
}
+unlock:
hci_dev_unlock(hdev);
}
@@ -4097,6 +4096,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
case 0x11: /* Unsupported Feature or Parameter Value */
case 0x1c: /* SCO interval rejected */
case 0x1a: /* Unsupported Remote Feature */
+ case 0x1e: /* Invalid LMP Parameters */
case 0x1f: /* Unspecified error */
case 0x20: /* Unsupported LMP Parameter value */
if (conn->out) {
@@ -4147,7 +4147,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
- if (!num_rsp)
+ if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
return;
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
@@ -4672,6 +4672,11 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev,
return;
}
+ if (!hcon->amp_mgr) {
+ hci_dev_unlock(hdev);
+ return;
+ }
+
if (ev->status) {
hci_conn_del(hcon);
hci_dev_unlock(hdev);
@@ -4716,6 +4721,7 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
return;
hchan->handle = le16_to_cpu(ev->handle);
+ hchan->amp = true;
BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
@@ -4748,7 +4754,7 @@ static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
hci_dev_lock(hdev);
hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
- if (!hchan)
+ if (!hchan || !hchan->amp)
goto unlock;
amp_destroy_logical_link(hchan, ev->reason);
@@ -5115,7 +5121,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
u8 bdaddr_type, bdaddr_t *direct_addr,
- u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
+ u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
+ bool ext_adv)
{
struct discovery_state *d = &hdev->discovery;
struct smp_irk *irk;
@@ -5137,6 +5144,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
return;
}
+ if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
+ bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
+ return;
+ }
+
/* Find the end of the data in case the report contains padded zero
* bytes at the end causing an invalid length value.
*
@@ -5196,7 +5208,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
*/
conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
direct_addr);
- if (conn && type == LE_ADV_IND) {
+ if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
/* Store report for later inclusion by
* mgmt_device_connected
*/
@@ -5250,7 +5262,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
* event or send an immediate device found event if the data
* should not be stored for later.
*/
- if (!has_pending_adv_report(hdev)) {
+ if (!ext_adv && !has_pending_adv_report(hdev)) {
/* If the report will trigger a SCAN_REQ store it for
* later merging.
*/
@@ -5285,7 +5297,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
/* If the new report will trigger a SCAN_REQ store it for
* later merging.
*/
- if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
+ if (!ext_adv && (type == LE_ADV_IND ||
+ type == LE_ADV_SCAN_IND)) {
store_pending_adv_report(hdev, bdaddr, bdaddr_type,
rssi, flags, data, len);
return;
@@ -5325,7 +5338,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
rssi = ev->data[ev->length];
process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
ev->bdaddr_type, NULL, 0, rssi,
- ev->data, ev->length);
+ ev->data, ev->length, false);
} else {
bt_dev_err(hdev, "Dropping invalid advertising data");
}
@@ -5399,7 +5412,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (legacy_evt_type != LE_ADV_INVALID) {
process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
ev->bdaddr_type, NULL, 0, ev->rssi,
- ev->data, ev->length);
+ ev->data, ev->length,
+ !(evt_type & LE_EXT_ADV_LEGACY_PDU));
}
ptr += sizeof(*ev) + ev->length + 1;
@@ -5588,19 +5602,18 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
u8 num_reports = skb->data[0];
- void *ptr = &skb->data[1];
+ struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
- hci_dev_lock(hdev);
+ if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
+ return;
- while (num_reports--) {
- struct hci_ev_le_direct_adv_info *ev = ptr;
+ hci_dev_lock(hdev);
+ for (; num_reports; num_reports--, ev++)
process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
ev->bdaddr_type, &ev->direct_addr,
- ev->direct_addr_type, ev->rssi, NULL, 0);
-
- ptr += sizeof(*ev);
- }
+ ev->direct_addr_type, ev->rssi, NULL, 0,
+ false);
hci_dev_unlock(hdev);
}
@@ -5718,6 +5731,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
u8 status = 0, event = hdr->evt, req_evt = 0;
u16 opcode = HCI_OP_NOP;
+ if (!event) {
+ bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
+ goto done;
+ }
+
if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
opcode = __le16_to_cpu(cmd_hdr->opcode);
@@ -5929,6 +5947,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
req_complete_skb(hdev, status, opcode, orig_skb);
}
+done:
kfree_skb(orig_skb);
kfree_skb(skb);
hdev->stat.evt_rx++;
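Several hci_event.c hunks above add the same guard before walking inquiry results: the event's advertised num_rsp must fit inside skb->len, otherwise the loop reads past the buffer. A standalone sketch of that check, using a hypothetical record layout rather than the real inquiry_info struct:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct record {
	uint8_t addr[6];
	int8_t  rssi;
};

static int parse_results(const uint8_t *buf, size_t len)
{
	uint8_t num_rsp;
	struct record rec;

	if (len < 1)
		return -1;
	num_rsp = buf[0];

	/* Same shape as the kernel check:
	 * skb->len < num_rsp * sizeof(*info) + 1
	 */
	if (!num_rsp || len < (size_t)num_rsp * sizeof(rec) + 1)
		return -1;

	for (size_t i = 0; i < num_rsp; i++) {
		memcpy(&rec, buf + 1 + i * sizeof(rec), sizeof(rec));
		printf("record %zu: rssi %d\n", i, rec.rssi);
	}
	return 0;
}

int main(void)
{
	uint8_t ok[1 + sizeof(struct record)] = {
		1, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, (uint8_t)-40
	};
	uint8_t bad[2] = { 5, 0x00 };   /* claims 5 records, carries none */

	printf("ok:  %d\n", parse_results(ok, sizeof(ok)));
	printf("bad: %d\n", parse_results(bad, sizeof(bad)));
	return 0;
}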
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index a8ddd211e94c..76bd6b122724 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -271,12 +271,16 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
{
int ret;
- if (!test_bit(HCI_UP, &hdev->flags))
- return -ENETDOWN;
-
/* Serialize all requests */
hci_req_sync_lock(hdev);
- ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
+ /* Check the state after obtaining the lock to protect HCI_UP
+ * against any races from hci_dev_do_close when the controller
+ * gets removed.
+ */
+ if (test_bit(HCI_UP, &hdev->flags))
+ ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
+ else
+ ret = -ENETDOWN;
hci_req_sync_unlock(hdev);
return ret;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 0d84d1f820d4..c0d64b4144d4 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -414,6 +414,9 @@ static void l2cap_chan_timeout(struct work_struct *work)
BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
mutex_lock(&conn->chan_lock);
+ /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
+ * this work. No need to call l2cap_chan_hold(chan) here again.
+ */
l2cap_chan_lock(chan);
if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
@@ -426,12 +429,12 @@ static void l2cap_chan_timeout(struct work_struct *work)
l2cap_chan_close(chan, reason);
- l2cap_chan_unlock(chan);
-
chan->ops->close(chan);
- mutex_unlock(&conn->chan_lock);
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
+
+ mutex_unlock(&conn->chan_lock);
}
struct l2cap_chan *l2cap_chan_create(void)
@@ -442,6 +445,8 @@ struct l2cap_chan *l2cap_chan_create(void)
if (!chan)
return NULL;
+ skb_queue_head_init(&chan->tx_q);
+ skb_queue_head_init(&chan->srej_q);
mutex_init(&chan->lock);
/* Set default lock nesting level */
@@ -507,7 +512,9 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+
chan->conf_state = 0;
+ set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
@@ -1725,9 +1732,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
l2cap_chan_del(chan, err);
- l2cap_chan_unlock(chan);
-
chan->ops->close(chan);
+
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
}
@@ -4114,7 +4121,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
return 0;
}
- if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
+ if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
+ chan->state != BT_CONNECTED) {
cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
chan->dcid);
goto unlock;
@@ -4337,6 +4345,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
return 0;
}
+ l2cap_chan_hold(chan);
l2cap_chan_lock(chan);
rsp.dcid = cpu_to_le16(chan->scid);
@@ -4345,12 +4354,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
chan->ops->set_shutdown(chan);
- l2cap_chan_hold(chan);
l2cap_chan_del(chan, ECONNRESET);
- l2cap_chan_unlock(chan);
-
chan->ops->close(chan);
+
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -4382,20 +4390,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
return 0;
}
+ l2cap_chan_hold(chan);
l2cap_chan_lock(chan);
if (chan->state != BT_DISCONN) {
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
return 0;
}
- l2cap_chan_hold(chan);
l2cap_chan_del(chan, 0);
- l2cap_chan_unlock(chan);
-
chan->ops->close(chan);
+
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -6678,9 +6687,10 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
goto drop;
}
- if ((chan->mode == L2CAP_MODE_ERTM ||
- chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
- goto drop;
+ if (chan->ops->filter) {
+ if (chan->ops->filter(chan, skb))
+ goto drop;
+ }
if (!control->sframe) {
int err;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index a3a2cd55e23a..967a9bb14415 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -179,9 +179,17 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct sockaddr_l2 la;
int len, err = 0;
+ bool zapped;
BT_DBG("sk %p", sk);
+ lock_sock(sk);
+ zapped = sock_flag(sk, SOCK_ZAPPED);
+ release_sock(sk);
+
+ if (zapped)
+ return -EINVAL;
+
if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
@@ -1039,7 +1047,7 @@ done:
}
/* Kill socket (only if zapped and orphan)
- * Must be called on unlocked socket.
+ * Must be called on unlocked socket, with l2cap channel lock.
*/
static void l2cap_sock_kill(struct sock *sk)
{
@@ -1190,6 +1198,7 @@ static int l2cap_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err;
+ struct l2cap_chan *chan;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -1199,9 +1208,17 @@ static int l2cap_sock_release(struct socket *sock)
bt_sock_unlink(&l2cap_sk_list, sk);
err = l2cap_sock_shutdown(sock, 2);
+ chan = l2cap_pi(sk)->chan;
+
+ l2cap_chan_hold(chan);
+ l2cap_chan_lock(chan);
sock_orphan(sk);
l2cap_sock_kill(sk);
+
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+
return err;
}
@@ -1219,12 +1236,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
BT_DBG("child chan %p state %s", chan,
state_to_string(chan->state));
+ l2cap_chan_hold(chan);
l2cap_chan_lock(chan);
+
__clear_chan_timer(chan);
l2cap_chan_close(chan, ECONNRESET);
- l2cap_chan_unlock(chan);
-
l2cap_sock_kill(sk);
+
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
}
}
@@ -1329,8 +1349,6 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
parent = bt_sk(sk)->parent;
- sock_set_flag(sk, SOCK_ZAPPED);
-
switch (chan->state) {
case BT_OPEN:
case BT_BOUND:
@@ -1357,8 +1375,11 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
break;
}
-
release_sock(sk);
+
+ /* Only zap after cleanup to avoid use after free race */
+ sock_set_flag(sk, SOCK_ZAPPED);
+
}
static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
@@ -1464,6 +1485,19 @@ static void l2cap_sock_suspend_cb(struct l2cap_chan *chan)
sk->sk_state_change(sk);
}
+static int l2cap_sock_filter(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ struct sock *sk = chan->data;
+
+ switch (chan->mode) {
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ return sk_filter(sk, skb);
+ }
+
+ return 0;
+}
+
static const struct l2cap_ops l2cap_chan_ops = {
.name = "L2CAP Socket Interface",
.new_connection = l2cap_sock_new_connection_cb,
@@ -1478,6 +1512,7 @@ static const struct l2cap_ops l2cap_chan_ops = {
.set_shutdown = l2cap_sock_set_shutdown_cb,
.get_sndtimeo = l2cap_sock_get_sndtimeo_cb,
.alloc_skb = l2cap_sock_alloc_skb_cb,
+ .filter = l2cap_sock_filter,
};
static void l2cap_sock_destruct(struct sock *sk)
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index ccce954f8146..5340b1097afb 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -756,7 +756,8 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (lmp_ssp_capable(hdev)) {
settings |= MGMT_SETTING_SSP;
- settings |= MGMT_SETTING_HS;
+ if (IS_ENABLED(CONFIG_BT_HS))
+ settings |= MGMT_SETTING_HS;
}
if (lmp_sc_capable(hdev))
@@ -1771,6 +1772,10 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
BT_DBG("request for %s", hdev->name);
+ if (!IS_ENABLED(CONFIG_BT_HS))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
status = mgmt_bredr_support(hdev);
if (status)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index cc2f7ca91ccd..719ae1dff7b4 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2703,6 +2703,15 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
if (skb->len < sizeof(*key))
return SMP_INVALID_PARAMS;
+ /* Check if remote and local public keys are the same and debug key is
+ * not in use.
+ */
+ if (!test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags) &&
+ !crypto_memneq(key, smp->local_pk, 64)) {
+ bt_dev_err(hdev, "Remote and local public keys are identical");
+ return SMP_UNSPECIFIED;
+ }
+
memcpy(smp->remote_pk, key, 64);
if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags)) {
diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
index d42e3904b498..b52e70362268 100644
--- a/net/bridge/br_arp_nd_proxy.c
+++ b/net/bridge/br_arp_nd_proxy.c
@@ -158,7 +158,9 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
if (br->neigh_suppress_enabled) {
if (p && (p->flags & BR_NEIGH_SUPPRESS))
return;
- if (ipv4_is_zeronet(sip) || sip == tip) {
+ if (parp->ar_op != htons(ARPOP_RREQUEST) &&
+ parp->ar_op != htons(ARPOP_RREPLY) &&
+ (ipv4_is_zeronet(sip) || sip == tip)) {
/* prevent flooding to neigh suppress ports */
BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
return;
@@ -277,6 +279,10 @@ static void br_nd_send(struct net_bridge *br, struct net_bridge_port *p,
ns_olen = request->len - (skb_network_offset(request) +
sizeof(struct ipv6hdr)) - sizeof(*ns);
for (i = 0; i < ns_olen - 1; i += (ns->opt[i + 1] << 3)) {
+ if (!ns->opt[i + 1]) {
+ kfree_skb(reply);
+ return;
+ }
if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
break;
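The br_nd_send() hunk above adds a guard while walking neighbour-solicitation options: the loop stride is opt[i + 1] << 3, so an option with a zero length byte would never advance and the walk would spin forever. A standalone sketch of the same TLV walk with that guard (generic type/length layout, values made up):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int find_option(const uint8_t *opt, size_t olen, uint8_t want)
{
	size_t i = 0;

	while (i + 1 < olen) {
		uint8_t type = opt[i];
		size_t len = (size_t)opt[i + 1] << 3;  /* length unit: 8 bytes */

		if (!len)       /* zero stride: malformed, would loop forever */
			return -1;
		if (type == want)
			return (int)i;
		i += len;
	}
	return -1;
}

int main(void)
{
	uint8_t good[] = { 2, 1, 0, 0, 0, 0, 0, 0,   1, 1, 0, 0, 0, 0, 0, 0 };
	uint8_t bad[]  = { 2, 0, 0, 0 };        /* zero-length option */

	printf("good: option 1 at offset %d\n", find_option(good, sizeof(good), 1));
	printf("bad:  %d\n", find_option(bad, sizeof(bad), 1));
	return 0;
}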
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 9ce661e2590d..a350c05b7ff5 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -215,6 +215,7 @@ static void br_get_stats64(struct net_device *dev,
sum.rx_packets += tmp.rx_packets;
}
+ netdev_stats_to_stats64(stats, &dev->stats);
stats->tx_bytes = sum.tx_bytes;
stats->tx_packets = sum.tx_packets;
stats->rx_bytes = sum.rx_bytes;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index ccab290c14d4..c5380c6baf2e 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -719,6 +719,11 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
mtu_reserved = nf_bridge_mtu_reduction(skb);
mtu = skb->dev->mtu;
+ if (nf_bridge->pkt_otherhost) {
+ skb->pkt_type = PACKET_OTHERHOST;
+ nf_bridge->pkt_otherhost = false;
+ }
+
if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
mtu = nf_bridge->frag_max_size;
@@ -812,8 +817,6 @@ static unsigned int br_nf_post_routing(void *priv,
else
return NF_ACCEPT;
- /* We assume any code from br_dev_queue_push_xmit onwards doesn't care
- * about the value of skb->pkt_type. */
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
nf_bridge->pkt_otherhost = true;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 11ed2029985f..33b8222db75c 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -202,8 +202,8 @@ struct net_bridge_port_group {
struct rcu_head rcu;
struct timer_list timer;
struct br_ip addr;
+ unsigned char eth_addr[ETH_ALEN] __aligned(2);
unsigned char flags;
- unsigned char eth_addr[ETH_ALEN];
};
struct net_bridge_mdb_entry
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 7c87a2fe5248..e9e2a3b1f477 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -59,9 +59,8 @@ static BRPORT_ATTR(_name, 0644, \
static int store_flag(struct net_bridge_port *p, unsigned long v,
unsigned long mask)
{
- unsigned long flags;
-
- flags = p->flags;
+ unsigned long flags = p->flags;
+ int err;
if (v)
flags |= mask;
@@ -69,6 +68,10 @@ static int store_flag(struct net_bridge_port *p, unsigned long v,
flags &= ~mask;
if (flags != p->flags) {
+ err = br_switchdev_set_port_flag(p, flags, mask);
+ if (err)
+ return err;
+
p->flags = flags;
br_port_flags_change(p, mask);
}
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 5f3950f00f73..a82d0021d461 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -242,8 +242,10 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
}
masterv = br_vlan_get_master(br, v->vid);
- if (!masterv)
+ if (!masterv) {
+ err = -ENOMEM;
goto out_filt;
+ }
v->brvlan = masterv;
v->stats = masterv->stats;
} else {
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 419e8edf23ba..c9ec46f5313f 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -34,6 +34,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
eth->h_proto = eth_hdr(oldskb)->h_proto;
skb_pull(nskb, ETH_HLEN);
+
+ if (skb_vlan_tag_present(oldskb)) {
+ u16 vid = skb_vlan_tag_get(oldskb);
+
+ __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid);
+ }
}
static int nft_bridge_iphdr_validate(struct sk_buff *skb)
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 04132b0b5d36..b3edb8092124 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -722,16 +722,25 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
- cfd->len > CAN_MAX_DLEN)) {
- pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
+ if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) {
+ pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
+ dev->type, skb->len);
+ goto free_skb;
+ }
+
+ /* This check is made separately since cfd->len would be uninitialized if skb->len == 0. */
+ if (unlikely(cfd->len > CAN_MAX_DLEN)) {
+ pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d, datalen %d\n",
dev->type, skb->len, cfd->len);
- kfree_skb(skb);
- return NET_RX_DROP;
+ goto free_skb;
}
can_receive(skb, dev);
return NET_RX_SUCCESS;
+
+free_skb:
+ kfree_skb(skb);
+ return NET_RX_DROP;
}
static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -739,16 +748,25 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
- cfd->len > CANFD_MAX_DLEN)) {
- pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
+ if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU)) {
+ pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
+ dev->type, skb->len);
+ goto free_skb;
+ }
+
+ /* This check is made separately since cfd->len would be uninitialized if skb->len == 0. */
+ if (unlikely(cfd->len > CANFD_MAX_DLEN)) {
+ pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d, datalen %d\n",
dev->type, skb->len, cfd->len);
- kfree_skb(skb);
- return NET_RX_DROP;
+ goto free_skb;
}
can_receive(skb, dev);
return NET_RX_SUCCESS;
+
+free_skb:
+ kfree_skb(skb);
+ return NET_RX_DROP;
}
/*
diff --git a/net/can/proc.c b/net/can/proc.c
index 70fea17bb04c..a3071f43acd7 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -467,6 +467,9 @@ void can_init_proc(struct net *net)
*/
void can_remove_proc(struct net *net)
{
+ if (!net->can.proc_dir)
+ return;
+
if (net->can.pde_version)
remove_proc_entry(CAN_PROC_VERSION, net->can.proc_dir);
@@ -494,6 +497,5 @@ void can_remove_proc(struct net *net)
if (net->can.pde_rcvlist_sff)
remove_proc_entry(CAN_PROC_RCVLIST_SFF, net->can.proc_dir);
- if (net->can.proc_dir)
- remove_proc_entry("can", net->proc_net);
+ remove_proc_entry("can", net->proc_net);
}
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index f7d7f32ac673..21bd37ec5511 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -3037,6 +3037,11 @@ static void con_fault(struct ceph_connection *con)
ceph_msg_put(con->in_msg);
con->in_msg = NULL;
}
+ if (con->out_msg) {
+ BUG_ON(con->out_msg->con != con);
+ ceph_msg_put(con->out_msg);
+ con->out_msg = NULL;
+ }
/* Requeue anything that hasn't been acked */
list_splice_init(&con->out_sent, &con->out_queue);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 76c41a84550e..713fe1fbcb18 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -442,6 +442,7 @@ static void target_copy(struct ceph_osd_request_target *dest,
dest->size = src->size;
dest->min_size = src->min_size;
dest->sort_bitwise = src->sort_bitwise;
+ dest->recovery_deletes = src->recovery_deletes;
dest->flags = src->flags;
dest->paused = src->paused;
@@ -3540,7 +3541,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
* supported.
*/
req->r_t.target_oloc.pool = m.redirect.oloc.pool;
- req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
+ req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
+ CEPH_OSD_FLAG_IGNORE_OVERLAY |
+ CEPH_OSD_FLAG_IGNORE_CACHE;
req->r_tid = 0;
__submit_request(req, false);
goto out_unlock_osdc;
diff --git a/net/compat.c b/net/compat.c
index 3c4b0283b29a..2a8c7cb5f06a 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -289,6 +289,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
break;
}
/* Bump the usage count and install the file. */
+ __receive_sock(fp[i]);
fd_install(new_fd, get_file(fp[i]));
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 2f4d35101f4d..7803bd9628dc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -83,6 +83,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
@@ -195,7 +196,7 @@ static DEFINE_SPINLOCK(napi_hash_lock);
static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
-static seqcount_t devnet_rename_seq;
+static DECLARE_RWSEM(devnet_rename_sem);
static inline void dev_base_seq_inc(struct net *net)
{
@@ -899,33 +900,28 @@ EXPORT_SYMBOL(dev_get_by_napi_id);
* @net: network namespace
* @name: a pointer to the buffer where the name will be stored.
* @ifindex: the ifindex of the interface to get the name from.
- *
- * The use of raw_seqcount_begin() and cond_resched() before
- * retrying is required as we want to give the writers a chance
- * to complete when CONFIG_PREEMPT is not set.
*/
int netdev_get_name(struct net *net, char *name, int ifindex)
{
struct net_device *dev;
- unsigned int seq;
+ int ret;
-retry:
- seq = raw_seqcount_begin(&devnet_rename_seq);
+ down_read(&devnet_rename_sem);
rcu_read_lock();
+
dev = dev_get_by_index_rcu(net, ifindex);
if (!dev) {
- rcu_read_unlock();
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
strcpy(name, dev->name);
- rcu_read_unlock();
- if (read_seqcount_retry(&devnet_rename_seq, seq)) {
- cond_resched();
- goto retry;
- }
- return 0;
+ ret = 0;
+out:
+ rcu_read_unlock();
+ up_read(&devnet_rename_sem);
+ return ret;
}
/**
@@ -1198,10 +1194,10 @@ int dev_change_name(struct net_device *dev, const char *newname)
likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
return -EBUSY;
- write_seqcount_begin(&devnet_rename_seq);
+ down_write(&devnet_rename_sem);
if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
- write_seqcount_end(&devnet_rename_seq);
+ up_write(&devnet_rename_sem);
return 0;
}
@@ -1209,7 +1205,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
err = dev_get_valid_name(net, dev, newname);
if (err < 0) {
- write_seqcount_end(&devnet_rename_seq);
+ up_write(&devnet_rename_sem);
return err;
}
@@ -1224,11 +1220,11 @@ rollback:
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
- write_seqcount_end(&devnet_rename_seq);
+ up_write(&devnet_rename_sem);
return ret;
}
- write_seqcount_end(&devnet_rename_seq);
+ up_write(&devnet_rename_sem);
netdev_adjacent_rename_links(dev, oldname);
@@ -1249,7 +1245,7 @@ rollback:
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
- write_seqcount_begin(&devnet_rename_seq);
+ down_write(&devnet_rename_sem);
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
@@ -3538,9 +3534,6 @@ static void skb_update_prio(struct sk_buff *skb)
#define skb_update_prio(skb)
#endif
-DEFINE_PER_CPU(int, xmit_recursion);
-EXPORT_SYMBOL(xmit_recursion);
-
/**
* dev_loopback_xmit - loop back @skb
* @net: network namespace this loopback is happening in
@@ -3831,8 +3824,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
int cpu = smp_processor_id(); /* ok because BHs are off */
if (txq->xmit_lock_owner != cpu) {
- if (unlikely(__this_cpu_read(xmit_recursion) >
- XMIT_RECURSION_LIMIT))
+ if (dev_xmit_recursion())
goto recursion_alert;
skb = validate_xmit_skb(skb, dev, &again);
@@ -3842,9 +3834,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
- __this_cpu_inc(xmit_recursion);
+ dev_xmit_recursion_inc();
skb = dev_hard_start_xmit(skb, dev, txq, &rc);
- __this_cpu_dec(xmit_recursion);
+ dev_xmit_recursion_dec();
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
@@ -3907,10 +3899,12 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
local_bh_disable();
+ dev_xmit_recursion_inc();
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_drv_stopped(txq))
ret = netdev_start_xmit(skb, dev, txq, false);
HARD_TX_UNLOCK(dev, txq);
+ dev_xmit_recursion_dec();
local_bh_enable();
@@ -3934,7 +3928,8 @@ EXPORT_SYMBOL(netdev_max_backlog);
int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
-unsigned int __read_mostly netdev_budget_usecs = 2000;
+/* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
+unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
int weight_p __read_mostly = 64; /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
@@ -4777,11 +4772,12 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
return 0;
}
-static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
+static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
struct packet_type **ppt_prev)
{
struct packet_type *ptype, *pt_prev;
rx_handler_func_t *rx_handler;
+ struct sk_buff *skb = *pskb;
struct net_device *orig_dev;
bool deliver_exact = false;
int ret = NET_RX_DROP;
@@ -4812,8 +4808,10 @@ another_round:
ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
preempt_enable();
- if (ret2 != XDP_PASS)
- return NET_RX_DROP;
+ if (ret2 != XDP_PASS) {
+ ret = NET_RX_DROP;
+ goto out;
+ }
skb_reset_mac_len(skb);
}
@@ -4935,6 +4933,13 @@ drop:
}
out:
+ /* The invariant here is that if *ppt_prev is not NULL
+ * then skb should also be non-NULL.
+ *
+ * The *ppt_prev assignment above maintains this invariant,
+ * since skb is dereferenced right next to it.
+ */
+ *pskb = skb;
return ret;
}
@@ -4944,7 +4949,7 @@ static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
struct packet_type *pt_prev = NULL;
int ret;
- ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+ ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
if (pt_prev)
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
return ret;
@@ -5020,7 +5025,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo
struct packet_type *pt_prev = NULL;
skb_list_del_init(skb);
- __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+ __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
if (!pt_prev)
continue;
if (pt_curr != pt_prev || od_curr != orig_dev) {
@@ -5247,7 +5252,7 @@ static void flush_backlog(struct work_struct *work)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
input_queue_head_incr(sd);
}
}
@@ -6191,12 +6196,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
pr_err_once("netif_napi_add() called with weight %d on device %s\n",
weight, dev->name);
napi->weight = weight;
- list_add(&napi->dev_list, &dev->napi_list);
napi->dev = dev;
#ifdef CONFIG_NETPOLL
napi->poll_owner = -1;
#endif
set_bit(NAPI_STATE_SCHED, &napi->state);
+ set_bit(NAPI_STATE_NPSVC, &napi->state);
+ list_add_rcu(&napi->dev_list, &dev->napi_list);
napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
@@ -8258,11 +8264,13 @@ static void netdev_sync_lower_features(struct net_device *upper,
netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
&feature, lower->name);
lower->wanted_features &= ~feature;
- netdev_update_features(lower);
+ __netdev_update_features(lower);
if (unlikely(lower->features & feature))
netdev_WARN(upper, "failed to disable %pNF on %s!\n",
&feature, lower->name);
+ else
+ netdev_features_change(lower);
}
}
}
@@ -8343,6 +8351,11 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
}
}
+ if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
+ netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
+ features &= ~NETIF_F_HW_TLS_RX;
+ }
+
return features;
}
@@ -8744,6 +8757,13 @@ int register_netdevice(struct net_device *dev)
rcu_barrier();
dev->reg_state = NETREG_UNREGISTERED;
+ /* We should put the kobject held in
+ * netdev_unregister_kobject(); otherwise
+ * the net device cannot be freed when the
+ * driver calls free_netdev(), because the
+ * kobject is still being held.
+ */
+ kobject_put(&dev->dev.kobj);
}
/*
* Prevent userspace races by waiting until the network
@@ -9688,7 +9708,7 @@ static void __net_exit default_device_exit(struct net *net)
continue;
/* Leave virtual devices for the generic cleanup */
- if (dev->rtnl_link_ops)
+ if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
continue;
/* Push remaining network devices to init_net */
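On the netdev_budget_usecs change above: the old fixed default of 2000us is shorter than one scheduler tick unless HZ is at least 1000, so a 1-jiffy timeout could not be guaranteed; the new default is computed as exactly two jiffies. A quick standalone check of the arithmetic (HZ values below are just common configurations):

#include <stdio.h>

int main(void)
{
	const unsigned int usec_per_sec = 1000000;
	const unsigned int hz_values[] = { 100, 250, 1000 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int hz = hz_values[i];
		unsigned int jiffy_us = usec_per_sec / hz;
		unsigned int new_budget = 2 * usec_per_sec / hz;

		/* Old default was a flat 2000us regardless of HZ. */
		printf("HZ=%4u: jiffy=%5uus old=2000us new=%5uus (%u jiffies)\n",
		       hz, jiffy_us, new_budget, new_budget / jiffy_us);
	}
	return 0;
}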
diff --git a/net/core/devlink.c b/net/core/devlink.c
index a77e3777c8dd..6ad095264896 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1113,7 +1113,7 @@ static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index,
pool_index, &cur, &max);
if (err && err != -EOPNOTSUPP)
- return err;
+ goto sb_occ_get_failure;
if (!err) {
if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
goto nla_put_failure;
@@ -1126,8 +1126,10 @@ static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
return 0;
nla_put_failure:
+ err = -EMSGSIZE;
+sb_occ_get_failure:
genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
+ return err;
}
static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index c7785efeea57..3978a5e8d261 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -154,6 +154,7 @@ static void sched_send_work(struct timer_list *t)
static void trace_drop_common(struct sk_buff *skb, void *location)
{
struct net_dm_alert_msg *msg;
+ struct net_dm_drop_point *point;
struct nlmsghdr *nlh;
struct nlattr *nla;
int i;
@@ -172,11 +173,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
nlh = (struct nlmsghdr *)dskb->data;
nla = genlmsg_data(nlmsg_data(nlh));
msg = nla_data(nla);
+ point = msg->points;
for (i = 0; i < msg->entries; i++) {
- if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
- msg->points[i].count++;
+ if (!memcmp(&location, &point->pc, sizeof(void *))) {
+ point->count++;
goto out;
}
+ point++;
}
if (msg->entries == dm_hit_limit)
goto out;
@@ -185,8 +188,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
*/
__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
- memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
- msg->points[msg->entries].count = 1;
+ memcpy(point->pc, &location, sizeof(void *));
+ point->count = 1;
msg->entries++;
if (!timer_pending(&data->send_timer)) {
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 1011625a0ca4..83028017c26d 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -618,7 +618,7 @@ store_link_ksettings_for_user(void __user *to,
{
struct ethtool_link_usettings link_usettings;
- memcpy(&link_usettings.base, &from->base, sizeof(link_usettings));
+ memcpy(&link_usettings, from, sizeof(link_usettings));
bitmap_to_arr32(link_usettings.link_modes.supported,
from->link_modes.supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
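The one-line ethtool fix above replaces a memcpy() that copied sizeof(link_usettings) bytes while pointing at the smaller base member of the source, reading past that member; the fixed call points at the containing struct so the size and the source object agree. The sketch below shows the general rule with hypothetical structs; it copies just the member, which is the other safe way to keep size and object in sync:

#include <stdio.h>
#include <string.h>

struct base_settings { int speed; int duplex; };

struct ksettings {                      /* stands in for the kernel-side struct */
	struct base_settings base;
	unsigned long modes[4];
};

struct usettings {                      /* stands in for the user-facing struct */
	struct base_settings base;
	unsigned int modes32[8];
};

int main(void)
{
	struct ksettings from = { .base = { 1000, 1 }, .modes = { 0xff } };
	struct usettings to;

	/* Buggy shape removed by the patch:
	 *     memcpy(&to.base, &from.base, sizeof(to));
	 * copies sizeof(to) bytes from an object that is only
	 * sizeof(from.base) bytes long.
	 * Safe: the length names the object actually being copied.
	 */
	memcpy(&to.base, &from.base, sizeof(to.base));

	printf("speed %d duplex %d\n", to.base.speed, to.base.duplex);
	return 0;
}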
diff --git a/net/core/filter.c b/net/core/filter.c
index 40b3af05c883..01561268d216 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1730,25 +1730,27 @@ BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
u32, offset, void *, to, u32, len, u32, start_header)
{
u8 *end = skb_tail_pointer(skb);
- u8 *net = skb_network_header(skb);
- u8 *mac = skb_mac_header(skb);
- u8 *ptr;
+ u8 *start, *ptr;
- if (unlikely(offset > 0xffff || len > (end - mac)))
+ if (unlikely(offset > 0xffff))
goto err_clear;
switch (start_header) {
case BPF_HDR_START_MAC:
- ptr = mac + offset;
+ if (unlikely(!skb_mac_header_was_set(skb)))
+ goto err_clear;
+ start = skb_mac_header(skb);
break;
case BPF_HDR_START_NET:
- ptr = net + offset;
+ start = skb_network_header(skb);
break;
default:
goto err_clear;
}
- if (likely(ptr >= mac && ptr + len <= end)) {
+ ptr = start + offset;
+
+ if (likely(ptr + len <= end)) {
memcpy(to, ptr, len);
return 0;
}
@@ -2000,7 +2002,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
int ret;
- if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
+ if (dev_xmit_recursion()) {
net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
kfree_skb(skb);
return -ENETDOWN;
@@ -2009,9 +2011,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
skb->dev = dev;
skb->tstamp = 0;
- __this_cpu_inc(xmit_recursion);
+ dev_xmit_recursion_inc();
ret = dev_queue_xmit(skb);
- __this_cpu_dec(xmit_recursion);
+ dev_xmit_recursion_dec();
return ret;
}
@@ -2693,7 +2695,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
{
- __be16 from_proto = skb->protocol;
+ __be16 from_proto = skb_protocol(skb, true);
if (from_proto == htons(ETH_P_IP) &&
to_proto == htons(ETH_P_IPV6))
@@ -2766,7 +2768,7 @@ static const struct bpf_func_proto bpf_skb_change_type_proto = {
static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
{
- switch (skb->protocol) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
return sizeof(struct iphdr);
case htons(ETH_P_IPV6):
@@ -2834,19 +2836,15 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
return 0;
}
-static u32 __bpf_skb_max_len(const struct sk_buff *skb)
-{
- return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
- SKB_MAX_ALLOC;
-}
+#define BPF_SKB_MAX_LEN SKB_MAX_ALLOC
static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
{
bool trans_same = skb->transport_header == skb->network_header;
u32 len_cur, len_diff_abs = abs(len_diff);
u32 len_min = bpf_skb_net_base_len(skb);
- u32 len_max = __bpf_skb_max_len(skb);
- __be16 proto = skb->protocol;
+ u32 len_max = BPF_SKB_MAX_LEN;
+ __be16 proto = skb_protocol(skb, true);
bool shrink = len_diff < 0;
int ret;
@@ -2924,7 +2922,7 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
u64 flags)
{
- u32 max_len = __bpf_skb_max_len(skb);
+ u32 max_len = BPF_SKB_MAX_LEN;
u32 min_len = __bpf_skb_min_len(skb);
int ret;
@@ -3000,7 +2998,7 @@ static const struct bpf_func_proto sk_skb_change_tail_proto = {
static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
u64 flags)
{
- u32 max_len = __bpf_skb_max_len(skb);
+ u32 max_len = BPF_SKB_MAX_LEN;
u32 new_len = skb->len + head_room;
int ret;
@@ -3022,6 +3020,7 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
__skb_push(skb, head_room);
memset(skb->data, 0, head_room);
skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
}
return ret;
@@ -4498,6 +4497,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
{
struct net *net = dev_net(skb->dev);
int rc = -EAFNOSUPPORT;
+ bool check_mtu = false;
if (plen < sizeof(*params))
return -EINVAL;
@@ -4505,22 +4505,28 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
return -EINVAL;
+ if (params->tot_len)
+ check_mtu = true;
+
switch (params->family) {
#if IS_ENABLED(CONFIG_INET)
case AF_INET:
- rc = bpf_ipv4_fib_lookup(net, params, flags, false);
+ rc = bpf_ipv4_fib_lookup(net, params, flags, check_mtu);
break;
#endif
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- rc = bpf_ipv6_fib_lookup(net, params, flags, false);
+ rc = bpf_ipv6_fib_lookup(net, params, flags, check_mtu);
break;
#endif
}
- if (!rc) {
+ if (rc == BPF_FIB_LKUP_RET_SUCCESS && !check_mtu) {
struct net_device *dev;
+ /* When tot_len isn't provided by the user, check the skb
+ * against the MTU of the net_device found by the FIB lookup.
+ */
dev = dev_get_by_index_rcu(net, params->ifindex);
if (!is_skb_forwardable(dev, skb))
rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
@@ -4550,7 +4556,7 @@ static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len
switch (type) {
case BPF_LWT_ENCAP_SEG6_INLINE:
- if (skb->protocol != htons(ETH_P_IPV6))
+ if (skb_protocol(skb, true) != htons(ETH_P_IPV6))
return -EBADMSG;
err = seg6_do_srh_inline(skb, srh);
@@ -5416,8 +5422,6 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
bool indirect = BPF_MODE(orig->code) == BPF_IND;
struct bpf_insn *insn = insn_buf;
- /* We're guaranteed here that CTX is in R6. */
- *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
if (!indirect) {
*insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
} else {
@@ -5425,6 +5429,8 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
if (orig->imm)
*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
}
+ /* We're guaranteed here that CTX is in R6. */
+ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
switch (BPF_SIZE(orig->code)) {
case BPF_B:
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index e4e442d70c2d..752744db11ff 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -84,11 +84,11 @@ static void est_timer(struct timer_list *t)
u64 rate, brate;
est_fetch_counters(est, &b);
- brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
- brate -= (est->avbps >> est->ewma_log);
+ brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
+ brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
- rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
- rate -= (est->avpps >> est->ewma_log);
+ rate = (u64)(b.packets - est->last_packets) << (10 - est->intvl_log);
+ rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
write_seqcount_begin(&est->seq);
est->avbps += brate;
@@ -147,6 +147,9 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
if (parm->interval < -2 || parm->interval > 3)
return -EINVAL;
+ if (parm->ewma_log == 0 || parm->ewma_log >= 31)
+ return -EINVAL;
+
est = kzalloc(sizeof(*est), GFP_KERNEL);
if (!est)
return -ENOBUFS;
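The gen_estimator.c change above also rejects ewma_log values of 0 or >= 31 and reorders the arithmetic so each term is shifted down by ewma_log before the subtraction, rather than folding ewma_log into the left-shift amount, where a large ewma_log could make the shift count negative. A standalone sketch of the resulting exponentially weighted moving average update (sample values and the ewma_log choice below are made up, not the kernel defaults):

#include <stdio.h>

int main(void)
{
	long long avg = 0;                      /* smoothed rate, like est->avbps */
	const unsigned int ewma_log = 3;        /* weight 1/2^3 per interval */
	long long samples[] = { 800, 1200, 1000, 1000, 4000, 1000 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		/* Same shape as the patched est_timer(): shift each term by
		 * ewma_log separately, then accumulate the difference, i.e.
		 * avg += sample/2^ewma_log - avg/2^ewma_log.
		 */
		long long delta = (samples[i] >> ewma_log) - (avg >> ewma_log);

		avg += delta;
		printf("sample %5lld -> avg %5lld\n", samples[i], avg);
	}
	return 0;
}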
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index a648568c5e8f..4a5f4fbffd83 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -44,12 +44,11 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
{
int ret;
- /* Preempt disable is needed to protect per-cpu redirect_info between
- * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and
- * access to maps strictly require a rcu_read_lock() for protection,
- * mixing with BH RCU lock doesn't work.
+ /* Preempt disable and BH disable are needed to protect per-cpu
+ * redirect_info between BPF prog and skb_do_redirect().
*/
preempt_disable();
+ local_bh_disable();
bpf_compute_data_pointers(skb);
ret = bpf_prog_run_save_cb(lwt->prog, skb);
@@ -82,6 +81,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
break;
}
+ local_bh_enable();
preempt_enable();
return ret;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index bf738ec68cb5..e471c32e448f 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1271,7 +1271,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
* we can reinject the packet there.
*/
n2 = NULL;
- if (dst) {
+ if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
n2 = dst_neigh_lookup_skb(dst, skb);
if (n2)
n1 = n2;
@@ -2844,6 +2844,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
*pos = cpu+1;
return per_cpu_ptr(tbl->stats, cpu);
}
+ (*pos)++;
return NULL;
}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 7614a4f42bfc..fe0d255d66c8 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1045,7 +1045,7 @@ static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
trans_timeout = queue->trans_timeout;
spin_unlock_irq(&queue->_xmit_lock);
- return sprintf(buf, "%lu", trans_timeout);
+ return sprintf(buf, fmt_ulong, trans_timeout);
}
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
@@ -1244,8 +1244,8 @@ static const struct attribute_group dql_group = {
static ssize_t xps_cpus_show(struct netdev_queue *queue,
char *buf)
{
+ int cpu, len, ret, num_tc = 1, tc = 0;
struct net_device *dev = queue->dev;
- int cpu, len, num_tc = 1, tc = 0;
struct xps_dev_maps *dev_maps;
cpumask_var_t mask;
unsigned long index;
@@ -1255,22 +1255,31 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
index = get_netdev_queue_index(queue);
+ if (!rtnl_trylock())
+ return restart_syscall();
+
if (dev->num_tc) {
/* Do not allow XPS on subordinate device directly */
num_tc = dev->num_tc;
- if (num_tc < 0)
- return -EINVAL;
+ if (num_tc < 0) {
+ ret = -EINVAL;
+ goto err_rtnl_unlock;
+ }
/* If queue belongs to subordinate dev use its map */
dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
tc = netdev_txq_to_tc(dev, index);
- if (tc < 0)
- return -EINVAL;
+ if (tc < 0) {
+ ret = -EINVAL;
+ goto err_rtnl_unlock;
+ }
}
- if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
- return -ENOMEM;
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto err_rtnl_unlock;
+ }
rcu_read_lock();
dev_maps = rcu_dereference(dev->xps_cpus_map);
@@ -1293,9 +1302,15 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
}
rcu_read_unlock();
+ rtnl_unlock();
+
len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
free_cpumask_var(mask);
return len < PAGE_SIZE ? len : -EINVAL;
+
+err_rtnl_unlock:
+ rtnl_unlock();
+ return ret;
}
static ssize_t xps_cpus_store(struct netdev_queue *queue,
@@ -1323,7 +1338,13 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue,
return err;
}
+ if (!rtnl_trylock()) {
+ free_cpumask_var(mask);
+ return restart_syscall();
+ }
+
err = netif_set_xps_queue(dev, mask, index);
+ rtnl_unlock();
free_cpumask_var(mask);
@@ -1335,23 +1356,30 @@ static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
{
+ int j, len, ret, num_tc = 1, tc = 0;
struct net_device *dev = queue->dev;
struct xps_dev_maps *dev_maps;
unsigned long *mask, index;
- int j, len, num_tc = 1, tc = 0;
index = get_netdev_queue_index(queue);
+ if (!rtnl_trylock())
+ return restart_syscall();
+
if (dev->num_tc) {
num_tc = dev->num_tc;
tc = netdev_txq_to_tc(dev, index);
- if (tc < 0)
- return -EINVAL;
+ if (tc < 0) {
+ ret = -EINVAL;
+ goto err_rtnl_unlock;
+ }
}
mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long),
GFP_KERNEL);
- if (!mask)
- return -ENOMEM;
+ if (!mask) {
+ ret = -ENOMEM;
+ goto err_rtnl_unlock;
+ }
rcu_read_lock();
dev_maps = rcu_dereference(dev->xps_rxqs_map);
@@ -1377,10 +1405,16 @@ static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
out_no_maps:
rcu_read_unlock();
+ rtnl_unlock();
+
len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
kfree(mask);
return len < PAGE_SIZE ? len : -EINVAL;
+
+err_rtnl_unlock:
+ rtnl_unlock();
+ return ret;
}
static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
@@ -1407,10 +1441,17 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
return err;
}
+ if (!rtnl_trylock()) {
+ bitmap_free(mask);
+ return restart_syscall();
+ }
+
cpus_read_lock();
err = __netif_set_xps_queue(dev, mask, index, true);
cpus_read_unlock();
+ rtnl_unlock();
+
kfree(mask);
return err ? : len;
}
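The net-sysfs.c hunks above switch the xps show/store handlers to rtnl_trylock() plus restart_syscall() instead of blocking on the rtnl lock; the usual reason for this pattern in sysfs handlers is to avoid sleeping on rtnl while another path already holds it and is waiting for the sysfs attributes to go away. A rough userspace analogue of the try-then-restart shape (pthreads, made-up names; restart_syscall() is approximated by a retry loop, build with cc -pthread):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int xps_setting;

static int store_setting(int val)
{
	/* Do not block: if the lock is busy, back out and let the caller
	 * restart the whole operation later.
	 */
	if (pthread_mutex_trylock(&cfg_lock) != 0)
		return -1;

	xps_setting = val;
	pthread_mutex_unlock(&cfg_lock);
	return 0;
}

int main(void)
{
	int tries = 0;

	while (store_setting(42) != 0 && ++tries < 100)
		sched_yield();  /* crude stand-in for restart_syscall() */

	printf("stored %d after %d retries\n", xps_setting, tries);
	return 0;
}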
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 67feeb207dad..668330ace961 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -131,10 +131,8 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
cs->classid = (u32)value;
css_task_iter_start(css, 0, &it);
- while ((p = css_task_iter_next(&it))) {
+ while ((p = css_task_iter_next(&it)))
update_classid_task(p, cs->classid);
- cond_resched();
- }
css_task_iter_end(&it);
return 0;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a581cf101cd9..41e32a958d08 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
+#include <net/dsa.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
@@ -161,7 +162,7 @@ static void poll_napi(struct net_device *dev)
struct napi_struct *napi;
int cpu = smp_processor_id();
- list_for_each_entry(napi, &dev->napi_list, dev_list) {
+ list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
poll_one_napi(napi);
smp_store_release(&napi->poll_owner, -1);
@@ -638,15 +639,15 @@ EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
- struct net_device *ndev = NULL;
+ struct net_device *ndev = NULL, *dev = NULL;
+ struct net *net = current->nsproxy->net_ns;
struct in_device *in_dev;
int err;
rtnl_lock();
- if (np->dev_name[0]) {
- struct net *net = current->nsproxy->net_ns;
+ if (np->dev_name[0])
ndev = __dev_get_by_name(net, np->dev_name);
- }
+
if (!ndev) {
np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
err = -ENODEV;
@@ -654,6 +655,19 @@ int netpoll_setup(struct netpoll *np)
}
dev_hold(ndev);
+ /* bring up DSA management network devices first */
+ for_each_netdev(net, dev) {
+ if (!netdev_uses_dsa(dev))
+ continue;
+
+ err = dev_change_flags(dev, dev->flags | IFF_UP);
+ if (err < 0) {
+ np_err(np, "%s failed to open %s\n",
+ np->dev_name, dev->name);
+ goto put;
+ }
+ }
+
if (netdev_master_upper_dev_get(ndev)) {
np_err(np, "%s is a slave device, aborting\n", np->dev_name);
err = -EBUSY;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index b9057478d69c..239786608ee4 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -240,6 +240,8 @@ static void net_prio_attach(struct cgroup_taskset *tset)
struct task_struct *p;
struct cgroup_subsys_state *css;
+ cgroup_sk_alloc_disable();
+
cgroup_taskset_for_each(p, css, tset) {
void *v = (void *)(unsigned long)css->cgroup->id;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 092fa3d75b32..3714cd9e3111 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3471,7 +3471,7 @@ static int pktgen_thread_worker(void *arg)
struct pktgen_dev *pkt_dev = NULL;
int cpu = t->cpu;
- BUG_ON(smp_processor_id() != cpu);
+ WARN_ON(smp_processor_id() != cpu);
init_waitqueue_head(&t->queue);
complete(&t->start_done);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f51973f458e4..935053ee7765 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3146,7 +3146,8 @@ replay:
*/
if (err < 0) {
/* If device is not registered at all, free it now */
- if (dev->reg_state == NETREG_UNINITIALIZED)
+ if (dev->reg_state == NETREG_UNINITIALIZED ||
+ dev->reg_state == NETREG_UNREGISTERED)
free_netdev(dev);
goto out;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0629ca89ab74..ea9684bcc2e8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -398,7 +398,11 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
len += NET_SKB_PAD;
- if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+ /* If requested length is either too small or too big,
+ * we use kmalloc() for skb->head allocation.
+ */
+ if (len <= SKB_WITH_OVERHEAD(1024) ||
+ len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
if (!skb)
@@ -459,13 +463,17 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
gfp_t gfp_mask)
{
- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+ struct napi_alloc_cache *nc;
struct sk_buff *skb;
void *data;
len += NET_SKB_PAD + NET_IP_ALIGN;
- if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+ /* If requested length is either too small or too big,
+ * we use kmalloc() for skb->head allocation.
+ */
+ if (len <= SKB_WITH_OVERHEAD(1024) ||
+ len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
if (!skb)
@@ -473,6 +481,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
goto skb_success;
}
+ nc = this_cpu_ptr(&napi_alloc_cache);
len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
len = SKB_DATA_ALIGN(len);
@@ -1853,6 +1862,12 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
skb->csum = csum_block_sub(skb->csum,
skb_checksum(skb, len, delta, 0),
len);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
+ int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
+
+ if (offset + sizeof(__sum16) > hdlen)
+ return -EINVAL;
}
return __pskb_trim(skb, len);
}
@@ -3077,7 +3092,19 @@ EXPORT_SYMBOL(skb_split);
*/
static int skb_prepare_for_shift(struct sk_buff *skb)
{
- return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ int ret = 0;
+
+ if (skb_cloned(skb)) {
+ /* Save and restore truesize: pskb_expand_head() may reallocate
+ * memory where ksize(kmalloc(S)) != ksize(kmalloc(S)), but we
+ * cannot change truesize at this point.
+ */
+ unsigned int save_truesize = skb->truesize;
+
+ ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ skb->truesize = save_truesize;
+ }
+ return ret;
}
/**
@@ -4265,7 +4292,7 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
if (skb && (skb_next = skb_peek(q))) {
icmp_next = is_icmp_err_skb(skb_next);
if (icmp_next)
- sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
+ sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
}
spin_unlock_irqrestore(&q->lock, flags);
@@ -5128,8 +5155,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
goto err_free;
-
- if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+ /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
+ if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
goto err_free;
vhdr = (struct vlan_hdr *)skb->data;
@@ -5521,9 +5548,13 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
- if (k == 0) {
- /* split line is in frag list */
- pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
+ /* split line is in frag list */
+ if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
+ /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
+ if (skb_has_frag_list(skb))
+ kfree_skb_list(skb_shinfo(skb)->frag_list);
+ kfree(data);
+ return -ENOMEM;
}
skb_release_data(skb);
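The two allocation hunks in skbuff.c send requests that are either very small or larger than a page to kmalloc() and reserve the per-CPU page-fragment cache for the mid-range. A standalone sketch of that size test, assuming a 320-byte skb_shared_info overhead and 4 KiB pages purely for illustration (the kernel derives both through SKB_WITH_OVERHEAD()):

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE        4096
#define SHINFO_OVERHEAD  320               /* assumed, for illustration only */
#define WITH_OVERHEAD(x) ((x) - SHINFO_OVERHEAD)

/* Mirror of the patched test: small or oversized requests fall back
 * to kmalloc(); only the middle range uses the page-fragment cache.
 */
static bool use_kmalloc(unsigned int len)
{
	return len <= WITH_OVERHEAD(1024) || len > WITH_OVERHEAD(PAGE_SIZE);
}

int main(void)
{
	unsigned int sizes[] = { 64, 512, 1200, 3000, 4000 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("len=%4u -> %s\n", sizes[i],
		       use_kmalloc(sizes[i]) ? "kmalloc" : "page frag");
	return 0;
}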
diff --git a/net/core/sock.c b/net/core/sock.c
index 8abfde0d28ee..e6cbe137cb6f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -640,7 +640,7 @@ bool sk_mc_loop(struct sock *sk)
return inet6_sk(sk)->mc_loop;
#endif
}
- WARN_ON(1);
+ WARN_ON_ONCE(1);
return true;
}
EXPORT_SYMBOL(sk_mc_loop);
@@ -1540,6 +1540,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
cgroup_sk_alloc(&sk->sk_cgrp_data);
sock_update_classid(&sk->sk_cgrp_data);
sock_update_netprioidx(&sk->sk_cgrp_data);
+ sk_tx_queue_clear(sk);
}
return sk;
@@ -1693,7 +1694,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
/* sk->sk_memcg will be populated at accept() time */
newsk->sk_memcg = NULL;
- cgroup_sk_alloc(&newsk->sk_cgrp_data);
+ cgroup_sk_clone(&newsk->sk_cgrp_data);
rcu_read_lock();
filter = rcu_dereference(sk->sk_filter);
@@ -1747,6 +1748,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
*/
sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
+ sk_tx_queue_clear(newsk);
newsk->sk_wq = NULL;
if (newsk->sk_prot->sockets_allocated)
@@ -2634,6 +2636,27 @@ int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *
}
EXPORT_SYMBOL(sock_no_mmap);
+/*
+ * When a file is received (via SCM_RIGHTS, etc), we must bump the
+ * various sock-based usage counts.
+ */
+void __receive_sock(struct file *file)
+{
+ struct socket *sock;
+ int error;
+
+ /*
+ * The resulting value of "error" is ignored here since we only
+ * need to take action when the file is a socket and testing
+ * "sock" for NULL is sufficient.
+ */
+ sock = sock_from_file(file, &error);
+ if (sock) {
+ sock_update_netprioidx(&sock->sk->sk_cgrp_data);
+ sock_update_classid(&sock->sk->sk_cgrp_data);
+ }
+}
+
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
ssize_t res;
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index fd38cf1d2b02..375a3bbe6485 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -112,6 +112,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
more_reuse->prog = reuse->prog;
more_reuse->reuseport_id = reuse->reuseport_id;
more_reuse->bind_inany = reuse->bind_inany;
+ more_reuse->has_conns = reuse->has_conns;
memcpy(more_reuse->socks, reuse->socks,
reuse->num_socks * sizeof(struct sock *));
@@ -298,7 +299,7 @@ select_by_hash:
i = j = reciprocal_scale(hash, socks);
while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
i++;
- if (i >= reuse->num_socks)
+ if (i >= socks)
i = 0;
if (i == j)
goto out;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 6cec08cd0bb9..2597449ae9b4 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -270,7 +270,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
if (write && !ret) {
if (jit_enable < 2 ||
- (jit_enable == 2 && bpf_dump_raw_ok())) {
+ (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) {
*(int *)table->data = jit_enable;
if (jit_enable == 2)
pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index a556cd708885..33684f1818a8 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1421,6 +1421,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
{
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
+ int prio;
int err;
if (!ops)
@@ -1469,6 +1470,13 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
struct dcbnl_buffer *buffer =
nla_data(ieee[DCB_ATTR_DCB_BUFFER]);
+ for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) {
+ if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) {
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
err = ops->dcbnl_setbuffer(netdev, buffer);
if (err)
goto err;
@@ -1748,6 +1756,8 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
fn = &reply_funcs[dcb->cmd];
if (!fn->cb)
return -EOPNOTSUPP;
+ if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
+ return -EPERM;
if (!tb[DCB_ATTR_IFNAME])
return -EINVAL;
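The dcbnl_ieee_set hunk validates a DCB_ATTR_DCB_BUFFER mapping before passing it to the driver, rejecting any priority that points past the last buffer. A standalone sketch of that bounds check; the value of DCBX_MAX_BUFFERS and the eight-priority array are assumptions for illustration:

#include <stdio.h>

#define DCBX_MAX_BUFFERS 8   /* assumed value, for illustration only */
#define NUM_PRIORITIES   8   /* assumed array size, for illustration only */

/* Reject a priority-to-buffer map that references a nonexistent buffer. */
static int validate_prio2buffer(const unsigned char *prio2buffer, int n)
{
	for (int prio = 0; prio < n; prio++)
		if (prio2buffer[prio] >= DCBX_MAX_BUFFERS)
			return -1;
	return 0;
}

int main(void)
{
	unsigned char ok[NUM_PRIORITIES]  = { 0, 1, 2, 3, 0, 1, 2, 3 };
	unsigned char bad[NUM_PRIORITIES] = { 0, 1, 2, 9, 0, 1, 2, 3 };

	printf("ok:  %d\n", validate_prio2buffer(ok, NUM_PRIORITIES));
	printf("bad: %d\n", validate_prio2buffer(bad, NUM_PRIORITIES));
	return 0;
}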
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 58a401e9cf09..2cd3508a3786 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -211,7 +211,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
rcu_read_unlock();
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
dst = NULL;
@@ -282,7 +282,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
/* sk = NULL, but it is safe for now. RST socket required. */
- dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
+ dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(skb, dst);
ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
@@ -319,6 +319,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!ipv6_unicast_destination(skb))
return 0; /* discard, don't send a reset here */
+ if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
+ __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
+ return 0;
+ }
+
if (dccp_bad_service_code(sk, service)) {
dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
goto drop;
@@ -912,7 +917,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
final_p = fl6_update_dst(&fl6, opt, &final);
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto failure;
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 7f4534828f6c..a0494206cfda 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -241,7 +241,7 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
* - the key's semaphore is read-locked
*/
static long dns_resolver_read(const struct key *key,
- char __user *buffer, size_t buflen)
+ char *buffer, size_t buflen)
{
int err = PTR_ERR(key->payload.data[dns_key_error]);
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 4183e4ba27a5..81f620a3c32b 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -8,6 +8,7 @@ config NET_DSA
tristate "Distributed Switch Architecture"
depends on HAVE_NET_DSA && MAY_USE_DEVLINK
depends on BRIDGE || BRIDGE=n
+ select GRO_CELLS
select NET_SWITCHDEV
select PHYLINK
---help---
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 601534a5bfe8..598200e9d522 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -191,7 +191,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
if (dsa_skb_defer_rx_timestamp(p, skb))
return 0;
- netif_receive_skb(skb);
+ gro_cells_receive(&p->gcells, skb);
return 0;
}
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 46ae4dee522a..7c10bc4dacd3 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -261,6 +261,7 @@ static int dsa_port_setup(struct dsa_port *dp)
int err = 0;
memset(&dp->devlink_port, 0, sizeof(dp->devlink_port));
+ dp->mac = of_get_mac_address(dp->dn);
if (dp->type != DSA_PORT_TYPE_UNUSED)
err = devlink_port_register(ds->devlink, &dp->devlink_port,
@@ -412,7 +413,7 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
err = dsa_switch_setup(ds);
if (err)
- return err;
+ continue;
for (port = 0; port < ds->num_ports; port++) {
dp = &ds->ports[port];
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 3964c6f7a7c0..79d17507609a 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -15,6 +15,7 @@
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <net/dsa.h>
+#include <net/gro_cells.h>
enum {
DSA_NOTIFIER_AGEING_TIME,
@@ -72,6 +73,8 @@ struct dsa_slave_priv {
struct pcpu_sw_netstats *stats64;
+ struct gro_cells gcells;
+
/* DSA port data, such as switch, port index, etc. */
struct dsa_port *dp;
diff --git a/net/dsa/master.c b/net/dsa/master.c
index aae478d61101..22e9ce6e51a4 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -87,8 +87,7 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
struct dsa_switch *ds = cpu_dp->ds;
int port = cpu_dp->index;
int len = ETH_GSTRING_LEN;
- int mcount = 0, count;
- unsigned int i;
+ int mcount = 0, count, i;
uint8_t pfx[4];
uint8_t *ndata;
@@ -118,6 +117,8 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
*/
ds->ops->get_strings(ds, port, stringset, ndata);
count = ds->ops->get_sset_count(ds, port, stringset);
+ if (count < 0)
+ return;
for (i = 0; i < count; i++) {
memmove(ndata + (i * len + sizeof(pfx)),
ndata + i * len, len - sizeof(pfx));
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 8ee28b6016d8..b887d9edb9c3 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -598,13 +598,15 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
struct dsa_switch *ds = dp->ds;
if (sset == ETH_SS_STATS) {
- int count;
+ int count = 0;
- count = 4;
- if (ds->ops->get_sset_count)
- count += ds->ops->get_sset_count(ds, dp->index, sset);
+ if (ds->ops->get_sset_count) {
+ count = ds->ops->get_sset_count(ds, dp->index, sset);
+ if (count < 0)
+ return count;
+ }
- return count;
+ return count + 4;
}
return -EOPNOTSUPP;
@@ -1313,7 +1315,10 @@ int dsa_slave_create(struct dsa_port *port)
slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
slave_dev->hw_features |= NETIF_F_HW_TC;
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
- eth_hw_addr_inherit(slave_dev, master);
+ if (port->mac && is_valid_ether_addr(port->mac))
+ ether_addr_copy(slave_dev->dev_addr, port->mac);
+ else
+ eth_hw_addr_inherit(slave_dev, master);
slave_dev->priv_flags |= IFF_NO_QUEUE;
slave_dev->netdev_ops = &dsa_slave_netdev_ops;
slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
@@ -1334,6 +1339,11 @@ int dsa_slave_create(struct dsa_port *port)
free_netdev(slave_dev);
return -ENOMEM;
}
+
+ ret = gro_cells_init(&p->gcells, slave_dev);
+ if (ret)
+ goto out_free;
+
p->dp = port;
INIT_LIST_HEAD(&p->mall_tc_list);
p->xmit = cpu_dp->tag_ops->xmit;
@@ -1344,7 +1354,7 @@ int dsa_slave_create(struct dsa_port *port)
ret = dsa_slave_phy_setup(slave_dev);
if (ret) {
netdev_err(master, "error %d setting up slave phy\n", ret);
- goto out_free;
+ goto out_gcells;
}
dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
@@ -1363,6 +1373,8 @@ out_phy:
phylink_disconnect_phy(p->dp->pl);
rtnl_unlock();
phylink_destroy(p->dp->pl);
+out_gcells:
+ gro_cells_destroy(&p->gcells);
out_free:
free_percpu(p->stats64);
free_netdev(slave_dev);
@@ -1383,6 +1395,7 @@ void dsa_slave_destroy(struct net_device *slave_dev)
dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
unregister_netdev(slave_dev);
phylink_destroy(dp->pl);
+ gro_cells_destroy(&p->gcells);
free_percpu(p->stats64);
free_netdev(slave_dev);
}
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 4083326b806e..d62d28d358d9 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -17,6 +17,16 @@
#define DSA_HLEN 4
#define EDSA_HLEN 8
+#define FRAME_TYPE_TO_CPU 0x00
+#define FRAME_TYPE_FORWARD 0x03
+
+#define TO_CPU_CODE_MGMT_TRAP 0x00
+#define TO_CPU_CODE_FRAME2REG 0x01
+#define TO_CPU_CODE_IGMP_MLD_TRAP 0x02
+#define TO_CPU_CODE_POLICY_TRAP 0x03
+#define TO_CPU_CODE_ARP_MIRROR 0x04
+#define TO_CPU_CODE_POLICY_MIRROR 0x05
+
static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
@@ -81,6 +91,8 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt)
{
u8 *edsa_header;
+ int frame_type;
+ int code;
int source_device;
int source_port;
@@ -95,8 +107,29 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
/*
* Check that frame type is either TO_CPU or FORWARD.
*/
- if ((edsa_header[0] & 0xc0) != 0x00 && (edsa_header[0] & 0xc0) != 0xc0)
+ frame_type = edsa_header[0] >> 6;
+
+ switch (frame_type) {
+ case FRAME_TYPE_TO_CPU:
+ code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1);
+
+ /*
+ * Mark the frame to never egress on any port of the same switch
+ * unless it's a trapped IGMP/MLD packet, in which case the
+ * bridge might want to forward it.
+ */
+ if (code != TO_CPU_CODE_IGMP_MLD_TRAP)
+ skb->offload_fwd_mark = 1;
+
+ break;
+
+ case FRAME_TYPE_FORWARD:
+ skb->offload_fwd_mark = 1;
+ break;
+
+ default:
return NULL;
+ }
/*
* Determine source device and port.
@@ -160,8 +193,6 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
2 * ETH_ALEN);
}
- skb->offload_fwd_mark = 1;
-
return skb;
}
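The edsa_rcv hunk replaces the raw 0xc0 mask test with a switch on the frame type held in the top two bits of the tag, and decodes the TO_CPU code so trapped IGMP/MLD frames are left without offload_fwd_mark. A standalone sketch of the same bit extraction over a made-up 4-byte header:

#include <stdio.h>

#define FRAME_TYPE_TO_CPU         0x00
#define FRAME_TYPE_FORWARD        0x03
#define TO_CPU_CODE_IGMP_MLD_TRAP 0x02

int main(void)
{
	/* made-up EDSA tag bytes, for illustration only */
	unsigned char hdr[4] = { 0x00, 0x02, 0x00, 0x00 };

	int frame_type = hdr[0] >> 6;                    /* top two bits */
	int code = (hdr[1] & 0x6) | ((hdr[2] >> 4) & 1); /* TO_CPU code */
	int offload_fwd_mark;

	switch (frame_type) {
	case FRAME_TYPE_TO_CPU:
		/* trapped IGMP/MLD must stay visible to the bridge */
		offload_fwd_mark = (code != TO_CPU_CODE_IGMP_MLD_TRAP);
		break;
	case FRAME_TYPE_FORWARD:
		offload_fwd_mark = 1;
		break;
	default:
		printf("dropped: unknown frame type %d\n", frame_type);
		return 0;
	}

	printf("frame_type=%d code=%d offload_fwd_mark=%d\n",
	       frame_type, code, offload_fwd_mark);
	return 0;
}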
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index 11535bc70743..1f57dce969f7 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -20,15 +20,20 @@
#define MTK_HDR_LEN 4
#define MTK_HDR_XMIT_UNTAGGED 0
#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
+#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
#define MTK_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0)
#define MTK_HDR_XMIT_DP_BIT_MASK GENMASK(5, 0)
+#define MTK_HDR_XMIT_SA_DIS BIT(6)
static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
+ u8 xmit_tpid;
u8 *mtk_tag;
- bool is_vlan_skb = true;
+ unsigned char *dest = eth_hdr(skb)->h_dest;
+ bool is_multicast_skb = is_multicast_ether_addr(dest) &&
+ !is_broadcast_ether_addr(dest);
/* Build the special tag after the MAC Source Address. If VLAN header
* is present, it's required that VLAN header and special tag is
@@ -36,13 +41,20 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
* the both special and VLAN tag at the same time and then look up VLAN
* table with VID.
*/
- if (!skb_vlan_tagged(skb)) {
+ switch (skb->protocol) {
+ case htons(ETH_P_8021Q):
+ xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_8100;
+ break;
+ case htons(ETH_P_8021AD):
+ xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_88A8;
+ break;
+ default:
if (skb_cow_head(skb, MTK_HDR_LEN) < 0)
return NULL;
+ xmit_tpid = MTK_HDR_XMIT_UNTAGGED;
skb_push(skb, MTK_HDR_LEN);
memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN);
- is_vlan_skb = false;
}
mtk_tag = skb->data + 2 * ETH_ALEN;
@@ -50,12 +62,15 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
/* Mark tag attribute on special tag insertion to notify hardware
* whether that's a combined special tag with 802.1Q header.
*/
- mtk_tag[0] = is_vlan_skb ? MTK_HDR_XMIT_TAGGED_TPID_8100 :
- MTK_HDR_XMIT_UNTAGGED;
+ mtk_tag[0] = xmit_tpid;
mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;
+ /* Disable SA learning for multicast frames */
+ if (unlikely(is_multicast_skb))
+ mtk_tag[1] |= MTK_HDR_XMIT_SA_DIS;
+
/* Tag control information is kept for 802.1Q */
- if (!is_vlan_skb) {
+ if (xmit_tpid == MTK_HDR_XMIT_UNTAGGED) {
mtk_tag[2] = 0;
mtk_tag[3] = 0;
}
@@ -68,6 +83,9 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
{
int port;
__be16 *phdr, hdr;
+ unsigned char *dest = eth_hdr(skb)->h_dest;
+ bool is_multicast_skb = is_multicast_ether_addr(dest) &&
+ !is_broadcast_ether_addr(dest);
if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
return NULL;
@@ -93,6 +111,10 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
if (!skb->dev)
return NULL;
+ /* Only unicast or broadcast frames are offloaded */
+ if (likely(!is_multicast_skb))
+ skb->offload_fwd_mark = 1;
+
return skb;
}
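The mtk_tag_xmit hunk derives the TPID code from skb->protocol (802.1Q, 802.1AD or untagged) and sets the SA-learning-disable bit for non-broadcast multicast frames. A standalone sketch of how those two tag bytes are built; port 0 and the sample ethertype are arbitrary choices:

#include <stdio.h>

#define ETH_P_8021Q                   0x8100
#define ETH_P_8021AD                  0x88A8
#define MTK_HDR_XMIT_UNTAGGED         0
#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
#define MTK_HDR_XMIT_DP_BIT_MASK      0x3f   /* GENMASK(5, 0) */
#define MTK_HDR_XMIT_SA_DIS           0x40   /* BIT(6) */

static void build_tag(unsigned int ethertype, int port, int is_multicast,
		      unsigned char tag[2])
{
	switch (ethertype) {
	case ETH_P_8021Q:
		tag[0] = MTK_HDR_XMIT_TAGGED_TPID_8100;
		break;
	case ETH_P_8021AD:
		tag[0] = MTK_HDR_XMIT_TAGGED_TPID_88A8;
		break;
	default:
		tag[0] = MTK_HDR_XMIT_UNTAGGED;
	}

	tag[1] = (1 << port) & MTK_HDR_XMIT_DP_BIT_MASK;
	if (is_multicast)
		tag[1] |= MTK_HDR_XMIT_SA_DIS;  /* no SA learning for multicast */
}

int main(void)
{
	unsigned char tag[2];

	build_tag(ETH_P_8021AD, 0, 1, tag);
	printf("tag bytes: %02x %02x\n", tag[0], tag[1]);
	return 0;
}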
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index f5a3601948ca..37795502bb51 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -310,7 +310,8 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
node_dst = find_node_by_AddrA(&port->hsr->node_db, eth_hdr(skb)->h_dest);
if (!node_dst) {
- WARN_ONCE(1, "%s: Unknown node\n", __func__);
+ if (net_ratelimit())
+ netdev_err(skb->dev, "%s: Unknown node\n", __func__);
return;
}
if (port->type != node_dst->AddrB_port)
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 37708dabebd1..606bc7fe5cc7 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -64,10 +64,16 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
else
multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
- if (!data[IFLA_HSR_VERSION])
+ if (!data[IFLA_HSR_VERSION]) {
hsr_version = 0;
- else
+ } else {
hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
+ if (hsr_version > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only versions 0..1 are supported");
+ return -EINVAL;
+ }
+ }
return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
}
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index d3cbb3258718..c0930b9fe848 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -559,9 +559,7 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
- if (!info->attrs[IEEE802154_ATTR_PAN_ID] &&
- !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] ||
- info->attrs[IEEE802154_ATTR_HW_ADDR]))
+ if (!info->attrs[IEEE802154_ATTR_PAN_ID])
return -EINVAL;
desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
@@ -570,6 +568,9 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
desc->device_addr.mode = IEEE802154_ADDR_SHORT;
desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
} else {
+ if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
+ return -EINVAL;
+
desc->device_addr.mode = IEEE802154_ADDR_LONG;
desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
}
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 99f6c254ea77..b1c55db73764 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -836,8 +836,13 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
goto nla_put_failure;
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ goto out;
+
if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0)
goto nla_put_failure;
+
+out:
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
genlmsg_end(msg, hdr);
@@ -1402,6 +1407,9 @@ static int nl802154_set_llsec_params(struct sk_buff *skb,
u32 changed = 0;
int ret;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
if (info->attrs[NL802154_ATTR_SEC_ENABLED]) {
u8 enabled;
@@ -1508,6 +1516,11 @@ nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+ err = skb->len;
+ goto out_err;
+ }
+
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
@@ -1562,7 +1575,8 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
struct ieee802154_llsec_key_id id = { };
u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
- if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX,
+ if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+ nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX,
info->attrs[NL802154_ATTR_SEC_KEY],
nl802154_key_policy, info->extack))
return -EINVAL;
@@ -1612,7 +1626,8 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
struct ieee802154_llsec_key_id id;
- if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX,
+ if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+ nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX,
info->attrs[NL802154_ATTR_SEC_KEY],
nl802154_key_policy, info->extack))
return -EINVAL;
@@ -1678,6 +1693,11 @@ nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+ err = skb->len;
+ goto out_err;
+ }
+
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
@@ -1765,6 +1785,9 @@ static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct ieee802154_llsec_device dev_desc;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
&dev_desc) < 0)
return -EINVAL;
@@ -1780,7 +1803,8 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
__le64 extended_addr;
- if (nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX,
+ if (!info->attrs[NL802154_ATTR_SEC_DEVICE] ||
+ nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX,
info->attrs[NL802154_ATTR_SEC_DEVICE],
nl802154_dev_policy, info->extack))
return -EINVAL;
@@ -1850,6 +1874,11 @@ nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+ err = skb->len;
+ goto out_err;
+ }
+
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
@@ -1907,6 +1936,9 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info
struct ieee802154_llsec_device_key key;
__le64 extended_addr;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX,
info->attrs[NL802154_ATTR_SEC_DEVKEY],
@@ -1940,7 +1972,8 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info
struct ieee802154_llsec_device_key key;
__le64 extended_addr;
- if (nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX,
+ if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
+ nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX,
info->attrs[NL802154_ATTR_SEC_DEVKEY],
nl802154_devkey_policy, info->extack))
return -EINVAL;
@@ -2015,6 +2048,11 @@ nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+ err = skb->len;
+ goto out_err;
+ }
+
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
@@ -2100,6 +2138,9 @@ static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct ieee802154_llsec_seclevel sl;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
&sl) < 0)
return -EINVAL;
@@ -2115,6 +2156,9 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct ieee802154_llsec_seclevel sl;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
if (!info->attrs[NL802154_ATTR_SEC_LEVEL] ||
llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
&sl) < 0)
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 1c21dc5d6dd4..6a1b52b34e20 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -533,16 +533,10 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
ret_val = -ENOENT;
goto doi_remove_return;
}
- if (!refcount_dec_and_test(&doi_def->refcount)) {
- spin_unlock(&cipso_v4_doi_list_lock);
- ret_val = -EBUSY;
- goto doi_remove_return;
- }
list_del_rcu(&doi_def->list);
spin_unlock(&cipso_v4_doi_list_lock);
- cipso_v4_cache_invalidate();
- call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
+ cipso_v4_doi_putdef(doi_def);
ret_val = 0;
doi_remove_return:
@@ -599,9 +593,6 @@ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
if (!refcount_dec_and_test(&doi_def->refcount))
return;
- spin_lock(&cipso_v4_doi_list_lock);
- list_del_rcu(&doi_def->list);
- spin_unlock(&cipso_v4_doi_list_lock);
cipso_v4_cache_invalidate();
call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
@@ -1272,7 +1263,8 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
return ret_val;
}
- secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+ if (secattr->attr.mls.cat)
+ secattr->flags |= NETLBL_SECATTR_MLS_CAT;
}
return 0;
@@ -1453,7 +1445,8 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
return ret_val;
}
- secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+ if (secattr->attr.mls.cat)
+ secattr->flags |= NETLBL_SECATTR_MLS_CAT;
}
return 0;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index fabece4b8997..12a2cea9d606 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -269,6 +269,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
err = devinet_sysctl_register(in_dev);
if (err) {
in_dev->dead = 1;
+ neigh_parms_release(&arp_tbl, in_dev->arp_parms);
in_dev_put(in_dev);
in_dev = NULL;
goto out;
@@ -587,12 +588,15 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
return NULL;
}
-static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
+static int ip_mc_autojoin_config(struct net *net, bool join,
+ const struct in_ifaddr *ifa)
{
+#if defined(CONFIG_IP_MULTICAST)
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ifa->ifa_address,
.imr_ifindex = ifa->ifa_dev->dev->ifindex,
};
+ struct sock *sk = net->ipv4.mc_autojoin_sk;
int ret;
ASSERT_RTNL();
@@ -605,6 +609,9 @@ static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
release_sock(sk);
return ret;
+#else
+ return -EOPNOTSUPP;
+#endif
}
static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -646,7 +653,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
continue;
if (ipv4_is_multicast(ifa->ifa_address))
- ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
+ ip_mc_autojoin_config(net, false, ifa);
__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
return 0;
}
@@ -907,8 +914,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
*/
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
- int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
- true, ifa);
+ int ret = ip_mc_autojoin_config(net, true, ifa);
if (ret < 0) {
inet_free_ifa(ifa);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 114f9def1ec5..0792a9e2a555 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -270,7 +270,6 @@ static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struc
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
u8 *tail;
- u8 *vaddr;
int nfrags;
int esph_offset;
struct page *page;
@@ -312,14 +311,10 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
page = pfrag->page;
get_page(page);
- vaddr = kmap_atomic(page);
-
- tail = vaddr + pfrag->offset;
+ tail = page_address(page) + pfrag->offset;
esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
- kunmap_atomic(vaddr);
-
nfrags = skb_shinfo(skb)->nr_frags;
__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 7f4ec36e5f70..b96aa88087be 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -302,7 +302,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_oif = l3mdev_master_ifindex_rcu(dev),
.daddr = ip_hdr(skb)->saddr,
- .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
+ .flowi4_tos = ip_hdr(skb)->tos & IPTOS_RT_MASK,
.flowi4_scope = scope,
.flowi4_mark = vmark ? skb->mark : 0,
};
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index a8fc4e83cd95..9573cd242b90 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -831,7 +831,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_nh *nh,
if (fl4.flowi4_scope < RT_SCOPE_LINK)
fl4.flowi4_scope = RT_SCOPE_LINK;
- if (cfg->fc_table)
+ if (cfg->fc_table && cfg->fc_table != RT_TABLE_MAIN)
tbl = fib_get_table(net, cfg->fc_table);
if (tbl)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 3047fc4737c4..48d7125501b4 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1749,7 +1749,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
struct key_vector *local_l = NULL, *local_tp;
- hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
+ hlist_for_each_entry(fa, &l->leaf, fa_list) {
struct fib_alias *new_fa;
if (local_tb->tb_id != fa->tb_id)
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index ad9ea82daeb3..9376b30cf626 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -133,7 +133,7 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
* to 0 and sets the configured key in the
* inner erspan header field
*/
- if (greh->protocol == htons(ETH_P_ERSPAN) ||
+ if ((greh->protocol == htons(ETH_P_ERSPAN) && hdr_len != 4) ||
greh->protocol == htons(ETH_P_ERSPAN2)) {
struct erspan_base_hdr *ershdr;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 6c63524f598a..89c613f19566 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -19,12 +19,12 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
+ bool need_csum, need_recompute_csum, gso_partial;
struct sk_buff *segs = ERR_PTR(-EINVAL);
u16 mac_offset = skb->mac_header;
__be16 protocol = skb->protocol;
u16 mac_len = skb->mac_len;
int gre_offset, outer_hlen;
- bool need_csum, gso_partial;
if (!skb->encapsulation)
goto out;
@@ -45,6 +45,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
skb->protocol = skb->inner_protocol;
need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
+ need_recompute_csum = skb->csum_not_inet;
skb->encap_hdr_csum = need_csum;
features &= skb->dev->hw_enc_features;
@@ -102,7 +103,15 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
}
*(pcsum + 1) = 0;
- *pcsum = gso_make_checksum(skb, 0);
+ if (need_recompute_csum && !skb_is_gso(skb)) {
+ __wsum csum;
+
+ csum = skb_checksum(skb, gre_offset,
+ skb->len - gre_offset, 0);
+ *pcsum = csum_fold(csum);
+ } else {
+ *pcsum = gso_make_checksum(skb, 0);
+ }
} while ((skb = skb->next));
out:
return segs;
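The gre_gso_segment hunk falls back to skb_checksum() plus csum_fold() when the inner protocol cannot provide an offloadable checksum (skb->csum_not_inet). csum_fold() is the ordinary end-around-carry fold of the Internet checksum; the standalone sketch below performs the same arithmetic over a flat buffer, glossing over the skb walk and byte-order details:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* One's-complement sum of a buffer, folded and complemented: the same
 * arithmetic csum_fold(skb_checksum(...)) performs, but over contiguous
 * memory instead of skb fragments.
 */
static uint16_t internet_checksum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;

	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)data[i] << 8 | data[i + 1];
	if (len & 1)
		sum += (uint32_t)data[len - 1] << 8;

	while (sum >> 16)                 /* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t payload[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46 };

	printf("checksum = 0x%04x\n", internet_checksum(payload, sizeof(payload)));
	return 0;
}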
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 4efa5e33513e..b048125ea099 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -244,7 +244,7 @@ static struct {
/**
* icmp_global_allow - Are we allowed to send one more ICMP message ?
*
- * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec.
+ * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
* Returns false if we reached the limit and can not send another packet.
* Note: called with BH disabled
*/
@@ -272,7 +272,10 @@ bool icmp_global_allow(void)
}
credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
if (credit) {
- credit--;
+ /* We want to use a credit of one on average, but need to randomize
+ * it for security reasons.
+ */
+ credit = max_t(int, credit - prandom_u32_max(3), 0);
rc = true;
}
WRITE_ONCE(icmp_global.credit, credit);
@@ -752,6 +755,40 @@ out:;
}
EXPORT_SYMBOL(__icmp_send);
+#if IS_ENABLED(CONFIG_NF_NAT)
+#include <net/netfilter/nf_conntrack.h>
+void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+ struct sk_buff *cloned_skb = NULL;
+ struct ip_options opts = { 0 };
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+ __be32 orig_ip;
+
+ ct = nf_ct_get(skb_in, &ctinfo);
+ if (!ct || !(ct->status & IPS_SRC_NAT)) {
+ __icmp_send(skb_in, type, code, info, &opts);
+ return;
+ }
+
+ if (skb_shared(skb_in))
+ skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);
+
+ if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
+ (skb_network_header(skb_in) + sizeof(struct iphdr)) >
+ skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
+ skb_network_offset(skb_in) + sizeof(struct iphdr))))
+ goto out;
+
+ orig_ip = ip_hdr(skb_in)->saddr;
+ ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip;
+ __icmp_send(skb_in, type, code, info, &opts);
+ ip_hdr(skb_in)->saddr = orig_ip;
+out:
+ consume_skb(cloned_skb);
+}
+EXPORT_SYMBOL(icmp_ndo_send);
+#endif
static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
{
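The icmp_global_allow hunk keeps the existing token bucket but charges a randomized 0-2 credits per message instead of exactly one, so the long-run rate is unchanged while the limiter is harder to probe precisely. A standalone sketch of that bucket, with made-up limits and rand() standing in for prandom_u32_max():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define MSGS_PER_SEC 10          /* made-up limits, for illustration only */
#define MSGS_BURST   5

static unsigned int credit;

/* delta_ms: time since the last call; returns 1 if sending is allowed */
static int icmp_allow(unsigned int delta_ms)
{
	unsigned int incr = (unsigned long long)MSGS_PER_SEC * delta_ms / 1000;
	int ok = 0;

	credit += incr;
	if (credit > MSGS_BURST)
		credit = MSGS_BURST;
	if (credit) {
		/* average cost of one credit, randomized between 0 and 2 */
		unsigned int cost = rand() % 3;

		credit = (credit > cost) ? credit - cost : 0;
		ok = 1;
	}
	return ok;
}

int main(void)
{
	srand((unsigned int)time(NULL));
	for (int i = 0; i < 8; i++)
		printf("attempt %d: %s (credit left %u)\n",
		       i, icmp_allow(100) ? "allowed" : "dropped", credit);
	return 0;
}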
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 34bd6230e9f4..439a55d1aa99 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -28,17 +28,19 @@
#include <net/addrconf.h>
#if IS_ENABLED(CONFIG_IPV6)
-/* match_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
- * only, and any IPv4 addresses if not IPv6 only
- * match_wildcard == false: addresses must be exactly the same, i.e.
- * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
- * and 0.0.0.0 equals to 0.0.0.0 only
+/* match_sk*_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses
+ * if IPv6 only, and any IPv4 addresses
+ * if not IPv6 only
+ * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
+ * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
+ * and 0.0.0.0 equals to 0.0.0.0 only
*/
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
const struct in6_addr *sk2_rcv_saddr6,
__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
bool sk1_ipv6only, bool sk2_ipv6only,
- bool match_wildcard)
+ bool match_sk1_wildcard,
+ bool match_sk2_wildcard)
{
int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
@@ -48,8 +50,8 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
if (!sk2_ipv6only) {
if (sk1_rcv_saddr == sk2_rcv_saddr)
return true;
- if (!sk1_rcv_saddr || !sk2_rcv_saddr)
- return match_wildcard;
+ return (match_sk1_wildcard && !sk1_rcv_saddr) ||
+ (match_sk2_wildcard && !sk2_rcv_saddr);
}
return false;
}
@@ -57,11 +59,11 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
return true;
- if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
+ if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
!(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
return true;
- if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
+ if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
!(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
return true;
@@ -73,18 +75,19 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
}
#endif
-/* match_wildcard == true: 0.0.0.0 equals to any IPv4 addresses
- * match_wildcard == false: addresses must be exactly the same, i.e.
- * 0.0.0.0 only equals to 0.0.0.0
+/* match_sk*_wildcard == true: 0.0.0.0 equals to any IPv4 addresses
+ * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
+ * 0.0.0.0 only equals to 0.0.0.0
*/
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
- bool sk2_ipv6only, bool match_wildcard)
+ bool sk2_ipv6only, bool match_sk1_wildcard,
+ bool match_sk2_wildcard)
{
if (!sk2_ipv6only) {
if (sk1_rcv_saddr == sk2_rcv_saddr)
return true;
- if (!sk1_rcv_saddr || !sk2_rcv_saddr)
- return match_wildcard;
+ return (match_sk1_wildcard && !sk1_rcv_saddr) ||
+ (match_sk2_wildcard && !sk2_rcv_saddr);
}
return false;
}
@@ -100,10 +103,12 @@ bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
sk2->sk_rcv_saddr,
ipv6_only_sock(sk),
ipv6_only_sock(sk2),
+ match_wildcard,
match_wildcard);
#endif
return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
- ipv6_only_sock(sk2), match_wildcard);
+ ipv6_only_sock(sk2), match_wildcard,
+ match_wildcard);
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);
@@ -274,57 +279,18 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
tb->fast_rcv_saddr,
sk->sk_rcv_saddr,
tb->fast_ipv6_only,
- ipv6_only_sock(sk), true);
+ ipv6_only_sock(sk), true, false);
#endif
return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
- ipv6_only_sock(sk), true);
+ ipv6_only_sock(sk), true, false);
}
-/* Obtain a reference to a local port for the given sock,
- * if snum is zero it means select any available local port.
- * We try to allocate an odd port (and leave even ports for connect())
- */
-int inet_csk_get_port(struct sock *sk, unsigned short snum)
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+ struct sock *sk)
{
- bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
- struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
- int ret = 1, port = snum;
- struct inet_bind_hashbucket *head;
- struct net *net = sock_net(sk);
- struct inet_bind_bucket *tb = NULL;
kuid_t uid = sock_i_uid(sk);
+ bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
- if (!port) {
- head = inet_csk_find_open_port(sk, &tb, &port);
- if (!head)
- return ret;
- if (!tb)
- goto tb_not_found;
- goto success;
- }
- head = &hinfo->bhash[inet_bhashfn(net, port,
- hinfo->bhash_size)];
- spin_lock_bh(&head->lock);
- inet_bind_bucket_for_each(tb, &head->chain)
- if (net_eq(ib_net(tb), net) && tb->port == port)
- goto tb_found;
-tb_not_found:
- tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
- net, head, port);
- if (!tb)
- goto fail_unlock;
-tb_found:
- if (!hlist_empty(&tb->owners)) {
- if (sk->sk_reuse == SK_FORCE_REUSE)
- goto success;
-
- if ((tb->fastreuse > 0 && reuse) ||
- sk_reuseport_match(tb, sk))
- goto success;
- if (inet_csk_bind_conflict(sk, tb, true, true))
- goto fail_unlock;
- }
-success:
if (hlist_empty(&tb->owners)) {
tb->fastreuse = reuse;
if (sk->sk_reuseport) {
@@ -368,6 +334,54 @@ success:
tb->fastreuseport = 0;
}
}
+}
+
+/* Obtain a reference to a local port for the given sock,
+ * if snum is zero it means select any available local port.
+ * We try to allocate an odd port (and leave even ports for connect())
+ */
+int inet_csk_get_port(struct sock *sk, unsigned short snum)
+{
+ bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+ struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
+ int ret = 1, port = snum;
+ struct inet_bind_hashbucket *head;
+ struct net *net = sock_net(sk);
+ struct inet_bind_bucket *tb = NULL;
+
+ if (!port) {
+ head = inet_csk_find_open_port(sk, &tb, &port);
+ if (!head)
+ return ret;
+ if (!tb)
+ goto tb_not_found;
+ goto success;
+ }
+ head = &hinfo->bhash[inet_bhashfn(net, port,
+ hinfo->bhash_size)];
+ spin_lock_bh(&head->lock);
+ inet_bind_bucket_for_each(tb, &head->chain)
+ if (net_eq(ib_net(tb), net) && tb->port == port)
+ goto tb_found;
+tb_not_found:
+ tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
+ net, head, port);
+ if (!tb)
+ goto fail_unlock;
+tb_found:
+ if (!hlist_empty(&tb->owners)) {
+ if (sk->sk_reuse == SK_FORCE_REUSE)
+ goto success;
+
+ if ((tb->fastreuse > 0 && reuse) ||
+ sk_reuseport_match(tb, sk))
+ goto success;
+ if (inet_csk_bind_conflict(sk, tb, true, true))
+ goto fail_unlock;
+ }
+success:
+ inet_csk_update_fastreuse(tb, sk);
+
if (!inet_csk(sk)->icsk_bind_hash)
inet_bind_hash(sk, tb, port);
WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
@@ -684,12 +698,15 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
return found;
}
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
- if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
+ bool unlinked = reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req);
+
+ if (unlinked) {
reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
reqsk_put(req);
}
+ return unlinked;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
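The match_sk*_wildcard split above means only one side of a comparison may be treated as 0.0.0.0-matches-anything; sk_reuseport_match() now passes true for the cached bucket address and false for the socket being bound. A standalone sketch mirroring the patched ipv4_rcv_saddr_equal(), with two illustrative calls:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirror of the patched ipv4_rcv_saddr_equal(): each side may only match
 * as a wildcard (0.0.0.0) when its own match_sk*_wildcard flag is set.
 */
static bool rcv_saddr_equal(uint32_t sk1_saddr, uint32_t sk2_saddr,
			    bool sk2_ipv6only,
			    bool match_sk1_wildcard, bool match_sk2_wildcard)
{
	if (!sk2_ipv6only) {
		if (sk1_saddr == sk2_saddr)
			return true;
		return (match_sk1_wildcard && !sk1_saddr) ||
		       (match_sk2_wildcard && !sk2_saddr);
	}
	return false;
}

int main(void)
{
	uint32_t any = 0;                /* 0.0.0.0 */
	uint32_t addr = 0xc0a80001;      /* 192.168.0.1 */

	/* cached bucket address is wildcard, new socket is specific: match */
	printf("%d\n", rcv_saddr_equal(any, addr, false, true, false));
	/* new socket is wildcard but may not act as one: no match */
	printf("%d\n", rcv_saddr_equal(addr, any, false, true, false));
	return 0;
}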
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index f0957ebf82cf..d07917059d70 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -392,8 +392,10 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
r->idiag_inode = 0;
if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
- inet_rsk(reqsk)->ir_mark))
+ inet_rsk(reqsk)->ir_mark)) {
+ nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
+ }
nlmsg_end(skb, nlh);
return 0;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index b53da2691adb..3a5f12f011cb 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -161,6 +161,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
return -ENOMEM;
}
}
+ inet_csk_update_fastreuse(tb, child);
}
inet_bind_hash(child, tb, port);
spin_unlock(&head->lock);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index ffcb5983107d..de6f89511a21 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -680,9 +680,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
}
if (dev->header_ops) {
- /* Need space for new headers */
- if (skb_cow_head(skb, dev->needed_headroom -
- (tunnel->hlen + sizeof(struct iphdr))))
+ if (skb_cow_head(skb, 0))
goto free_skb;
tnl_params = (const struct iphdr *)skb->data;
@@ -800,7 +798,11 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
len = tunnel->tun_hlen - len;
tunnel->hlen = tunnel->hlen + len;
- dev->needed_headroom = dev->needed_headroom + len;
+ if (dev->header_ops)
+ dev->hard_header_len += len;
+ else
+ dev->needed_headroom += len;
+
if (set_mtu)
dev->mtu = max_t(int, dev->mtu - len, 68);
@@ -1003,6 +1005,7 @@ static void __gre_tunnel_init(struct net_device *dev)
tunnel->parms.iph.protocol = IPPROTO_GRE;
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
+ dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
dev->features |= GRE_FEATURES;
dev->hw_features |= GRE_FEATURES;
@@ -1046,10 +1049,14 @@ static int ipgre_tunnel_init(struct net_device *dev)
return -EINVAL;
dev->flags = IFF_BROADCAST;
dev->header_ops = &ipgre_header_ops;
+ dev->hard_header_len = tunnel->hlen + sizeof(*iph);
+ dev->needed_headroom = 0;
}
#endif
} else if (!tunnel->collect_md) {
dev->header_ops = &ipgre_header_ops;
+ dev->hard_header_len = tunnel->hlen + sizeof(*iph);
+ dev->needed_headroom = 0;
}
return ip_tunnel_init(dev);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index fbf30122e8bf..e411c42d8428 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -73,6 +73,7 @@
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
+#include <net/inet_ecn.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
@@ -311,7 +312,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk
if (skb_is_gso(skb))
return ip_finish_output_gso(net, sk, skb, mtu);
- if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
+ if (skb->len > mtu || IPCB(skb)->frag_max_size)
return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
return ip_finish_output2(net, sk, skb);
@@ -1582,7 +1583,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
if (IS_ERR(rt))
return;
- inet_sk(sk)->tos = arg->tos;
+ inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
sk->sk_priority = skb->priority;
sk->sk_protocol = ip_hdr(skb)->protocol;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index b37abba3b369..bdd073ea300a 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -98,9 +98,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
__be32 remote, __be32 local,
__be32 key)
{
- unsigned int hash;
struct ip_tunnel *t, *cand = NULL;
struct hlist_head *head;
+ struct net_device *ndev;
+ unsigned int hash;
hash = ip_tunnel_hash(key, remote);
head = &itn->tunnels[hash];
@@ -175,8 +176,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (t && t->dev->flags & IFF_UP)
return t;
- if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
- return netdev_priv(itn->fb_tunnel_dev);
+ ndev = READ_ONCE(itn->fb_tunnel_dev);
+ if (ndev && ndev->flags & IFF_UP)
+ return netdev_priv(ndev);
return NULL;
}
@@ -328,7 +330,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
}
dev->needed_headroom = t_hlen + hlen;
- mtu -= (dev->hard_header_len + t_hlen);
+ mtu -= t_hlen;
if (mtu < IPV4_MIN_MTU)
mtu = IPV4_MIN_MTU;
@@ -358,7 +360,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
nt = netdev_priv(dev);
t_hlen = nt->hlen + sizeof(struct iphdr);
dev->min_mtu = ETH_MIN_MTU;
- dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+ dev->max_mtu = IP_MAX_MTU - t_hlen;
ip_tunnel_add(itn, nt);
return nt;
@@ -500,12 +502,11 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
const struct iphdr *inner_iph)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
+ int pkt_size = skb->len - tunnel->hlen;
int mtu;
if (df)
- mtu = dst_mtu(&rt->dst) - dev->hard_header_len
- - sizeof(struct iphdr) - tunnel->hlen;
+ mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel->hlen);
else
mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
@@ -734,7 +735,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
- if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
+ df = tnl_params->frag_off;
+ if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
+ df |= (inner_iph->frag_off & htons(IP_DF));
+
+ if (tnl_update_pmtu(dev, skb, rt, df, inner_iph)) {
ip_rt_put(rt);
goto tx_error;
}
@@ -762,10 +767,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ttl = ip4_dst_hoplimit(&rt->dst);
}
- df = tnl_params->frag_off;
- if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
- df |= (inner_iph->frag_off&htons(IP_DF));
-
max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
if (max_headroom > dev->needed_headroom)
@@ -933,7 +934,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
- int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+ int max_mtu = IP_MAX_MTU - t_hlen;
if (new_mtu < ETH_MIN_MTU)
return -EINVAL;
@@ -1110,10 +1111,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
mtu = ip_tunnel_bind_dev(dev);
if (tb[IFLA_MTU]) {
- unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
+ unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
- mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
- (unsigned int)(max - sizeof(struct iphdr)));
+ mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
}
err = dev_set_mtu(dev, mtu);
@@ -1212,9 +1212,9 @@ void ip_tunnel_uninit(struct net_device *dev)
struct ip_tunnel_net *itn;
itn = net_generic(net, tunnel->ip_tnl_net_id);
- /* fb_tunnel_dev will be unregisted in net-exit call. */
- if (itn->fb_tunnel_dev != dev)
- ip_tunnel_del(itn, netdev_priv(dev));
+ ip_tunnel_del(itn, netdev_priv(dev));
+ if (itn->fb_tunnel_dev == dev)
+ WRITE_ONCE(itn->fb_tunnel_dev, NULL);
dst_cache_reset(&tunnel->dst_cache);
}
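The ip_tunnel.c hunks remove dev->hard_header_len from the MTU math so only the tunnel overhead (t_hlen = tunnel->hlen + the outer IPv4 header) is subtracted. A standalone check of the before/after arithmetic, with a 4-byte GRE header, 20-byte IPv4 header and 14-byte link header as assumed example values:

#include <stdio.h>

#define IP_MAX_MTU 0xFFFFU        /* 65535 */

int main(void)
{
	unsigned int iphdr_len = 20;          /* sizeof(struct iphdr) */
	unsigned int tun_hlen = 4;            /* GRE header, no options (assumed) */
	unsigned int t_hlen = tun_hlen + iphdr_len;
	unsigned int hard_header_len = 14;    /* link-layer header (assumed) */

	/* before: link header subtracted as well; after: tunnel overhead only */
	printf("old max_mtu = %u\n", IP_MAX_MTU - hard_header_len - t_hlen);
	printf("new max_mtu = %u\n", IP_MAX_MTU - t_hlen);

	/* per-packet check from tnl_update_pmtu(), new formula */
	unsigned int dst_mtu = 1500;
	printf("tunnel path mtu = %u\n", dst_mtu - (iphdr_len + tun_hlen));
	return 0;
}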
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index ccb1d97dfa05..15c71b08c2df 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -50,7 +50,7 @@ static unsigned int vti_net_id __read_mostly;
static int vti_tunnel_init(struct net_device *dev);
static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
- int encap_type)
+ int encap_type, bool update_skb_dev)
{
struct ip_tunnel *tunnel;
const struct iphdr *iph = ip_hdr(skb);
@@ -65,6 +65,9 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
+ if (update_skb_dev)
+ skb->dev = tunnel->dev;
+
return xfrm_input(skb, nexthdr, spi, encap_type);
}
@@ -74,25 +77,43 @@ drop:
return 0;
}
-static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
- int encap_type)
+static int vti_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type)
{
- struct ip_tunnel *tunnel;
+ return vti_input(skb, nexthdr, spi, encap_type, false);
+}
+
+static int vti_rcv(struct sk_buff *skb, __be32 spi, bool update_skb_dev)
+{
+ XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+
+ return vti_input(skb, ip_hdr(skb)->protocol, spi, 0, update_skb_dev);
+}
+
+static int vti_rcv_proto(struct sk_buff *skb)
+{
+ return vti_rcv(skb, 0, false);
+}
+
+static int vti_rcv_tunnel(struct sk_buff *skb)
+{
+ struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id);
const struct iphdr *iph = ip_hdr(skb);
- struct net *net = dev_net(skb->dev);
- struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+ struct ip_tunnel *tunnel;
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->saddr, iph->daddr, 0);
if (tunnel) {
+ struct tnl_ptk_info tpi = {
+ .proto = htons(ETH_P_IP),
+ };
+
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
-
- XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
-
- skb->dev = tunnel->dev;
-
- return xfrm_input(skb, nexthdr, spi, encap_type);
+ if (iptunnel_pull_header(skb, 0, tpi.proto, false))
+ goto drop;
+ return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false);
}
return -EINVAL;
@@ -101,22 +122,6 @@ drop:
return 0;
}
-static int vti_rcv(struct sk_buff *skb)
-{
- XFRM_SPI_SKB_CB(skb)->family = AF_INET;
- XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
-
- return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
-}
-
-static int vti_rcv_ipip(struct sk_buff *skb)
-{
- XFRM_SPI_SKB_CB(skb)->family = AF_INET;
- XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
-
- return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
-}
-
static int vti_rcv_cb(struct sk_buff *skb, int err)
{
unsigned short family;
@@ -478,31 +483,31 @@ static void __net_init vti_fb_tunnel_init(struct net_device *dev)
}
static struct xfrm4_protocol vti_esp4_protocol __read_mostly = {
- .handler = vti_rcv,
- .input_handler = vti_input,
+ .handler = vti_rcv_proto,
+ .input_handler = vti_input_proto,
.cb_handler = vti_rcv_cb,
.err_handler = vti4_err,
.priority = 100,
};
static struct xfrm4_protocol vti_ah4_protocol __read_mostly = {
- .handler = vti_rcv,
- .input_handler = vti_input,
+ .handler = vti_rcv_proto,
+ .input_handler = vti_input_proto,
.cb_handler = vti_rcv_cb,
.err_handler = vti4_err,
.priority = 100,
};
static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
- .handler = vti_rcv,
- .input_handler = vti_input,
+ .handler = vti_rcv_proto,
+ .input_handler = vti_input_proto,
.cb_handler = vti_rcv_cb,
.err_handler = vti4_err,
.priority = 100,
};
static struct xfrm_tunnel ipip_handler __read_mostly = {
- .handler = vti_rcv_ipip,
+ .handler = vti_rcv_tunnel,
.err_handler = vti4_err,
.priority = 0,
};
@@ -677,10 +682,8 @@ static int __init vti_init(void)
msg = "ipip tunnel";
err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
- if (err < 0) {
- pr_info("%s: cant't register tunnel\n",__func__);
+ if (err < 0)
goto xfrm_tunnel_failed;
- }
msg = "netlink interface";
err = rtnl_link_register(&vti_link_ops);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 4368282eb6f8..da2e92380d78 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -704,7 +704,7 @@ out:
rtnl_link_failed:
#if IS_ENABLED(CONFIG_MPLS)
- xfrm4_tunnel_deregister(&mplsip_handler, AF_INET);
+ xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS);
xfrm_tunnel_mplsip_failed:
#endif
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 8d2e5dc9a827..3d670d5aea34 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -17,17 +17,19 @@
#include <net/netfilter/nf_queue.h>
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
-int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_type)
+int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned int addr_type)
{
const struct iphdr *iph = ip_hdr(skb);
struct rtable *rt;
struct flowi4 fl4 = {};
__be32 saddr = iph->saddr;
- const struct sock *sk = skb_to_full_sk(skb);
- __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0;
+ __u8 flags;
struct net_device *dev = skb_dst(skb)->dev;
unsigned int hh_len;
+ sk = sk_to_full_sk(sk);
+ flags = sk ? inet_sk_flowi_flags(sk) : 0;
+
if (addr_type == RTN_UNSPEC)
addr_type = inet_addr_type_dev_table(net, dev, saddr);
if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
@@ -91,8 +93,8 @@ int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
skb->mark == rt_info->mark &&
iph->daddr == rt_info->daddr &&
iph->saddr == rt_info->saddr))
- return ip_route_me_harder(entry->state.net, skb,
- RTN_UNSPEC);
+ return ip_route_me_harder(entry->state.net, entry->state.sk,
+ skb, RTN_UNSPEC);
}
return 0;
}
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 10d8f95eb771..fba56cd95896 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1195,6 +1195,8 @@ static int translate_compat_table(struct net *net,
if (!newinfo)
goto out_unlock;
+ memset(newinfo->entries, 0, size);
+
newinfo->number = compatr->num_entries;
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
newinfo->hook_entry[i] = compatr->hook_entry[i];
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index e77872c93c20..730a40dc829a 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1433,6 +1433,8 @@ translate_compat_table(struct net *net,
if (!newinfo)
goto out_unlock;
+ memset(newinfo->entries, 0, size);
+
newinfo->number = compatr->num_entries;
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
newinfo->hook_entry[i] = compatr->hook_entry[i];
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 690b17ef6a44..d64b1ef43c10 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -54,7 +54,7 @@ synproxy_send_tcp(struct net *net,
skb_dst_set_noref(nskb, skb_dst(skb));
nskb->protocol = htons(ETH_P_IP);
- if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
+ if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC))
goto free_nskb;
if (nfct) {
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 74b19a5c572e..088320ce77a1 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -94,7 +94,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
flow.daddr = iph->saddr;
flow.saddr = rpfilter_get_saddr(iph->daddr);
flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
- flow.flowi4_tos = RT_TOS(iph->tos);
+ flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
flow.flowi4_scope = RT_SCOPE_UNIVERSE;
flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index dea138ca8925..0829f46ddfdd 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -65,7 +65,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
iph->daddr != daddr ||
skb->mark != mark ||
iph->tos != tos) {
- err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
+ err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC);
if (err < 0)
ret = NF_DROP_ERR(err);
}
diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
index df5c2a2061a4..19fff2c589fa 100644
--- a/net/ipv4/netfilter/nf_log_arp.c
+++ b/net/ipv4/netfilter/nf_log_arp.c
@@ -46,16 +46,31 @@ static void dump_arp_packet(struct nf_log_buf *m,
const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int nhoff)
{
- const struct arphdr *ah;
- struct arphdr _arph;
const struct arppayload *ap;
struct arppayload _arpp;
+ const struct arphdr *ah;
+ unsigned int logflags;
+ struct arphdr _arph;
ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
if (ah == NULL) {
nf_log_buf_add(m, "TRUNCATED");
return;
}
+
+ if (info->type == NF_LOG_TYPE_LOG)
+ logflags = info->u.log.logflags;
+ else
+ logflags = NF_LOG_DEFAULT_MASK;
+
+ if (logflags & NF_LOG_MACDECODE) {
+ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
+ nf_log_dump_vlan(m, skb);
+ nf_log_buf_add(m, "MACPROTO=%04x ",
+ ntohs(eth_hdr(skb)->h_proto));
+ }
+
nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d",
ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op));
diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
index 1e6f28c97d3a..cde1918607e9 100644
--- a/net/ipv4/netfilter/nf_log_ipv4.c
+++ b/net/ipv4/netfilter/nf_log_ipv4.c
@@ -287,8 +287,10 @@ static void dump_ipv4_mac_header(struct nf_log_buf *m,
switch (dev->type) {
case ARPHRD_ETHER:
- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
+ nf_log_dump_vlan(m, skb);
+ nf_log_buf_add(m, "MACPROTO=%04x ",
ntohs(eth_hdr(skb)->h_proto));
return;
default:
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index 6115bf1ff6f0..6a27766b7d0f 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -329,7 +329,7 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
if (ct->tuplehash[dir].tuple.dst.u3.ip !=
ct->tuplehash[!dir].tuple.src.u3.ip) {
- err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
+ err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC);
if (err < 0)
ret = NF_DROP_ERR(err);
}
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 5d259a12e25f..2518c1ca63ae 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -165,8 +165,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
break;
default:
pr_debug("unknown outbound packet 0x%04x:%s\n", msg,
- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
- pptp_msg_name[0]);
+ pptp_msg_name(msg));
/* fall through */
case PPTP_SET_LINK_INFO:
/* only need to NAT in case PAC is behind NAT box */
@@ -267,9 +266,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
break;
default:
- pr_debug("unknown inbound packet %s\n",
- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
- pptp_msg_name[0]);
+ pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg));
/* fall through */
case PPTP_START_SESSION_REQUEST:
case PPTP_START_SESSION_REPLY:
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 5cd06ba3535d..4996db1f64a1 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -129,7 +129,7 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
ip4_dst_hoplimit(skb_dst(nskb)));
nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
- if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
+ if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC))
goto free_nskb;
niph = ip_hdr(nskb);
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
index 7d82934c46f4..61003768e52b 100644
--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -50,7 +50,7 @@ static unsigned int nf_route_table_hook(void *priv,
iph->daddr != daddr ||
skb->mark != mark ||
iph->tos != tos) {
- err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
+ err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC);
if (err < 0)
ret = NF_DROP_ERR(err);
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 8d7aaf118a30..c59144502ee8 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -791,6 +791,9 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
sk->sk_uid);
+ fl4.fl4_icmp_type = user_icmph.type;
+ fl4.fl4_icmp_code = user_icmph.code;
+
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 4590af506244..2fe50f6f876d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -274,6 +274,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
*pos = cpu+1;
return &per_cpu(rt_cache_stat, cpu);
}
+ (*pos)++;
return NULL;
}
@@ -484,18 +485,16 @@ u32 ip_idents_reserve(u32 hash, int segs)
atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
u32 old = READ_ONCE(*p_tstamp);
u32 now = (u32)jiffies;
- u32 new, delta = 0;
+ u32 delta = 0;
if (old != now && cmpxchg(p_tstamp, old, now) == old)
delta = prandom_u32_max(now - old);
- /* Do not use atomic_add_return() as it makes UBSAN unhappy */
- do {
- old = (u32)atomic_read(p_id);
- new = old + delta + segs;
- } while (atomic_cmpxchg(p_id, old, new) != old);
-
- return new - segs;
+ /* If UBSAN reports an error here, please first make sure your compiler
+ * supports -fno-strict-overflow before reporting it: that was a bug
+ * in UBSAN, and it has been fixed in GCC-8.
+ */
+ return atomic_add_return(segs + delta, p_id) - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
@@ -779,8 +778,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
neigh_event_send(n, NULL);
} else {
if (fib_lookup(net, fl4, &res, 0) == 0) {
- struct fib_nh *nh = &FIB_RES_NH(res);
+ struct fib_nh *nh;
+ fib_select_path(net, &res, fl4, skb);
+ nh = &FIB_RES_NH(res);
update_or_create_fnhe(nh, fl4->daddr, new_gw,
0, false,
jiffies + ip_rt_gc_timeout);
@@ -906,7 +907,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
/* Check for load limit; set rate_last to the latest sent
* redirect.
*/
- if (peer->rate_tokens == 0 ||
+ if (peer->n_redirects == 0 ||
time_after(jiffies,
(peer->rate_last +
(ip_rt_redirect_load << peer->n_redirects)))) {
@@ -1006,6 +1007,7 @@ out: kfree_skb(skb);
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
struct dst_entry *dst = &rt->dst;
+ struct net *net = dev_net(dst->dev);
u32 old_mtu = ipv4_mtu(dst);
struct fib_result res;
bool lock = false;
@@ -1026,9 +1028,11 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
return;
rcu_read_lock();
- if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
- struct fib_nh *nh = &FIB_RES_NH(res);
+ if (fib_lookup(net, fl4, &res, 0) == 0) {
+ struct fib_nh *nh;
+ fib_select_path(net, &res, fl4, NULL);
+ nh = &FIB_RES_NH(res);
update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
jiffies + ip_rt_mtu_expires);
}
@@ -2538,8 +2542,6 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
fib_select_path(net, res, fl4, skb);
dev_out = FIB_RES_DEV(*res);
- fl4->flowi4_oif = dev_out->ifindex;
-
make_route:
rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
@@ -2632,10 +2634,12 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
if (IS_ERR(rt))
return rt;
- if (flp4->flowi4_proto)
+ if (flp4->flowi4_proto) {
+ flp4->flowi4_oif = rt->dst.dev->ifindex;
rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
flowi4_to_flowi(flp4),
sk, 0);
+ }
return rt;
}
@@ -2872,7 +2876,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = dst;
fl4.saddr = src;
- fl4.flowi4_tos = rtm->rtm_tos;
+ fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
fl4.flowi4_mark = mark;
fl4.flowi4_uid = uid;
@@ -2896,8 +2900,9 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
fl4.flowi4_iif = iif; /* for rt_fill_info */
skb->dev = dev;
skb->mark = mark;
- err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
- dev, &res);
+ err = ip_route_input_rcu(skb, dst, src,
+ rtm->rtm_tos & IPTOS_RT_MASK, dev,
+ &res);
rt = skb_rtable(skb);
if (err == 0 && rt->dst.error)
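
The ip_idents_reserve() hunk above replaces a cmpxchg() retry loop with a single
atomic_add_return(), reserving a whole run of IDs in one step and letting the
32-bit counter wrap. Below is a minimal userspace sketch of the same reservation
idea, using C11 atomics and an unsigned counter in place of the kernel's
atomic_t; names and values are illustrative only, not kernel code.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t id_counter;

static uint32_t reserve_ids(uint32_t segs, uint32_t delta)
{
        /* Advance the counter by segs + delta in one atomic step and hand
         * out old + delta as the first ID of the run, which is what
         * atomic_add_return(segs + delta, p_id) - segs computes above.
         * Unsigned wraparound is well defined, so no cmpxchg loop is needed.
         */
        uint32_t old = atomic_fetch_add(&id_counter, segs + delta);

        return old + delta;
}

int main(void)
{
        atomic_store(&id_counter, UINT32_MAX - 4);      /* force a wrap */
        printf("run 1 starts at %u\n", (unsigned int)reserve_ids(8, 0));
        printf("run 2 starts at %u\n", (unsigned int)reserve_ids(8, 3));
        return 0;
}
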
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index f66b2e6d97a7..1a06850ef3cc 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -296,7 +296,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
__u32 cookie = ntohl(th->ack_seq) - 1;
struct sock *ret = sk;
struct request_sock *req;
- int mss;
+ int full_space, mss;
struct rtable *rt;
__u8 rcv_wscale;
struct flowi4 fl4;
@@ -391,8 +391,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
/* Try to redo what tcp_v4_send_synack did. */
req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
+ /* limit the window selection if the user enforces a smaller rx buffer */
+ full_space = tcp_full_space(sk);
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+ (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
+ req->rsk_window_clamp = full_space;
- tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
+ tcp_select_initial_window(sk, full_space, req->mss,
&req->rsk_rcv_wnd, &req->rsk_window_clamp,
ireq->wscale_ok, &rcv_wscale,
dst_metric(&rt->dst, RTAX_INITRWND));
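
The syncookie hunk above caps the window clamp at tcp_full_space() whenever the
application has locked its receive buffer, so a cookie-validated connection
cannot advertise more window than the buffer can actually back. A tiny sketch of
that clamp with plain integers standing in for the socket fields (values below
are made up):

#include <stdio.h>

static unsigned int clamp_window(unsigned int window_clamp,
                                 unsigned int full_space, int rcvbuf_locked)
{
        /* Mirror the condition added above: only clamp when the user locked
         * the receive buffer and the clamp is unset or too large. */
        if (rcvbuf_locked && (window_clamp > full_space || window_clamp == 0))
                window_clamp = full_space;
        return window_clamp;
}

int main(void)
{
        printf("%u\n", clamp_window(0, 65535, 1));       /* -> 65535 */
        printf("%u\n", clamp_window(262144, 65535, 1));  /* -> 65535 */
        printf("%u\n", clamp_window(262144, 65535, 0));  /* -> 262144 */
        return 0;
}
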
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 54dda47a9a56..769e1f683471 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -488,9 +488,19 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
int target, struct sock *sk)
{
- return (READ_ONCE(tp->rcv_nxt) - tp->copied_seq >= target) ||
- (sk->sk_prot->stream_memory_read ?
- sk->sk_prot->stream_memory_read(sk) : false);
+ int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
+
+ if (avail > 0) {
+ if (avail >= target)
+ return true;
+ if (tcp_rmem_pressure(sk))
+ return true;
+ if (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss)
+ return true;
+ }
+ if (sk->sk_prot->stream_memory_read)
+ return sk->sk_prot->stream_memory_read(sk);
+ return false;
}
/*
@@ -557,7 +567,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
(state != TCP_SYN_RECV || tp->fastopen_rsk)) {
int target = sock_rcvlowat(sk, 0, INT_MAX);
- if (tp->urg_seq == tp->copied_seq &&
+ if (tp->urg_seq == READ_ONCE(tp->copied_seq) &&
!sock_flag(sk, SOCK_URGINLINE) &&
tp->urg_data)
target++;
@@ -618,7 +628,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
unlock_sock_fast(sk, slow);
break;
case SIOCATMARK:
- answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
+ answ = tp->urg_data && tp->urg_seq == READ_ONCE(tp->copied_seq);
break;
case SIOCOUTQ:
if (sk->sk_state == TCP_LISTEN)
@@ -627,7 +637,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else
- answ = tp->write_seq - tp->snd_una;
+ answ = READ_ONCE(tp->write_seq) - tp->snd_una;
break;
case SIOCOUTQNSD:
if (sk->sk_state == TCP_LISTEN)
@@ -636,7 +646,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else
- answ = tp->write_seq - tp->snd_nxt;
+ answ = READ_ONCE(tp->write_seq) - tp->snd_nxt;
break;
default:
return -ENOIOCTLCMD;
@@ -1027,7 +1037,7 @@ new_segment:
sk->sk_wmem_queued += copy;
sk_mem_charge(sk, copy);
skb->ip_summed = CHECKSUM_PARTIAL;
- tp->write_seq += copy;
+ WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
TCP_SKB_CB(skb)->end_seq += copy;
tcp_skb_pcount_set(skb, 0);
@@ -1381,7 +1391,7 @@ new_segment:
if (!copied)
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
- tp->write_seq += copy;
+ WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
TCP_SKB_CB(skb)->end_seq += copy;
tcp_skb_pcount_set(skb, 0);
@@ -1686,9 +1696,9 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_eat_skb(sk, skb);
if (!desc->count)
break;
- tp->copied_seq = seq;
+ WRITE_ONCE(tp->copied_seq, seq);
}
- tp->copied_seq = seq;
+ WRITE_ONCE(tp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
@@ -1774,10 +1784,11 @@ static int tcp_zerocopy_receive(struct sock *sk,
down_read(&current->mm->mmap_sem);
- ret = -EINVAL;
vma = find_vma(current->mm, address);
- if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops)
- goto out;
+ if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) {
+ up_read(&current->mm->mmap_sem);
+ return -EINVAL;
+ }
zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);
tp = tcp_sk(sk);
@@ -1824,7 +1835,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
out:
up_read(&current->mm->mmap_sem);
if (length) {
- tp->copied_seq = seq;
+ WRITE_ONCE(tp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
/* Clean up data we have read: This will do ACK frames. */
@@ -2029,7 +2040,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
/* Well, if we have backlog, try to process it now yet. */
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
@@ -2101,7 +2112,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
if (urg_offset < used) {
if (!urg_offset) {
if (!sock_flag(sk, SOCK_URGINLINE)) {
- ++*seq;
+ WRITE_ONCE(*seq, *seq + 1);
urg_hole++;
offset++;
used--;
@@ -2123,7 +2134,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
}
}
- *seq += used;
+ WRITE_ONCE(*seq, *seq + used);
copied += used;
len -= used;
@@ -2134,14 +2145,16 @@ skip_copy:
tp->urg_data = 0;
tcp_fast_path_check(sk);
}
- if (used + offset < skb->len)
- continue;
if (TCP_SKB_CB(skb)->has_rxtstamp) {
tcp_update_recv_tstamps(skb, &tss);
has_tss = true;
has_cmsg = true;
}
+
+ if (used + offset < skb->len)
+ continue;
+
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto found_fin_ok;
if (!(flags & MSG_PEEK))
@@ -2150,7 +2163,7 @@ skip_copy:
found_fin_ok:
/* Process the FIN. */
- ++*seq;
+ WRITE_ONCE(*seq, *seq + 1);
if (!(flags & MSG_PEEK))
sk_eat_skb(sk, skb);
break;
@@ -2543,6 +2556,7 @@ int tcp_disconnect(struct sock *sk, int flags)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int old_state = sk->sk_state;
+ u32 seq;
if (old_state != TCP_CLOSE)
tcp_set_state(sk, TCP_CLOSE);
@@ -2565,7 +2579,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
tp->urg_data = 0;
tcp_write_queue_purge(sk);
tcp_fastopen_active_disable_ofo_check(sk);
@@ -2580,9 +2594,12 @@ int tcp_disconnect(struct sock *sk, int flags)
sock_reset_flag(sk, SOCK_DONE);
tp->srtt_us = 0;
tp->rcv_rtt_last_tsecr = 0;
- tp->write_seq += tp->max_window + 2;
- if (tp->write_seq == 0)
- tp->write_seq = 1;
+
+ seq = tp->write_seq + tp->max_window + 2;
+ if (!seq)
+ seq = 1;
+ WRITE_ONCE(tp->write_seq, seq);
+
tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
@@ -2590,6 +2607,9 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->window_clamp = 0;
tp->delivered = 0;
tp->delivered_ce = 0;
+ if (icsk->icsk_ca_ops->release)
+ icsk->icsk_ca_ops->release(sk);
+ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
tcp_clear_retrans(tp);
@@ -2866,16 +2886,23 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
break;
case TCP_QUEUE_SEQ:
- if (sk->sk_state != TCP_CLOSE)
+ if (sk->sk_state != TCP_CLOSE) {
err = -EPERM;
- else if (tp->repair_queue == TCP_SEND_QUEUE)
- tp->write_seq = val;
- else if (tp->repair_queue == TCP_RECV_QUEUE) {
- WRITE_ONCE(tp->rcv_nxt, val);
- WRITE_ONCE(tp->copied_seq, val);
- }
- else
+ } else if (tp->repair_queue == TCP_SEND_QUEUE) {
+ if (!tcp_rtx_queue_empty(sk))
+ err = -EPERM;
+ else
+ WRITE_ONCE(tp->write_seq, val);
+ } else if (tp->repair_queue == TCP_RECV_QUEUE) {
+ if (tp->rcv_nxt != tp->copied_seq) {
+ err = -EPERM;
+ } else {
+ WRITE_ONCE(tp->rcv_nxt, val);
+ WRITE_ONCE(tp->copied_seq, val);
+ }
+ } else {
err = -EINVAL;
+ }
break;
case TCP_REPAIR_OPTIONS:
@@ -3001,10 +3028,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
#ifdef CONFIG_TCP_MD5SIG
case TCP_MD5SIG:
case TCP_MD5SIG_EXT:
- if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
- err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
- else
- err = -EINVAL;
+ err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
break;
#endif
case TCP_USER_TIMEOUT:
@@ -3742,10 +3766,13 @@ EXPORT_SYMBOL(tcp_md5_hash_skb_data);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
{
+ u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
struct scatterlist sg;
- sg_init_one(&sg, key->key, key->keylen);
- ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
+ sg_init_one(&sg, key->key, keylen);
+ ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);
+
+ /* tcp_md5_do_add() might change key->key under us */
return crypto_ahash_update(hp->md5_req);
}
EXPORT_SYMBOL(tcp_md5_hash_key);
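
tcp_md5_hash_key() above now snapshots key->keylen exactly once, pairing with
the WRITE_ONCE() in tcp_md5_do_add(), while the key buffer is zero-filled at
allocation so a racing reader never hashes uninitialised memory. Below is a
minimal userspace sketch of that publish/snapshot pattern, with C11 relaxed
atomics standing in for READ_ONCE()/WRITE_ONCE(); the struct and helpers are
hypothetical, not the kernel's types.

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define MAXKEY 80                       /* stand-in for the max key length */

struct md5_key {
        char key[MAXKEY];               /* always zero-padded to MAXKEY */
        _Atomic unsigned int keylen;
};

/* Writer: update the key material first, then publish the new length. */
static void update_key(struct md5_key *k, const char *newkey, unsigned int len)
{
        memcpy(k->key, newkey, len);
        atomic_store_explicit(&k->keylen, len, memory_order_relaxed);
}

/* Reader: load the length once and use that snapshot consistently. */
static unsigned int snapshot_len(struct md5_key *k)
{
        return atomic_load_explicit(&k->keylen, memory_order_relaxed);
}

int main(void)
{
        static struct md5_key k;        /* zero-initialised, like __GFP_ZERO */

        update_key(&k, "secret", 6);
        printf("hashing %u bytes of key material\n", snapshot_len(&k));
        return 0;
}
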
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index b371e66502c3..b70c9365e131 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -115,6 +115,14 @@ struct bbr {
unused_b:5;
u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
u32 full_bw; /* recent bw, to estimate if pipe is full */
+
+ /* For tracking ACK aggregation: */
+ u64 ack_epoch_mstamp; /* start of ACK sampling epoch */
+ u16 extra_acked[2]; /* max excess data ACKed in epoch */
+ u32 ack_epoch_acked:20, /* packets (S)ACKed in sampling epoch */
+ extra_acked_win_rtts:5, /* age of extra_acked, in round trips */
+ extra_acked_win_idx:1, /* current index in extra_acked array */
+ unused_c:6;
};
#define CYCLE_LEN 8 /* number of phases in a pacing gain cycle */
@@ -174,6 +182,15 @@ static const u32 bbr_lt_bw_diff = 4000 / 8;
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;
+/* Gain factor for adding extra_acked to target cwnd: */
+static const int bbr_extra_acked_gain = BBR_UNIT;
+/* Window length of extra_acked window. */
+static const u32 bbr_extra_acked_win_rtts = 5;
+/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
+static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
+/* Time period for clamping cwnd increment due to ack aggregation */
+static const u32 bbr_extra_acked_max_us = 100 * 1000;
+
static void bbr_check_probe_rtt_done(struct sock *sk);
/* Do we estimate that STARTUP filled the pipe? */
@@ -200,6 +217,16 @@ static u32 bbr_bw(const struct sock *sk)
return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
}
+/* Return maximum extra acked in past k-2k round trips,
+ * where k = bbr_extra_acked_win_rtts.
+ */
+static u16 bbr_extra_acked(const struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ return max(bbr->extra_acked[0], bbr->extra_acked[1]);
+}
+
/* Return rate in bytes per second, optionally with a gain.
* The order here is chosen carefully to avoid overflow of u64. This should
* work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
@@ -305,6 +332,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
if (event == CA_EVENT_TX_START && tp->app_limited) {
bbr->idle_restart = 1;
+ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
+ bbr->ack_epoch_acked = 0;
/* Avoid pointless buffer overflows: pace at est. bw if we don't
* need more speed (we're restarting from idle and app-limited).
*/
@@ -315,30 +344,19 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
}
}
-/* Find target cwnd. Right-size the cwnd based on min RTT and the
- * estimated bottleneck bandwidth:
+/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
*
- * cwnd = bw * min_rtt * gain = BDP * gain
+ * bdp = bw * min_rtt * gain
*
* The key factor, gain, controls the amount of queue. While a small gain
* builds a smaller queue, it becomes more vulnerable to noise in RTT
* measurements (e.g., delayed ACKs or other ACK compression effects). This
* noise may cause BBR to under-estimate the rate.
- *
- * To achieve full performance in high-speed paths, we budget enough cwnd to
- * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
- * - one skb in sending host Qdisc,
- * - one skb in sending host TSO/GSO engine
- * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
- * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
- * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
- * which allows 2 outstanding 2-packet sequences, to try to keep pipe
- * full even with ACK-every-other-packet delayed ACKs.
*/
-static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
+static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
{
struct bbr *bbr = inet_csk_ca(sk);
- u32 cwnd;
+ u32 bdp;
u64 w;
/* If we've never had a valid RTT sample, cap cwnd at the initial
@@ -353,7 +371,24 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
w = (u64)bw * bbr->min_rtt_us;
/* Apply a gain to the given value, then remove the BW_SCALE shift. */
- cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
+ bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
+
+ return bdp;
+}
+
+/* To achieve full performance in high-speed paths, we budget enough cwnd to
+ * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
+ * - one skb in sending host Qdisc,
+ * - one skb in sending host TSO/GSO engine
+ * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
+ * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
+ * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
+ * which allows 2 outstanding 2-packet sequences, to try to keep pipe
+ * full even with ACK-every-other-packet delayed ACKs.
+ */
+static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
/* Allow enough full-sized skbs in flight to utilize end systems. */
cwnd += 3 * bbr_tso_segs_goal(sk);
@@ -368,6 +403,33 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
return cwnd;
}
+/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
+static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
+{
+ u32 inflight;
+
+ inflight = bbr_bdp(sk, bw, gain);
+ inflight = bbr_quantization_budget(sk, inflight, gain);
+
+ return inflight;
+}
+
+/* Find the cwnd increment based on estimate of ack aggregation */
+static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
+{
+ u32 max_aggr_cwnd, aggr_cwnd = 0;
+
+ if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
+ max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
+ / BW_UNIT;
+ aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
+ >> BBR_SCALE;
+ aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
+ }
+
+ return aggr_cwnd;
+}
+
/* An optimization in BBR to reduce losses: On the first round of recovery, we
* follow the packet conservation principle: send P packets per P packets acked.
* After that, we slow-start and send at most 2*P packets per P packets acked.
@@ -428,8 +490,15 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
goto done;
+ target_cwnd = bbr_bdp(sk, bw, gain);
+
+ /* Increment the cwnd to account for excess ACKed data that seems
+ * due to aggregation (of data and/or ACKs) visible in the ACK stream.
+ */
+ target_cwnd += bbr_ack_aggregation_cwnd(sk);
+ target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain);
+
/* If we're below target cwnd, slow start cwnd toward target cwnd. */
- target_cwnd = bbr_target_cwnd(sk, bw, gain);
if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
cwnd = min(cwnd + acked, target_cwnd);
else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
@@ -470,14 +539,14 @@ static bool bbr_is_next_cycle_phase(struct sock *sk,
if (bbr->pacing_gain > BBR_UNIT)
return is_full_length &&
(rs->losses || /* perhaps pacing_gain*BDP won't fit */
- inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain));
+ inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));
/* A pacing_gain < 1.0 tries to drain extra queue we added if bw
* probing didn't find more bw. If inflight falls to match BDP then we
* estimate queue is drained; persisting would underutilize the pipe.
*/
return is_full_length ||
- inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT);
+ inflight <= bbr_inflight(sk, bw, BBR_UNIT);
}
static void bbr_advance_cycle_phase(struct sock *sk)
@@ -699,6 +768,67 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
}
}
+/* Estimates the windowed max degree of ack aggregation.
+ * This is used to provision extra in-flight data to keep sending during
+ * inter-ACK silences.
+ *
+ * Degree of ack aggregation is estimated as extra data acked beyond expected.
+ *
+ * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
+ * cwnd += max_extra_acked
+ *
+ * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
+ * Max filter is an approximate sliding window of 5-10 (packet timed) round
+ * trips.
+ */
+static void bbr_update_ack_aggregation(struct sock *sk,
+ const struct rate_sample *rs)
+{
+ u32 epoch_us, expected_acked, extra_acked;
+ struct bbr *bbr = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 ||
+ rs->delivered < 0 || rs->interval_us <= 0)
+ return;
+
+ if (bbr->round_start) {
+ bbr->extra_acked_win_rtts = min(0x1F,
+ bbr->extra_acked_win_rtts + 1);
+ if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
+ bbr->extra_acked_win_rtts = 0;
+ bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
+ 0 : 1;
+ bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
+ }
+ }
+
+ /* Compute how many packets we expected to be delivered over epoch. */
+ epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
+ bbr->ack_epoch_mstamp);
+ expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
+
+ /* Reset the aggregation epoch if the ACK rate is below the expected
+ * rate, or if a very large number of ACKs has arrived since the
+ * (potentially quite old) epoch began.
+ */
+ if (bbr->ack_epoch_acked <= expected_acked ||
+ (bbr->ack_epoch_acked + rs->acked_sacked >=
+ bbr_ack_epoch_acked_reset_thresh)) {
+ bbr->ack_epoch_acked = 0;
+ bbr->ack_epoch_mstamp = tp->delivered_mstamp;
+ expected_acked = 0;
+ }
+
+ /* Compute excess data delivered, beyond what was expected. */
+ bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
+ bbr->ack_epoch_acked + rs->acked_sacked);
+ extra_acked = bbr->ack_epoch_acked - expected_acked;
+ extra_acked = min(extra_acked, tp->snd_cwnd);
+ if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
+ bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
+}
+
/* Estimate when the pipe is full, using the change in delivery rate: BBR
* estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
* at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
@@ -736,11 +866,11 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
bbr->pacing_gain = bbr_drain_gain; /* pace slow to drain */
bbr->cwnd_gain = bbr_high_gain; /* maintain cwnd */
tcp_sk(sk)->snd_ssthresh =
- bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT);
+ bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
} /* fall through to check if in-flight is already small: */
if (bbr->mode == BBR_DRAIN &&
tcp_packets_in_flight(tcp_sk(sk)) <=
- bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT))
+ bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
}
@@ -787,7 +917,7 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
filter_expired = after(tcp_jiffies32,
bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
if (rs->rtt_us >= 0 &&
- (rs->rtt_us <= bbr->min_rtt_us ||
+ (rs->rtt_us < bbr->min_rtt_us ||
(filter_expired && !rs->is_ack_delayed))) {
bbr->min_rtt_us = rs->rtt_us;
bbr->min_rtt_stamp = tcp_jiffies32;
@@ -828,6 +958,7 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
{
bbr_update_bw(sk, rs);
+ bbr_update_ack_aggregation(sk, rs);
bbr_update_cycle_phase(sk, rs);
bbr_check_full_bw_reached(sk, rs);
bbr_check_drain(sk, rs);
@@ -878,6 +1009,13 @@ static void bbr_init(struct sock *sk)
bbr_reset_lt_bw_sampling(sk);
bbr_reset_startup_mode(sk);
+ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
+ bbr->ack_epoch_acked = 0;
+ bbr->extra_acked_win_rtts = 0;
+ bbr->extra_acked_win_idx = 0;
+ bbr->extra_acked[0] = 0;
+ bbr->extra_acked[1] = 0;
+
cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}
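
The tcp_bbr.c comments above describe the target cwnd as a BDP estimate
(bw * min_rtt * gain) plus an allowance for recently observed ACK aggregation,
with that allowance clamped to roughly 100 ms worth of bandwidth
(bbr_extra_acked_max_us). A rough illustration of the arithmetic in plain
floating point, not the kernel's BW_UNIT/BBR_SCALE fixed-point math; all
numbers below are made up:

#include <stdio.h>

static double bdp_pkts(double bw_pkts_per_us, double min_rtt_us, double gain)
{
        return bw_pkts_per_us * min_rtt_us * gain;
}

int main(void)
{
        double bw = 0.0125;             /* 12,500 pkts/s, ~150 Mbit/s at 1500 B */
        double min_rtt_us = 20000.0;    /* 20 ms */
        double extra_acked = 40.0;      /* windowed max excess ACKed, packets */

        /* Aggregation allowance, clamped to 100 ms worth of packets. */
        double max_aggr = bw * 100000.0;
        double aggr = extra_acked < max_aggr ? extra_acked : max_aggr;
        double target = bdp_pkts(bw, min_rtt_us, 1.0) + aggr;

        printf("bdp=%.0f pkts, aggr=%.0f pkts, target cwnd=%.0f pkts\n",
               bdp_pkts(bw, min_rtt_us, 1.0), aggr, target);
        return 0;
}
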
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 48f79db446a0..533f8d84d2f7 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -196,7 +196,12 @@ static void tcp_reinit_congestion_control(struct sock *sk,
icsk->icsk_ca_setsockopt = 1;
memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
- if (sk->sk_state != TCP_CLOSE)
+ if (ca->flags & TCP_CONG_NEEDS_ECN)
+ INET_ECN_xmit(sk);
+ else
+ INET_ECN_dontxmit(sk);
+
+ if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
tcp_init_congestion_control(sk);
}
@@ -223,6 +228,10 @@ int tcp_set_default_congestion_control(struct net *net, const char *name)
ret = -ENOENT;
} else if (!try_module_get(ca->owner)) {
ret = -EBUSY;
+ } else if (!net_eq(net, &init_net) &&
+ !(ca->flags & TCP_CONG_NON_RESTRICTED)) {
+ /* Only init netns can set default to a restricted algorithm */
+ ret = -EPERM;
} else {
prev = xchg(&net->ipv4.tcp_congestion_control, ca);
if (prev)
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 78bfadfcf342..8b5ba0a5cd38 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -403,6 +403,8 @@ static void hystart_update(struct sock *sk, u32 delay)
if (hystart_detect & HYSTART_DELAY) {
/* obtain the minimum delay of more than sampling packets */
+ if (ca->curr_rtt > delay)
+ ca->curr_rtt = delay;
if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
if (ca->curr_rtt == 0 || ca->curr_rtt > delay)
ca->curr_rtt = delay;
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index c9e97f304f98..2a46f9f81ba0 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -30,8 +30,9 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
} else if (sk->sk_type == SOCK_STREAM) {
const struct tcp_sock *tp = tcp_sk(sk);
- r->idiag_rqueue = max_t(int, READ_ONCE(tp->rcv_nxt) - tp->copied_seq, 0);
- r->idiag_wqueue = tp->write_seq - tp->snd_una;
+ r->idiag_rqueue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+ READ_ONCE(tp->copied_seq), 0);
+ r->idiag_wqueue = READ_ONCE(tp->write_seq) - tp->snd_una;
}
if (info)
tcp_get_info(sk, info);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 38b6d8f90a44..36bff9291530 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -254,7 +254,8 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
* cwnd may be very low (even just 1 packet), so we should ACK
* immediately.
*/
- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+ if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
}
}
@@ -438,7 +439,6 @@ void tcp_init_buffer_space(struct sock *sk)
if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
tcp_sndbuf_expand(sk);
- tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss);
tcp_mstamp_refresh(tp);
tp->rcvq_space.time = tp->tcp_mstamp;
tp->rcvq_space.seq = tp->copied_seq;
@@ -462,6 +462,8 @@ void tcp_init_buffer_space(struct sock *sk)
tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
tp->snd_cwnd_stamp = tcp_jiffies32;
+ tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,
+ (u32)TCP_INIT_CWND * tp->advmss);
}
/* 4. Recalculate window clamp after socket hit its memory bounds. */
@@ -2748,7 +2750,8 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
} else if (tcp_is_rack(sk)) {
u32 prior_retrans = tp->retrans_out;
- tcp_rack_mark_lost(sk);
+ if (tcp_rack_mark_lost(sk))
+ *ack_flag &= ~FLAG_SET_XMIT_TIMER;
if (prior_retrans > tp->retrans_out)
*ack_flag |= FLAG_LOST_RETRANS;
}
@@ -3488,10 +3491,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
}
}
-/* This routine deals with acks during a TLP episode.
- * We mark the end of a TLP episode on receiving TLP dupack or when
- * ack is after tlp_high_seq.
- * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
+/* This routine deals with acks during a TLP episode and ends an episode by
+ * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack
*/
static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
{
@@ -3500,7 +3501,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
if (before(ack, tp->tlp_high_seq))
return;
- if (flag & FLAG_DSACKING_ACK) {
+ if (!tp->tlp_retrans) {
+ /* TLP of new data has been acknowledged */
+ tp->tlp_high_seq = 0;
+ } else if (flag & FLAG_DSACKING_ACK) {
/* This DSACK means original and TLP probe arrived; no loss */
tp->tlp_high_seq = 0;
} else if (after(ack, tp->tlp_high_seq)) {
@@ -3665,6 +3669,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_in_ack_event(sk, ack_ev_flags);
}
+ /* This is a deviation from RFC3168 since it states that:
+ * "When the TCP data sender is ready to set the CWR bit after reducing
+ * the congestion window, it SHOULD set the CWR bit only on the first
+ * new data packet that it transmits."
+ * We accept CWR on pure ACKs to interoperate more robustly
+ * with widely-deployed TCP implementations that do this.
+ */
+ tcp_ecn_accept_cwr(sk, skb);
+
/* We passed data and got it acked, remove any soft error
* log. Something worked...
*/
@@ -3681,9 +3694,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (tp->tlp_high_seq)
tcp_process_tlp_ack(sk, ack, flag);
- /* If needed, reset TLP/RTO timer; RACK may later override this. */
- if (flag & FLAG_SET_XMIT_TIMER)
- tcp_set_xmit_timer(sk);
if (tcp_ack_is_dubious(sk, flag)) {
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
@@ -3691,6 +3701,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
&rexmit);
}
+ /* If needed, reset the TLP/RTO timer here when RACK has not already set it. */
+ if (flag & FLAG_SET_XMIT_TIMER)
+ tcp_set_xmit_timer(sk);
+
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
sk_dst_confirm(sk);
@@ -4484,6 +4498,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
+ sk->sk_data_ready(sk);
tcp_drop(sk, skb);
return;
}
@@ -4518,7 +4533,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
skb, &fragstolen)) {
coalesce_done:
- tcp_grow_window(sk, skb);
+ /* For non-SACK flows, do not grow the window, so as to force DUPACKs
+ * and trigger fast retransmit.
+ */
+ if (tcp_is_sack(tp))
+ tcp_grow_window(sk, skb);
kfree_skb_partial(skb, fragstolen);
skb = NULL;
goto add_sack;
@@ -4602,7 +4621,11 @@ add_sack:
tcp_sack_new_ofo_skb(sk, seq, end_seq);
end:
if (skb) {
- tcp_grow_window(sk, skb);
+ /* For non-SACK flows, do not grow the window, so as to force DUPACKs
+ * and trigger fast retransmit.
+ */
+ if (tcp_is_sack(tp))
+ tcp_grow_window(sk, skb);
skb_condense(skb);
skb_set_owner_r(skb, sk);
}
@@ -4683,7 +4706,9 @@ void tcp_data_ready(struct sock *sk)
const struct tcp_sock *tp = tcp_sk(sk);
int avail = tp->rcv_nxt - tp->copied_seq;
- if (avail < sk->sk_rcvlowat && !sock_flag(sk, SOCK_DONE))
+ if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) &&
+ !sock_flag(sk, SOCK_DONE) &&
+ tcp_receive_window(tp) > inet_csk(sk)->icsk_ack.rcv_mss)
return;
sk->sk_data_ready(sk);
@@ -4702,8 +4727,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
skb_dst_drop(skb);
__skb_pull(skb, tcp_hdr(skb)->doff * 4);
- tcp_ecn_accept_cwr(sk, skb);
-
tp->rx_opt.dsack = 0;
/* Queue data for delivery to the user.
@@ -4722,6 +4745,7 @@ queue_and_out:
sk_forced_mem_schedule(sk, skb->truesize);
else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
+ sk->sk_data_ready(sk);
goto drop;
}
@@ -5611,6 +5635,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
tcp_data_snd_check(sk);
if (!inet_csk_ack_scheduled(sk))
goto no_ack;
+ } else {
+ tcp_update_wl(tp, TCP_SKB_CB(skb)->seq);
}
__tcp_ack_snd_check(sk, 0);
@@ -5863,7 +5889,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
/* Remember, tcp_poll() does not lock socket!
* Change state from SYN-SENT only after copied_seq
* is initialized. */
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
smc_check_reset_syn(tp);
@@ -5938,7 +5964,7 @@ discard:
}
WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
/* RFC1323: The window in SYN & SYN/ACK segments is
@@ -6100,7 +6126,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tcp_rearm_rto(sk);
} else {
tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
}
smp_mb();
tcp_set_state(sk, TCP_ESTABLISHED);
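
The tcp_stream_is_readable() and tcp_data_ready() hunks above converge on one
readability test: report data once it reaches the low-water mark, or earlier
when receive-memory pressure or a receive window below one MSS means waiting
for more data would stall the connection. A hedged sketch of that predicate
with plain integers, ignoring the stream_memory_read() hook:

#include <stdbool.h>
#include <stdio.h>

static bool stream_readable(int avail, int target, bool rmem_pressure,
                            int rcv_window, int rcv_mss)
{
        if (avail > 0) {
                if (avail >= target)            /* enough for the reader */
                        return true;
                if (rmem_pressure)              /* free rmem by waking early */
                        return true;
                if (rcv_window <= rcv_mss)      /* window nearly closed */
                        return true;
        }
        return false;
}

int main(void)
{
        printf("%d\n", stream_readable(100, 4096, false, 65535, 1460)); /* 0 */
        printf("%d\n", stream_readable(100, 4096, false, 1000, 1460));  /* 1 */
        return 0;
}
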
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6da393016c11..ac6135555e24 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -169,9 +169,11 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
* without appearing to create any others.
*/
if (likely(!tp->repair)) {
- tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
- if (tp->write_seq == 0)
- tp->write_seq = 1;
+ u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
+
+ if (!seq)
+ seq = 1;
+ WRITE_ONCE(tp->write_seq, seq);
tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
}
@@ -258,7 +260,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
if (likely(!tp->repair))
- tp->write_seq = 0;
+ WRITE_ONCE(tp->write_seq, 0);
}
inet->inet_dport = usin->sin_port;
@@ -296,10 +298,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (likely(!tp->repair)) {
if (!tp->write_seq)
- tp->write_seq = secure_tcp_seq(inet->inet_saddr,
- inet->inet_daddr,
- inet->inet_sport,
- usin->sin_port);
+ WRITE_ONCE(tp->write_seq,
+ secure_tcp_seq(inet->inet_saddr,
+ inet->inet_daddr,
+ inet->inet_sport,
+ usin->sin_port));
tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
inet->inet_saddr,
inet->inet_daddr);
@@ -1063,9 +1066,18 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
if (key) {
- /* Pre-existing entry - just update that one. */
+ /* Pre-existing entry - just update that one.
+ * Note that the key might be used concurrently.
+ */
memcpy(key->key, newkey, newkeylen);
- key->keylen = newkeylen;
+
+ /* Pairs with READ_ONCE() in tcp_md5_hash_key().
+ * Also note that a reader could catch the new key->keylen value
+ * but the old key->key[]; this is why we use __GFP_ZERO in the
+ * sock_kmalloc() call further below.
+ */
+ WRITE_ONCE(key->keylen, newkeylen);
+
return 0;
}
@@ -1081,7 +1093,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
rcu_assign_pointer(tp->md5sig_info, md5sig);
}
- key = sock_kmalloc(sk, sizeof(*key), gfp);
+ key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
if (!key)
return -ENOMEM;
if (!tcp_alloc_md5sig_pool()) {
@@ -2331,12 +2343,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
* we might find a transient negative value.
*/
rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
- tp->copied_seq, 0);
+ READ_ONCE(tp->copied_seq), 0);
seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
i, src, srcp, dest, destp, state,
- tp->write_seq - tp->snd_una,
+ READ_ONCE(tp->write_seq) - tp->snd_una,
rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 7ba8a90772b0..a20b393b4501 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -470,7 +470,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
seq = treq->rcv_isn + 1;
newtp->rcv_wup = seq;
- newtp->copied_seq = seq;
+ WRITE_ONCE(newtp->copied_seq, seq);
WRITE_ONCE(newtp->rcv_nxt, seq);
newtp->segs_in = 1;
@@ -510,7 +510,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->app_limited = ~0U;
tcp_init_xmit_timers(newsk);
- newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
+ WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
newtp->rx_opt.saw_tstamp = 0;
@@ -815,8 +815,11 @@ embryonic_reset:
tcp_reset(sk);
}
if (!fastopen) {
- inet_csk_reqsk_queue_drop(sk, req);
- __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+ bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
+
+ if (unlinked)
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+ *req_stolen = !unlinked;
}
return NULL;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index cc4ba42052c2..9b74041e8dd1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -648,7 +648,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
unsigned int mss, struct sk_buff *skb,
struct tcp_out_options *opts,
const struct tcp_md5sig_key *md5,
- struct tcp_fastopen_cookie *foc)
+ struct tcp_fastopen_cookie *foc,
+ enum tcp_synack_type synack_type)
{
struct inet_request_sock *ireq = inet_rsk(req);
unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -663,7 +664,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
* rather than TS in order to fit in better with old,
* buggy kernels, but that was deemed to be unnecessary.
*/
- ireq->tstamp_ok &= !ireq->sack_ok;
+ if (synack_type != TCP_SYNACK_COOKIE)
+ ireq->tstamp_ok &= !ireq->sack_ok;
}
#endif
@@ -1173,7 +1175,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp = tcp_sk(sk);
/* Advance write_seq and place onto the write_queue. */
- tp->write_seq = TCP_SKB_CB(skb)->end_seq;
+ WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
__skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
sk->sk_wmem_queued += skb->truesize;
@@ -1620,7 +1622,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
* window, and remember whether we were cwnd-limited then.
*/
if (!before(tp->snd_una, tp->max_packets_seq) ||
- tp->packets_out > tp->max_packets_out) {
+ tp->packets_out > tp->max_packets_out ||
+ is_cwnd_limited) {
tp->max_packets_out = tp->packets_out;
tp->max_packets_seq = tp->snd_nxt;
tp->is_cwnd_limited = is_cwnd_limited;
@@ -2405,6 +2408,10 @@ repair:
else
tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
+ is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
+ if (likely(sent_pkts || is_cwnd_limited))
+ tcp_cwnd_validate(sk, is_cwnd_limited);
+
if (likely(sent_pkts)) {
if (tcp_in_cwnd_reduction(sk))
tp->prr_out += sent_pkts;
@@ -2412,8 +2419,6 @@ repair:
/* Send one loss probe per tail loss episode. */
if (push_one != 2)
tcp_schedule_loss_probe(sk, false);
- is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
- tcp_cwnd_validate(sk, is_cwnd_limited);
return false;
}
return !tp->packets_out && !tcp_write_queue_empty(sk);
@@ -2493,6 +2498,11 @@ void tcp_send_loss_probe(struct sock *sk)
int pcount;
int mss = tcp_current_mss(sk);
+ /* At most one outstanding TLP */
+ if (tp->tlp_high_seq)
+ goto rearm_timer;
+
+ tp->tlp_retrans = 0;
skb = tcp_send_head(sk);
if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
pcount = tp->packets_out;
@@ -2510,10 +2520,6 @@ void tcp_send_loss_probe(struct sock *sk)
return;
}
- /* At most one outstanding TLP retransmission. */
- if (tp->tlp_high_seq)
- goto rearm_timer;
-
if (skb_still_in_host_queue(sk, skb))
goto rearm_timer;
@@ -2535,10 +2541,12 @@ void tcp_send_loss_probe(struct sock *sk)
if (__tcp_retransmit_skb(sk, skb, 1))
goto rearm_timer;
+ tp->tlp_retrans = 1;
+
+probe_sent:
/* Record snd_nxt for loss detection. */
tp->tlp_high_seq = tp->snd_nxt;
-probe_sent:
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
/* Reset s.t. tcp_rearm_rto will restart timer from now */
inet_csk(sk)->icsk_pending = 0;
@@ -3246,7 +3254,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
#endif
skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
- foc) + sizeof(*th);
+ foc, synack_type) + sizeof(*th);
skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb);
@@ -3373,7 +3381,7 @@ static void tcp_connect_init(struct sock *sk)
else
tp->rcv_tstamp = tcp_jiffies32;
tp->rcv_wup = tp->rcv_nxt;
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
inet_csk(sk)->icsk_retransmits = 0;
@@ -3389,7 +3397,7 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
__skb_header_release(skb);
sk->sk_wmem_queued += skb->truesize;
sk_mem_charge(sk, skb->truesize);
- tp->write_seq = tcb->end_seq;
+ WRITE_ONCE(tp->write_seq, tcb->end_seq);
tp->packets_out += tcp_skb_pcount(skb);
}
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index c81aadff769b..0d96decba13d 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -109,13 +109,13 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
}
}
-void tcp_rack_mark_lost(struct sock *sk)
+bool tcp_rack_mark_lost(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 timeout;
if (!tp->rack.advanced)
- return;
+ return false;
/* Reset the advanced flag to avoid unnecessary queue scanning */
tp->rack.advanced = 0;
@@ -125,6 +125,7 @@ void tcp_rack_mark_lost(struct sock *sk)
inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
timeout, inet_csk(sk)->icsk_rto);
}
+ return !!timeout;
}
/* Record the most recently (re)sent time among the (s)acked packets
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 0ef04cda1b27..110af0e7dc7b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -433,7 +433,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
struct udp_hslot *hslot2,
struct sk_buff *skb)
{
- struct sock *sk, *result;
+ struct sock *sk, *result, *reuseport_result;
int score, badness;
u32 hash = 0;
@@ -443,17 +443,20 @@ static struct sock *udp4_lib_lookup2(struct net *net,
score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif, sdif, exact_dif);
if (score > badness) {
+ reuseport_result = NULL;
+
if (sk->sk_reuseport &&
sk->sk_state != TCP_ESTABLISHED) {
hash = udp_ehashfn(net, daddr, hnum,
saddr, sport);
- result = reuseport_select_sock(sk, hash, skb,
- sizeof(struct udphdr));
- if (result && !reuseport_has_conns(sk, false))
- return result;
+ reuseport_result = reuseport_select_sock(sk, hash, skb,
+ sizeof(struct udphdr));
+ if (reuseport_result && !reuseport_has_conns(sk, false))
+ return reuseport_result;
}
+
+ result = reuseport_result ? : sk;
badness = score;
- result = sk;
}
}
return result;
@@ -1986,7 +1989,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
/*
* UDP-Lite specific tests, ignored on UDP sockets
*/
- if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+ if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
/*
* MIB statistics other than incrementing the error count are
@@ -2413,7 +2416,8 @@ int udp_v4_early_demux(struct sk_buff *skb)
*/
if (!inet_sk(sk)->inet_daddr && in_dev)
return ip_mc_validate_source(skb, iph->daddr,
- iph->saddr, iph->tos,
+ iph->saddr,
+ iph->tos & IPTOS_RT_MASK,
skb->dev, in_dev, &itag);
}
return 0;
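
The udp4_lib_lookup2() hunk above keeps the reuseport group selection local to
each best-scoring candidate and falls back to that candidate itself when the
selection returns nothing, instead of reusing a value left over from an earlier
iteration. A hedged userspace sketch of that selection shape; the candidate
struct and selector below are stand-ins, not kernel APIs.

#include <stddef.h>
#include <stdio.h>

struct cand { const char *name; int score; int in_reuseport_group; };

/* Stand-in for reuseport_select_sock(): may legitimately return NULL. */
static const struct cand *group_select(const struct cand *c)
{
        (void)c;
        return NULL;
}

static const struct cand *best_match(const struct cand *c, size_t n)
{
        const struct cand *result = NULL;
        int badness = -1;

        for (size_t i = 0; i < n; i++) {
                if (c[i].score > badness) {
                        const struct cand *reuse = NULL;

                        if (c[i].in_reuseport_group)
                                reuse = group_select(&c[i]);
                        /* Reset per candidate: never carry a selection over
                         * from a previous, lower-scoring candidate. */
                        result = reuse ? reuse : &c[i];
                        badness = c[i].score;
                }
        }
        return result;
}

int main(void)
{
        struct cand socks[] = { { "a", 3, 1 }, { "b", 5, 0 } };

        printf("selected %s\n", best_match(socks, 2)->name);
        return 0;
}
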
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index aa343654abfc..2d22d39952da 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -359,7 +359,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
struct sock *sk;
if (NAPI_GRO_CB(skb)->encap_mark ||
- (skb->ip_summed != CHECKSUM_PARTIAL &&
+ (uh->check && skb->ip_summed != CHECKSUM_PARTIAL &&
NAPI_GRO_CB(skb)->csum_cnt == 0 &&
!NAPI_GRO_CB(skb)->csum_valid))
goto out;
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index be980c195fc5..510d2ec4c76a 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -77,9 +77,7 @@ int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb)
{
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-#ifdef CONFIG_NETFILTER
IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
-#endif
return xfrm_output(sk, skb);
}
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 613282c65a10..a32cf50c237d 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -321,6 +321,7 @@ config IPV6_SEG6_LWTUNNEL
config IPV6_SEG6_HMAC
bool "IPv6: Segment Routing HMAC support"
depends on IPV6
+ select CRYPTO
select CRYPTO_HMAC
select CRYPTO_SHA1
select CRYPTO_SHA256
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 627cd24b7c0d..76c097552ea7 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2395,8 +2395,9 @@ static void addrconf_add_mroute(struct net_device *dev)
.fc_ifindex = dev->ifindex,
.fc_dst_len = 8,
.fc_flags = RTF_UP,
- .fc_type = RTN_UNICAST,
+ .fc_type = RTN_MULTICAST,
.fc_nlinfo.nl_net = dev_net(dev),
+ .fc_protocol = RTPROT_KERNEL,
};
ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 5cd0029d930e..66a1a0eb2ed0 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -127,11 +127,12 @@ int inet6addr_validator_notifier_call_chain(unsigned long val, void *v)
}
EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain);
-static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
- struct dst_entry **u2,
- struct flowi6 *u3)
+static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net,
+ const struct sock *sk,
+ struct flowi6 *fl6,
+ const struct in6_addr *final_dst)
{
- return -EAFNOSUPPORT;
+ return ERR_PTR(-EAFNOSUPPORT);
}
static struct fib6_table *eafnosupport_fib6_get_table(struct net *net, u32 id)
@@ -169,7 +170,7 @@ eafnosupport_ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
}
const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
- .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup,
+ .ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow,
.fib6_get_table = eafnosupport_fib6_get_table,
.fib6_table_lookup = eafnosupport_fib6_table_lookup,
.fib6_lookup = eafnosupport_fib6_lookup,
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 1d6ced37ad71..c7dc8b2de6c2 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -306,7 +306,9 @@ static int ip6addrlbl_del(struct net *net,
/* add default label */
static int __net_init ip6addrlbl_net_init(struct net *net)
{
- int err = 0;
+ struct ip6addrlbl_entry *p = NULL;
+ struct hlist_node *n;
+ int err;
int i;
ADDRLABEL(KERN_DEBUG "%s\n", __func__);
@@ -315,14 +317,20 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
INIT_HLIST_HEAD(&net->ipv6.ip6addrlbl_table.head);
for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) {
- int ret = ip6addrlbl_add(net,
- ip6addrlbl_init_table[i].prefix,
- ip6addrlbl_init_table[i].prefixlen,
- 0,
- ip6addrlbl_init_table[i].label, 0);
- /* XXX: should we free all rules when we catch an error? */
- if (ret && (!err || err != -ENOMEM))
- err = ret;
+ err = ip6addrlbl_add(net,
+ ip6addrlbl_init_table[i].prefix,
+ ip6addrlbl_init_table[i].prefixlen,
+ 0,
+ ip6addrlbl_init_table[i].label, 0);
+ if (err)
+ goto err_ip6addrlbl_add;
+ }
+ return 0;
+
+err_ip6addrlbl_add:
+ hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) {
+ hlist_del_rcu(&p->list);
+ kfree_rcu(p, rcu);
}
return err;
}
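
The ip6addrlbl_net_init() hunk above stops ignoring per-entry failures and
instead unwinds every label added so far before returning the error, so the
table is never left half-initialised. A small userspace sketch of that
add-then-unwind pattern, using a plain singly linked list rather than the
kernel's RCU-protected hlist:

#include <stdio.h>
#include <stdlib.h>

struct entry { struct entry *next; int label; };

static int add_entry(struct entry **head, int label)
{
        struct entry *e = malloc(sizeof(*e));

        if (!e)
                return -1;
        e->label = label;
        e->next = *head;
        *head = e;
        return 0;
}

static int init_table(struct entry **head, const int *labels, int n)
{
        for (int i = 0; i < n; i++) {
                if (add_entry(head, labels[i]) < 0)
                        goto err_unwind;
        }
        return 0;

err_unwind:
        /* Free everything added so far instead of returning a partial table. */
        while (*head) {
                struct entry *e = *head;

                *head = e->next;
                free(e);
        }
        return -1;
}

int main(void)
{
        struct entry *head = NULL;
        int labels[] = { 1, 4, 6 };

        printf("init_table: %d\n", init_table(&head, labels, 3));
        return 0;
}
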
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 79fcd9550fd2..5c2351deedc8 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -740,7 +740,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
&final);
rcu_read_unlock();
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
if (IS_ERR(dst)) {
sk->sk_route_caps = 0;
sk->sk_err_soft = -PTR_ERR(dst);
@@ -904,7 +904,7 @@ static struct pernet_operations inet6_net_ops = {
static const struct ipv6_stub ipv6_stub_impl = {
.ipv6_sock_mc_join = ipv6_sock_mc_join,
.ipv6_sock_mc_drop = ipv6_sock_mc_drop,
- .ipv6_dst_lookup = ip6_dst_lookup,
+ .ipv6_dst_lookup_flow = ip6_dst_lookup_flow,
.fib6_get_table = fib6_get_table,
.fib6_table_lookup = fib6_table_lookup,
.fib6_lookup = fib6_lookup,
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 78c974391567..2b68bd7c83c4 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -600,7 +600,8 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
memset(ah->auth_data, 0, ahp->icv_trunc_len);
- if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN))
+ err = ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN);
+ if (err)
goto out_free;
ip6h->priority = 0;
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 4e0ff7031edd..55fbe330471c 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -173,7 +173,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
return 0;
}
-void ipv6_sock_ac_close(struct sock *sk)
+void __ipv6_sock_ac_close(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct net_device *dev = NULL;
@@ -181,10 +181,7 @@ void ipv6_sock_ac_close(struct sock *sk)
struct net *net = sock_net(sk);
int prev_index;
- if (!np->ipv6_ac_list)
- return;
-
- rtnl_lock();
+ ASSERT_RTNL();
pac = np->ipv6_ac_list;
np->ipv6_ac_list = NULL;
@@ -201,6 +198,16 @@ void ipv6_sock_ac_close(struct sock *sk)
sock_kfree_s(sk, pac, sizeof(*pac));
pac = next;
}
+}
+
+void ipv6_sock_ac_close(struct sock *sk)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ if (!np->ipv6_ac_list)
+ return;
+ rtnl_lock();
+ __ipv6_sock_ac_close(sk);
rtnl_unlock();
}
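
Splitting ipv6_sock_ac_close() into an RTNL-taking wrapper and a __-prefixed helper that merely asserts the lock lets callers that already hold RTNL (the sockglue path later in this series) reuse the body without deadlocking. A stripped-down sketch of the shape, with the actual cleanup elided:

#include <linux/rtnetlink.h>
#include <net/sock.h>

/* Caller must hold RTNL; for paths that already own the lock. */
static void __demo_cleanup(struct sock *sk)
{
	ASSERT_RTNL();
	/* ... tear down per-socket state here ... */
}

/* Convenience wrapper for callers that do not hold RTNL yet. */
static void demo_cleanup(struct sock *sk)
{
	rtnl_lock();
	__demo_cleanup(sk);
	rtnl_unlock();
}
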
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 1c0bb9fb76e6..afc76062e1a1 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -97,6 +97,9 @@ struct calipso_map_cache_entry {
static struct calipso_map_cache_bkt *calipso_cache;
+static void calipso_cache_invalidate(void);
+static void calipso_doi_putdef(struct calipso_doi *doi_def);
+
/* Label Mapping Cache Functions
*/
@@ -458,15 +461,10 @@ static int calipso_doi_remove(u32 doi, struct netlbl_audit *audit_info)
ret_val = -ENOENT;
goto doi_remove_return;
}
- if (!refcount_dec_and_test(&doi_def->refcount)) {
- spin_unlock(&calipso_doi_list_lock);
- ret_val = -EBUSY;
- goto doi_remove_return;
- }
list_del_rcu(&doi_def->list);
spin_unlock(&calipso_doi_list_lock);
- call_rcu(&doi_def->rcu, calipso_doi_free_rcu);
+ calipso_doi_putdef(doi_def);
ret_val = 0;
doi_remove_return:
@@ -522,10 +520,8 @@ static void calipso_doi_putdef(struct calipso_doi *doi_def)
if (!refcount_dec_and_test(&doi_def->refcount))
return;
- spin_lock(&calipso_doi_list_lock);
- list_del_rcu(&doi_def->list);
- spin_unlock(&calipso_doi_list_lock);
+ calipso_cache_invalidate();
call_rcu(&doi_def->rcu, calipso_doi_free_rcu);
}
@@ -1061,7 +1057,8 @@ static int calipso_opt_getattr(const unsigned char *calipso,
goto getattr_return;
}
- secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+ if (secattr->attr.mls.cat)
+ secattr->flags |= NETLBL_SECATTR_MLS_CAT;
}
secattr->type = NETLBL_NLTYPE_CALIPSO;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 971a0fdf1fbc..727f958dd869 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -89,7 +89,7 @@ int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
final_p = fl6_update_dst(&fl6, opt, &final);
rcu_read_unlock();
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto out;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index a7d996148eed..25317d5ccf2c 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -237,7 +237,6 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
u8 *tail;
- u8 *vaddr;
int nfrags;
struct page *page;
struct sk_buff *trailer;
@@ -270,14 +269,10 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
page = pfrag->page;
get_page(page);
- vaddr = kmap_atomic(page);
-
- tail = vaddr + pfrag->offset;
+ tail = page_address(page) + pfrag->offset;
esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
- kunmap_atomic(vaddr);
-
nfrags = skb_shinfo(skb)->nr_frags;
__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 6177e2171171..eeee64a8a72c 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -121,9 +121,16 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
struct ip_esp_hdr *esph;
struct ipv6hdr *iph = ipv6_hdr(skb);
struct xfrm_offload *xo = xfrm_offload(skb);
- int proto = iph->nexthdr;
+ u8 proto = iph->nexthdr;
skb_push(skb, -skb_network_offset(skb));
+
+ if (x->outer_mode->encap == XFRM_MODE_TRANSPORT) {
+ __be16 frag;
+
+ ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
+ }
+
esph = ip_esp_hdr(skb);
*skb_mac_header(skb) = IPPROTO_ESP;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 6d14cbe443f8..fbc8746371b6 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -309,10 +309,9 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st
}
#if IS_ENABLED(CONFIG_IPV6_MIP6)
-static void mip6_addr_swap(struct sk_buff *skb)
+static void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt)
{
struct ipv6hdr *iph = ipv6_hdr(skb);
- struct inet6_skb_parm *opt = IP6CB(skb);
struct ipv6_destopt_hao *hao;
struct in6_addr tmp;
int off;
@@ -329,7 +328,7 @@ static void mip6_addr_swap(struct sk_buff *skb)
}
}
#else
-static inline void mip6_addr_swap(struct sk_buff *skb) {}
+static inline void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) {}
#endif
static struct dst_entry *icmpv6_route_lookup(struct net *net,
@@ -418,8 +417,9 @@ static int icmp6_iif(const struct sk_buff *skb)
/*
* Send an ICMP message in response to a packet in error
*/
-static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- const struct in6_addr *force_saddr)
+void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct in6_addr *force_saddr,
+ const struct inet6_skb_parm *parm)
{
struct inet6_dev *idev = NULL;
struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -512,7 +512,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
if (!(skb->dev->flags&IFF_LOOPBACK) && !icmpv6_global_allow(type))
goto out_bh_enable;
- mip6_addr_swap(skb);
+ mip6_addr_swap(skb, parm);
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_ICMPV6;
@@ -592,12 +592,13 @@ out:
out_bh_enable:
local_bh_enable();
}
+EXPORT_SYMBOL(icmp6_send);
/* Slightly more convenient version of icmp6_send.
*/
void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
- icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL);
+ icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL, IP6CB(skb));
kfree_skb(skb);
}
@@ -654,10 +655,10 @@ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
}
if (type == ICMP_TIME_EXCEEDED)
icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
- info, &temp_saddr);
+ info, &temp_saddr, IP6CB(skb2));
else
icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH,
- info, &temp_saddr);
+ info, &temp_saddr, IP6CB(skb2));
if (rt)
ip6_rt_put(rt);
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 890adadcda16..92fe9e565da0 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -52,7 +52,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
fl6->flowi6_uid = sk->sk_uid;
security_req_classify_flow(req, flowi6_to_flowi(fl6));
- dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
if (IS_ERR(dst))
return NULL;
@@ -107,7 +107,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
dst = __inet6_csk_dst_check(sk, np->dst_cookie);
if (!dst) {
- dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
if (!IS_ERR(dst))
ip6_dst_store(sk, dst, NULL, NULL);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5e8979c1f76d..e0e464b72c1f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -906,6 +906,8 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
{
struct fib6_table *table = rt->fib6_table;
+ /* Flush all cached dst in exception table */
+ rt6_flush_exceptions(rt);
if (rt->rt6i_pcpu)
fib6_drop_pcpu_from(rt, table);
@@ -1756,9 +1758,6 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
net->ipv6.rt6_stats->fib_rt_entries--;
net->ipv6.rt6_stats->fib_discarded_routes++;
- /* Flush all cached dst in exception table */
- rt6_flush_exceptions(rt);
-
/* Reset round-robin state, if necessary */
if (rcu_access_pointer(fn->rr_ptr) == rt)
fn->rr_ptr = NULL;
@@ -1811,14 +1810,19 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
/* Need to own table->tb6_lock */
int fib6_del(struct fib6_info *rt, struct nl_info *info)
{
- struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node,
- lockdep_is_held(&rt->fib6_table->tb6_lock));
- struct fib6_table *table = rt->fib6_table;
struct net *net = info->nl_net;
struct fib6_info __rcu **rtp;
struct fib6_info __rcu **rtp_next;
+ struct fib6_table *table;
+ struct fib6_node *fn;
- if (!fn || rt == net->ipv6.fib6_null_entry)
+ if (rt == net->ipv6.fib6_null_entry)
+ return -ENOENT;
+
+ table = rt->fib6_table;
+ fn = rcu_dereference_protected(rt->fib6_node,
+ lockdep_is_held(&table->tb6_lock));
+ if (!fn)
return -ENOENT;
WARN_ON(!(fn->fn_flags & RTN_RTINFO));
@@ -2372,14 +2376,13 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct net *net = seq_file_net(seq);
struct ipv6_route_iter *iter = seq->private;
+ ++(*pos);
if (!v)
goto iter_table;
n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
- if (n) {
- ++*pos;
+ if (n)
return n;
- }
iter_table:
ipv6_route_check_sernum(iter);
@@ -2387,8 +2390,6 @@ iter_table:
r = fib6_walk_continue(&iter->w);
spin_unlock_bh(&iter->tbl->tb6_lock);
if (r > 0) {
- if (v)
- ++*pos;
return iter->w.leaf;
} else if (r < 0) {
fib6_walker_unlink(net, &iter->w);
@@ -2415,8 +2416,10 @@ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
iter->skip = *pos;
if (iter->tbl) {
+ loff_t p = 0;
+
ipv6_route_seq_setup_walk(iter, net);
- return ipv6_route_seq_next(seq, NULL, pos);
+ return ipv6_route_seq_next(seq, NULL, &p);
} else {
return NULL;
}
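
The ipv6_route_seq_next() change moves ++(*pos) to the top of the function so the position advances exactly once per call, whichever branch produces the next element; ipv6_route_seq_start() then hands it a scratch loff_t when it reuses ->next() internally. The underlying seq_file contract is simply that ->next() must bump *pos on every call. A minimal sketch against an invented array-backed iterator:

#include <linux/seq_file.h>

static int demo_items[] = { 1, 2, 3 };		/* illustrative data only */

static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos < ARRAY_SIZE(demo_items) ? &demo_items[*pos] : NULL;
}

static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;					/* advance exactly once per call */
	return *pos < ARRAY_SIZE(demo_items) ? &demo_items[*pos] : NULL;
}

static void demo_seq_stop(struct seq_file *seq, void *v)
{
}

static int demo_seq_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_seq_start,
	.next  = demo_seq_next,
	.stop  = demo_seq_stop,
	.show  = demo_seq_show,
};
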
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 90621d498fd1..043e57d08a3e 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -132,6 +132,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
gre_proto == htons(ETH_P_ERSPAN2)) ?
ARPHRD_ETHER : ARPHRD_IP6GRE;
int score, cand_score = 4;
+ struct net_device *ndev;
for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
if (!ipv6_addr_equal(local, &t->parms.laddr) ||
@@ -243,9 +244,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
if (t && t->dev->flags & IFF_UP)
return t;
- dev = ign->fb_tunnel_dev;
- if (dev && dev->flags & IFF_UP)
- return netdev_priv(dev);
+ ndev = READ_ONCE(ign->fb_tunnel_dev);
+ if (ndev && ndev->flags & IFF_UP)
+ return netdev_priv(ndev);
return NULL;
}
@@ -391,7 +392,6 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
if (!(nt->parms.o_flags & TUNNEL_SEQ))
dev->features |= NETIF_F_LLTX;
- dev_hold(dev);
ip6gre_tunnel_link(ign, nt);
return nt;
@@ -418,6 +418,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
ip6gre_tunnel_unlink_md(ign, t);
ip6gre_tunnel_unlink(ign, t);
+ if (ign->fb_tunnel_dev == dev)
+ WRITE_ONCE(ign->fb_tunnel_dev, NULL);
dst_cache_reset(&t->dst_cache);
dev_put(dev);
}
@@ -1137,8 +1139,13 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
return;
if (rt->dst.dev) {
- dev->needed_headroom = rt->dst.dev->hard_header_len +
- t_hlen;
+ unsigned short dst_len = rt->dst.dev->hard_header_len +
+ t_hlen;
+
+ if (t->dev->header_ops)
+ dev->hard_header_len = dst_len;
+ else
+ dev->needed_headroom = dst_len;
if (set_mtu) {
dev->mtu = rt->dst.dev->mtu - t_hlen;
@@ -1163,7 +1170,12 @@ static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
- tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
+
+ if (tunnel->dev->header_ops)
+ tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+ else
+ tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
+
return t_hlen;
}
@@ -1490,6 +1502,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
}
ip6gre_tnl_init_features(dev);
+ dev_hold(dev);
return 0;
cleanup_dst_cache_init:
@@ -1532,8 +1545,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
strcpy(tunnel->parms.name, dev->name);
tunnel->hlen = sizeof(struct ipv6hdr) + 4;
-
- dev_hold(dev);
}
static struct inet6_protocol ip6gre_protocol __read_mostly = {
@@ -1577,17 +1588,18 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
static int __net_init ip6gre_init_net(struct net *net)
{
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+ struct net_device *ndev;
int err;
if (!net_has_fallback_tunnels(net))
return 0;
- ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
- NET_NAME_UNKNOWN,
- ip6gre_tunnel_setup);
- if (!ign->fb_tunnel_dev) {
+ ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
+ NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
+ if (!ndev) {
err = -ENOMEM;
goto err_alloc_dev;
}
+ ign->fb_tunnel_dev = ndev;
dev_net_set(ign->fb_tunnel_dev, net);
/* FB netdevice is special: we have one, and only one per netns.
* Allowing to move it to another netns is clearly unsafe.
@@ -1607,7 +1619,7 @@ static int __net_init ip6gre_init_net(struct net *net)
return 0;
err_reg_dev:
- free_netdev(ign->fb_tunnel_dev);
+ free_netdev(ndev);
err_alloc_dev:
return err;
}
@@ -1882,6 +1894,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
ip6erspan_tnl_link_config(tunnel, 1);
+ dev_hold(dev);
return 0;
cleanup_dst_cache_init:
@@ -1987,8 +2000,6 @@ static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
if (tb[IFLA_MTU])
ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
- dev_hold(dev);
-
out:
return err;
}
diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c
index 02045494c24c..9e3574880cb0 100644
--- a/net/ipv6/ip6_icmp.c
+++ b/net/ipv6/ip6_icmp.c
@@ -9,6 +9,8 @@
#if IS_ENABLED(CONFIG_IPV6)
+#if !IS_BUILTIN(CONFIG_IPV6)
+
static ip6_icmp_send_t __rcu *ip6_icmp_send;
int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
@@ -31,18 +33,52 @@ int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
}
EXPORT_SYMBOL(inet6_unregister_icmp_sender);
-void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct inet6_skb_parm *parm)
{
ip6_icmp_send_t *send;
rcu_read_lock();
send = rcu_dereference(ip6_icmp_send);
+ if (send)
+ send(skb, type, code, info, NULL, parm);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(__icmpv6_send);
+#endif
+
+#if IS_ENABLED(CONFIG_NF_NAT)
+#include <net/netfilter/nf_conntrack.h>
+void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+{
+ struct inet6_skb_parm parm = { 0 };
+ struct sk_buff *cloned_skb = NULL;
+ enum ip_conntrack_info ctinfo;
+ struct in6_addr orig_ip;
+ struct nf_conn *ct;
- if (!send)
+ ct = nf_ct_get(skb_in, &ctinfo);
+ if (!ct || !(ct->status & IPS_SRC_NAT)) {
+ __icmpv6_send(skb_in, type, code, info, &parm);
+ return;
+ }
+
+ if (skb_shared(skb_in))
+ skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);
+
+ if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
+ (skb_network_header(skb_in) + sizeof(struct ipv6hdr)) >
+ skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
+ skb_network_offset(skb_in) + sizeof(struct ipv6hdr))))
goto out;
- send(skb, type, code, info, NULL);
+
+ orig_ip = ipv6_hdr(skb_in)->saddr;
+ ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6;
+ __icmpv6_send(skb_in, type, code, info, &parm);
+ ipv6_hdr(skb_in)->saddr = orig_ip;
out:
- rcu_read_unlock();
+ consume_skb(cloned_skb);
}
-EXPORT_SYMBOL(icmpv6_send);
+EXPORT_SYMBOL(icmpv6_ndo_send);
+#endif
#endif
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index acf0749ee5bb..57d84accbf1e 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -222,16 +222,6 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
if (ipv6_addr_is_multicast(&hdr->saddr))
goto err;
- /* While RFC4291 is not explicit about v4mapped addresses
- * in IPv6 headers, it seems clear linux dual-stack
- * model can not deal properly with these.
- * Security models could be fooled by ::ffff:127.0.0.1 for example.
- *
- * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
- */
- if (ipv6_addr_v4mapped(&hdr->saddr))
- goto err;
-
skb->transport_header = skb->network_header + sizeof(*hdr);
IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 9886a84c2511..e1bb7db88483 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -128,8 +128,42 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
return -EINVAL;
}
+static int
+ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
+ struct sk_buff *skb, unsigned int mtu)
+{
+ struct sk_buff *segs, *nskb;
+ netdev_features_t features;
+ int ret = 0;
+
+ /* Please see corresponding comment in ip_finish_output_gso
+ * describing the cases where GSO segment length exceeds the
+ * egress MTU.
+ */
+ features = netif_skb_features(skb);
+ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+ if (IS_ERR_OR_NULL(segs)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ consume_skb(skb);
+
+ skb_list_walk_safe(segs, segs, nskb) {
+ int err;
+
+ skb_mark_not_on_list(segs);
+ err = ip6_fragment(net, sk, segs, ip6_finish_output2);
+ if (err && ret == 0)
+ ret = err;
+ }
+
+ return ret;
+}
+
static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ unsigned int mtu;
int ret;
ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
@@ -146,7 +180,11 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
}
#endif
- if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+ mtu = ip6_skb_dst_mtu(skb);
+ if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
+ return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);
+
+ if ((skb->len > mtu && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb)) ||
(IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
return ip6_fragment(net, sk, skb, ip6_finish_output2);
@@ -1071,19 +1109,19 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup);
* It returns a valid dst pointer on success, or a pointer encoded
* error code.
*/
-struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
+struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst)
{
struct dst_entry *dst = NULL;
int err;
- err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
+ err = ip6_dst_lookup_tail(net, sk, &dst, fl6);
if (err)
return ERR_PTR(err);
if (final_dst)
fl6->daddr = *final_dst;
- return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+ return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
@@ -1115,7 +1153,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
if (dst)
return dst;
- dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst);
if (connected && !IS_ERR(dst))
ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6);
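
ip6_finish_output_gso_slowpath_drop() software-segments an oversized GSO packet and pushes each resulting segment through the normal fragmentation path, keeping the first error it sees. The skeleton of that "segment, then walk the segment list" pattern looks roughly like this; handle_one() is a stand-in for the real per-segment output function:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int demo_segment_and_send(struct sk_buff *skb,
				 int (*handle_one)(struct sk_buff *seg))
{
	netdev_features_t features = netif_skb_features(skb);
	struct sk_buff *segs, *nskb;
	int ret = 0;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);			/* nothing usable came back */
		return -ENOMEM;
	}
	consume_skb(skb);			/* original replaced by segs */

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = handle_one(segs);
		if (err && ret == 0)
			ret = err;		/* remember the first failure */
	}
	return ret;
}
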
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 8e70a015c792..35c127c3eee7 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -272,7 +272,6 @@ static int ip6_tnl_create2(struct net_device *dev)
strcpy(t->parms.name, dev->name);
- dev_hold(dev);
ip6_tnl_link(ip6n, t);
return 0;
@@ -865,7 +864,15 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
struct metadata_dst *tun_dst,
bool log_ecn_err)
{
- return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
+ int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
+ const struct ipv6hdr *ipv6h,
+ struct sk_buff *skb);
+
+ dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate;
+ if (tpi->proto == htons(ETH_P_IP))
+ dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate;
+
+ return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);
@@ -1858,6 +1865,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
+ dev_hold(dev);
return 0;
destroy_dst:
@@ -1901,7 +1909,6 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
t->parms.proto = IPPROTO_IPV6;
- dev_hold(dev);
rcu_assign_pointer(ip6n->tnls_wc[0], t);
return 0;
@@ -2214,6 +2221,16 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head
t = rtnl_dereference(t->next);
}
}
+
+ t = rtnl_dereference(ip6n->tnls_wc[0]);
+ while (t) {
+ /* If dev is in the same netns, it has already
+ * been added to the list by the previous loop.
+ */
+ if (!net_eq(dev_net(t->dev), net))
+ unregister_netdevice_queue(t->dev, list);
+ t = rtnl_dereference(t->next);
+ }
}
static int __net_init ip6_tnl_init_net(struct net *net)
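
Several tunnel drivers in this series (ip6_gre above, ip6_tunnel here, ip6_vti and sit below) move the dev_hold() out of the assorted creation paths and into the device init callback, so every tunnel netdev takes its reference exactly once regardless of how it was created. The shape of that pattern on a made-up driver:

#include <linux/netdevice.h>

/* .ndo_init runs for every instance, so the reference is taken exactly once. */
static int demo_tnl_init(struct net_device *dev)
{
	/* ... allocate per-device state here ... */
	dev_hold(dev);
	return 0;
}

static void demo_tnl_uninit(struct net_device *dev)
{
	/* ... unlink from per-netns lists here ... */
	dev_put(dev);				/* balance the hold from init */
}

static const struct net_device_ops demo_tnl_ops = {
	.ndo_init   = demo_tnl_init,
	.ndo_uninit = demo_tnl_uninit,
};
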
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 94f16e82a458..290badfe70e0 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -196,7 +196,6 @@ static int vti6_tnl_create2(struct net_device *dev)
strcpy(t->parms.name, dev->name);
- dev_hold(dev);
vti6_tnl_link(ip6n, t);
return 0;
@@ -925,6 +924,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
+ dev_hold(dev);
return 0;
}
@@ -956,7 +956,6 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
t->parms.proto = IPPROTO_IPV6;
- dev_hold(dev);
rcu_assign_pointer(ip6n->tnls_wc[0], t);
return 0;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index a20be08f0e0b..4e1da6cb9ed7 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -190,10 +190,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
retv = -EBUSY;
break;
}
- break;
} else {
break;
}
+
if (sk->sk_state != TCP_ESTABLISHED) {
retv = -ENOTCONN;
break;
@@ -207,6 +207,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
fl6_free_socklist(sk);
__ipv6_sock_mc_close(sk);
+ __ipv6_sock_ac_close(sk);
/*
* Sock is moving from IPv6 to IPv4 (sk_prot), so
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 2d80e913b82f..3d048401141f 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1606,10 +1606,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
IPV6_TLV_PADN, 0 };
/* we assume size > sizeof(ra) here */
- /* limit our allocations to order-0 page */
- size = min_t(int, size, SKB_MAX_ORDER(0, 0));
skb = sock_alloc_send_skb(sk, size, 1, &err);
-
if (!skb)
return NULL;
@@ -2620,6 +2617,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
idev->mc_list = i->next;
write_unlock_bh(&idev->lock);
+ ip6_mc_clear_src(i);
ma_put(i);
write_lock_bh(&idev->lock);
}
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 6d0b1f3e927b..5679fa3f696a 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -17,10 +17,10 @@
#include <net/xfrm.h>
#include <net/netfilter/nf_queue.h>
-int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
+int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
- struct sock *sk = sk_to_full_sk(skb->sk);
+ struct sock *sk = sk_to_full_sk(sk_partial);
unsigned int hh_len;
struct dst_entry *dst;
int strict = (ipv6_addr_type(&iph->daddr) &
@@ -81,7 +81,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
!ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
skb->mark != rt_info->mark)
- return ip6_route_me_harder(entry->state.net, skb);
+ return ip6_route_me_harder(entry->state.net, entry->state.sk, skb);
}
return 0;
}
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index daf2e9e9193d..dd0c1073dc8e 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1448,6 +1448,8 @@ translate_compat_table(struct net *net,
if (!newinfo)
goto out_unlock;
+ memset(newinfo->entries, 0, size);
+
newinfo->number = compatr->num_entries;
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
newinfo->hook_entry[i] = compatr->hook_entry[i];
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index b0524b18c4fb..acba3757ff60 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -60,7 +60,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
skb->mark != mark ||
ipv6_hdr(skb)->hop_limit != hop_limit ||
flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
- err = ip6_route_me_harder(state->net, skb);
+ err = ip6_route_me_harder(state->net, state->sk, skb);
if (err < 0)
ret = NF_DROP_ERR(err);
}
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
index c6bf580d0f33..c456e2f902b9 100644
--- a/net/ipv6/netfilter/nf_log_ipv6.c
+++ b/net/ipv6/netfilter/nf_log_ipv6.c
@@ -300,9 +300,11 @@ static void dump_ipv6_mac_header(struct nf_log_buf *m,
switch (dev->type) {
case ARPHRD_ETHER:
- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
- ntohs(eth_hdr(skb)->h_proto));
+ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
+ nf_log_dump_vlan(m, skb);
+ nf_log_buf_add(m, "MACPROTO=%04x ",
+ ntohs(eth_hdr(skb)->h_proto));
return;
default:
break;
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index ca6d38698b1a..2b6a3b27f670 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -352,7 +352,7 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
&ct->tuplehash[!dir].tuple.src.u3)) {
- err = ip6_route_me_harder(state->net, skb);
+ err = ip6_route_me_harder(state->net, state->sk, skb);
if (err < 0)
ret = NF_DROP_ERR(err);
}
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
index da3f1f8cb325..afe79cb46e63 100644
--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -52,7 +52,7 @@ static unsigned int nf_route_table_hook(void *priv,
skb->mark != mark ||
ipv6_hdr(skb)->hop_limit != hop_limit ||
flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
- err = ip6_route_me_harder(state->net, skb);
+ err = ip6_route_me_harder(state->net, state->sk, skb);
if (err < 0)
ret = NF_DROP_ERR(err);
}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index a41156a00dd4..98c8f98a7660 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -303,7 +303,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
*/
v4addr = LOOPBACK4_IPV6;
if (!(addr_type & IPV6_ADDR_MULTICAST) &&
- !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
+ !ipv6_can_nonlocal_bind(sock_net(sk), inet)) {
err = -EADDRNOTAVAIL;
if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
dev, 0)) {
@@ -928,7 +928,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto out;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index c6132e39ab16..60dfd0d11851 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -347,7 +347,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
- if (!(fhdr->frag_off & htons(0xFFF9))) {
+ if (!(fhdr->frag_off & htons(IP6_OFFSET | IP6_MF))) {
/* It is not a fragmented frame */
skb->transport_header += sizeof(struct frag_hdr);
__IP6_INC_STATS(net,
@@ -355,6 +355,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+ IP6CB(skb)->frag_max_size = ntohs(hdr->payload_len) +
+ sizeof(struct ipv6hdr);
return 1;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9c36a743ddbc..f884739a0c1c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2360,8 +2360,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
const struct in6_addr *daddr, *saddr;
struct rt6_info *rt6 = (struct rt6_info *)dst;
- if (dst_metric_locked(dst, RTAX_MTU))
- return;
+ /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU)
+ * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
+ * [see also comment in rt6_mtu_change_route()]
+ */
if (iph) {
daddr = &iph->daddr;
@@ -4512,9 +4514,11 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
* nexthops have been replaced by first new, the rest should
* be added to it.
*/
- cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
- NLM_F_REPLACE);
- cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
+ if (cfg->fc_nlinfo.nlh) {
+ cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+ NLM_F_REPLACE);
+ cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
+ }
nhn++;
}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index bfed7508ba19..4a49200d0d11 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -215,8 +215,6 @@ static int ipip6_tunnel_create(struct net_device *dev)
ipip6_tunnel_clone_6rd(dev, sitn);
- dev_hold(dev);
-
ipip6_tunnel_link(sitn, t);
return 0;
@@ -1087,7 +1085,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
if (tdev && !netif_is_l3_master(tdev)) {
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
- dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
dev->mtu = tdev->mtu - t_hlen;
if (dev->mtu < IPV6_MIN_MTU)
dev->mtu = IPV6_MIN_MTU;
@@ -1377,7 +1374,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
dev->priv_destructor = ipip6_dev_free;
dev->type = ARPHRD_SIT;
- dev->hard_header_len = LL_MAX_HEADER + t_hlen;
dev->mtu = ETH_DATA_LEN - t_hlen;
dev->min_mtu = IPV6_MIN_MTU;
dev->max_mtu = IP6_MAX_MTU - t_hlen;
@@ -1409,7 +1405,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
dev->tstats = NULL;
return err;
}
-
+ dev_hold(dev);
return 0;
}
@@ -1425,7 +1421,6 @@ static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
iph->ihl = 5;
iph->ttl = 64;
- dev_hold(dev);
rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
}
@@ -1598,8 +1593,11 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
}
#ifdef CONFIG_IPV6_SIT_6RD
- if (ipip6_netlink_6rd_parms(data, &ip6rd))
+ if (ipip6_netlink_6rd_parms(data, &ip6rd)) {
err = ipip6_tunnel_update_6rd(nt, &ip6rd);
+ if (err < 0)
+ unregister_netdevice_queue(dev, NULL);
+ }
#endif
return err;
@@ -1817,9 +1815,9 @@ static void __net_exit sit_destroy_tunnels(struct net *net,
if (dev->rtnl_link_ops == &sit_link_ops)
unregister_netdevice_queue(dev, head);
- for (prio = 1; prio < 4; prio++) {
+ for (prio = 0; prio < 4; prio++) {
int h;
- for (h = 0; h < IP6_SIT_HASH_SIZE; h++) {
+ for (h = 0; h < (prio ? IP6_SIT_HASH_SIZE : 1); h++) {
struct ip_tunnel *t;
t = rtnl_dereference(sitn->tunnels[prio][h]);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index e997141aed8c..ec61b67a92be 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -141,7 +141,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
__u32 cookie = ntohl(th->ack_seq) - 1;
struct sock *ret = sk;
struct request_sock *req;
- int mss;
+ int full_space, mss;
struct dst_entry *dst;
__u8 rcv_wscale;
u32 tsoff = 0;
@@ -240,13 +240,19 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
fl6.flowi6_uid = sk->sk_uid;
security_req_classify_flow(req, flowi6_to_flowi(&fl6));
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
if (IS_ERR(dst))
goto out_free;
}
req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
- tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
+ /* limit the window selection if the user enforces a smaller rx buffer */
+ full_space = tcp_full_space(sk);
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+ (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
+ req->rsk_window_clamp = full_space;
+
+ tcp_select_initial_window(sk, full_space, req->mss,
&req->rsk_rcv_wnd, &req->rsk_window_clamp,
ireq->wscale_ok, &rcv_wscale,
dst_metric(dst, RTAX_INITRWND));
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7b0c2498f461..8d822df83b08 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -206,7 +206,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
!ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
- tp->write_seq = 0;
+ WRITE_ONCE(tp->write_seq, 0);
}
sk->sk_v6_daddr = usin->sin6_addr;
@@ -268,7 +268,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto failure;
@@ -304,10 +304,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (likely(!tp->repair)) {
if (!tp->write_seq)
- tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
- sk->sk_v6_daddr.s6_addr32,
- inet->inet_sport,
- inet->inet_dport);
+ WRITE_ONCE(tp->write_seq,
+ secure_tcpv6_seq(np->saddr.s6_addr32,
+ sk->sk_v6_daddr.s6_addr32,
+ inet->inet_sport,
+ inet->inet_dport));
tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
np->saddr.s6_addr32,
sk->sk_v6_daddr.s6_addr32);
@@ -885,7 +886,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
* Underlying function will use this to retrieve the network
* namespace
*/
- dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
+ dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(buff, dst);
ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
@@ -1038,6 +1039,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!ipv6_unicast_destination(skb))
goto drop;
+ if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
+ __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
+ return 0;
+ }
+
return tcp_conn_request(&tcp6_request_sock_ops,
&tcp_request_sock_ipv6_ops, sk, skb);
@@ -1839,7 +1845,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
* we might find a transient negative value.
*/
rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
- tp->copied_seq, 0);
+ READ_ONCE(tp->copied_seq), 0);
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
@@ -1850,7 +1856,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
state,
- tp->write_seq - tp->snd_una,
+ READ_ONCE(tp->write_seq) - tp->snd_una,
rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
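
The write_seq and copied_seq accesses gain WRITE_ONCE()/READ_ONCE() because /proc readers such as get_tcp6_sock() look at those fields without the socket lock; the annotations stop the compiler from tearing, caching or re-reading the values. A tiny stand-alone illustration of the pairing:

#include <linux/compiler.h>
#include <linux/types.h>

struct demo_counter {
	u32 value;			/* written under a lock, read locklessly */
};

/* Writer side: called with the structure's lock held. */
static void demo_advance(struct demo_counter *c, u32 delta)
{
	WRITE_ONCE(c->value, c->value + delta);
}

/* Reader side: no lock; may see the old or new value, never a torn one. */
static u32 demo_snapshot(const struct demo_counter *c)
{
	return READ_ONCE(c->value);
}
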
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1979922bcf67..6799ad462be3 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -167,7 +167,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
int dif, int sdif, bool exact_dif,
struct udp_hslot *hslot2, struct sk_buff *skb)
{
- struct sock *sk, *result;
+ struct sock *sk, *result, *reuseport_result;
int score, badness;
u32 hash = 0;
@@ -177,17 +177,20 @@ static struct sock *udp6_lib_lookup2(struct net *net,
score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif, sdif, exact_dif);
if (score > badness) {
+ reuseport_result = NULL;
+
if (sk->sk_reuseport &&
sk->sk_state != TCP_ESTABLISHED) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
- result = reuseport_select_sock(sk, hash, skb,
- sizeof(struct udphdr));
- if (result && !reuseport_has_conns(sk, false))
- return result;
+ reuseport_result = reuseport_select_sock(sk, hash, skb,
+ sizeof(struct udphdr));
+ if (reuseport_result && !reuseport_has_conns(sk, false))
+ return reuseport_result;
}
- result = sk;
+
+ result = reuseport_result ? : sk;
badness = score;
}
}
@@ -606,7 +609,7 @@ static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
/*
* UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
*/
- if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+ if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
if (up->pcrlen == 0) { /* full coverage was set */
net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 6a74080005cf..71d022704923 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -130,9 +130,7 @@ int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb)
{
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-#ifdef CONFIG_NETFILTER
IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
return xfrm_output(sk, skb);
}
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 23a1002ed86d..d59f2341bfec 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1571,7 +1571,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
break;
}
- if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
+ if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
+ sk->sk_state == IUCV_CONNECTED) {
if (iucv->transport == AF_IUCV_TRANS_IUCV) {
txmsg.class = 0;
txmsg.tag = 0;
@@ -1781,7 +1782,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
}
/* Create the new socket */
- nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
+ nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
if (!nsk) {
err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
@@ -1991,7 +1992,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
goto out;
}
- nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
+ nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
bh_lock_sock(sk);
if ((sk->sk_state != IUCV_LISTEN) ||
sk_acceptq_is_full(sk) ||
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 1982f9f31deb..c7d5a6015389 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1855,6 +1855,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
+ if ((xfilter->sadb_x_filter_splen >=
+ (sizeof(xfrm_address_t) << 3)) ||
+ (xfilter->sadb_x_filter_dplen >=
+ (sizeof(xfrm_address_t) << 3))) {
+ mutex_unlock(&pfk->dump_lock);
+ return -EINVAL;
+ }
filter = kmalloc(sizeof(*filter), GFP_KERNEL);
if (filter == NULL) {
mutex_unlock(&pfk->dump_lock);
@@ -2901,7 +2908,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
break;
if (!aalg->pfkey_supported)
continue;
- if (aalg_tmpl_set(t, aalg) && aalg->available)
+ if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
return sz + sizeof(struct sadb_prop);
@@ -2919,7 +2926,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
if (!ealg->pfkey_supported)
continue;
- if (!(ealg_tmpl_set(t, ealg) && ealg->available))
+ if (!(ealg_tmpl_set(t, ealg)))
continue;
for (k = 1; ; k++) {
@@ -2930,7 +2937,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
if (!aalg->pfkey_supported)
continue;
- if (aalg_tmpl_set(t, aalg) && aalg->available)
+ if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index d0a295cd71ef..9abdc0a04d99 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1033,6 +1033,7 @@ static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
/* Queue the packet to IP for output */
skb->ignore_df = 1;
+ skb_dst_drop(skb);
#if IS_ENABLED(CONFIG_IPV6)
if (l2tp_sk_is_v6(tunnel->sock))
error = inet6_csk_xmit(tunnel->sock, skb, NULL);
@@ -1104,10 +1105,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
goto out_unlock;
}
- /* Get routing info from the tunnel socket */
- skb_dst_drop(skb);
- skb_dst_set(skb, sk_dst_check(sk, 0));
-
inet = inet_sk(sk);
fl = &inet->cork.fl;
switch (tunnel->encap) {
@@ -1463,6 +1460,9 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
if (sk->sk_type != SOCK_DGRAM)
return -EPROTONOSUPPORT;
+ if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
+ return -EPROTONOSUPPORT;
+
if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
(encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
return -EPROTONOSUPPORT;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index d4c60523c549..99881b81f302 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -24,7 +24,6 @@
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
-#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
@@ -213,15 +212,31 @@ discard:
return 0;
}
-static int l2tp_ip_open(struct sock *sk)
+static int l2tp_ip_hash(struct sock *sk)
{
- /* Prevent autobind. We don't have ports. */
- inet_sk(sk)->inet_num = IPPROTO_L2TP;
+ if (sk_unhashed(sk)) {
+ write_lock_bh(&l2tp_ip_lock);
+ sk_add_node(sk, &l2tp_ip_table);
+ write_unlock_bh(&l2tp_ip_lock);
+ }
+ return 0;
+}
+static void l2tp_ip_unhash(struct sock *sk)
+{
+ if (sk_unhashed(sk))
+ return;
write_lock_bh(&l2tp_ip_lock);
- sk_add_node(sk, &l2tp_ip_table);
+ sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip_lock);
+}
+
+static int l2tp_ip_open(struct sock *sk)
+{
+ /* Prevent autobind. We don't have ports. */
+ inet_sk(sk)->inet_num = IPPROTO_L2TP;
+ l2tp_ip_hash(sk);
return 0;
}
@@ -598,8 +613,8 @@ static struct proto l2tp_ip_prot = {
.sendmsg = l2tp_ip_sendmsg,
.recvmsg = l2tp_ip_recvmsg,
.backlog_rcv = l2tp_ip_backlog_recv,
- .hash = inet_hash,
- .unhash = inet_unhash,
+ .hash = l2tp_ip_hash,
+ .unhash = l2tp_ip_unhash,
.obj_size = sizeof(struct l2tp_ip_sock),
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
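
l2tp_ip stops borrowing inet_hash()/inet_unhash(), which assume the port-based inet hash tables, and instead supplies its own .hash/.unhash callbacks that manage the protocol's private socket list. The general shape of wiring protocol-private callbacks into struct proto, sketched with placeholder names:

#include <linux/module.h>
#include <net/sock.h>

static DEFINE_RWLOCK(demo_lock);
static struct hlist_head demo_table;

static int demo_hash(struct sock *sk)
{
	if (sk_unhashed(sk)) {
		write_lock_bh(&demo_lock);
		sk_add_node(sk, &demo_table);
		write_unlock_bh(&demo_lock);
	}
	return 0;
}

static void demo_unhash(struct sock *sk)
{
	if (sk_unhashed(sk))
		return;
	write_lock_bh(&demo_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&demo_lock);
}

static struct proto demo_prot = {
	.name     = "DEMO",
	.owner    = THIS_MODULE,
	.hash     = demo_hash,
	.unhash   = demo_unhash,
	.obj_size = sizeof(struct sock),
};
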
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 37a69df17cab..2ff25c445b82 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -24,8 +24,6 @@
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
-#include <net/inet_hashtables.h>
-#include <net/inet6_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
@@ -226,15 +224,31 @@ discard:
return 0;
}
-static int l2tp_ip6_open(struct sock *sk)
+static int l2tp_ip6_hash(struct sock *sk)
{
- /* Prevent autobind. We don't have ports. */
- inet_sk(sk)->inet_num = IPPROTO_L2TP;
+ if (sk_unhashed(sk)) {
+ write_lock_bh(&l2tp_ip6_lock);
+ sk_add_node(sk, &l2tp_ip6_table);
+ write_unlock_bh(&l2tp_ip6_lock);
+ }
+ return 0;
+}
+static void l2tp_ip6_unhash(struct sock *sk)
+{
+ if (sk_unhashed(sk))
+ return;
write_lock_bh(&l2tp_ip6_lock);
- sk_add_node(sk, &l2tp_ip6_table);
+ sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip6_lock);
+}
+
+static int l2tp_ip6_open(struct sock *sk)
+{
+ /* Prevent autobind. We don't have ports. */
+ inet_sk(sk)->inet_num = IPPROTO_L2TP;
+ l2tp_ip6_hash(sk);
return 0;
}
@@ -619,7 +633,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto out;
@@ -732,8 +746,8 @@ static struct proto l2tp_ip6_prot = {
.sendmsg = l2tp_ip6_sendmsg,
.recvmsg = l2tp_ip6_recvmsg,
.backlog_rcv = l2tp_ip6_backlog_recv,
- .hash = inet6_hash,
- .unhash = inet_unhash,
+ .hash = l2tp_ip6_hash,
+ .unhash = l2tp_ip6_unhash,
.obj_size = sizeof(struct l2tp_ip6_sock),
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
index eda726e22f64..621c66f00117 100644
--- a/net/lapb/lapb_out.c
+++ b/net/lapb/lapb_out.c
@@ -87,7 +87,8 @@ void lapb_kick(struct lapb_cb *lapb)
skb = skb_dequeue(&lapb->write_queue);
do {
- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
+ skbn = skb_copy(skb, GFP_ATOMIC);
+ if (!skbn) {
skb_queue_head(&lapb->write_queue, skb);
break;
}
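
lapb_kick() switches from skb_clone() to skb_copy(): a clone shares its payload with the original, so it is only safe while nobody modifies the data, whereas skb_copy() duplicates the payload as well - which matters here because the transmit path goes on to prepend an LAPB header. A short illustration of the distinction (the will_modify flag is invented):

#include <linux/skbuff.h>

static struct sk_buff *demo_duplicate(struct sk_buff *skb, bool will_modify)
{
	/*
	 * skb_clone(): new metadata, shared payload - cheap, but treat the
	 * data as read-only.
	 * skb_copy(): new metadata plus a private copy of the payload - safe
	 * to rewrite headers or data afterwards.
	 */
	return will_modify ? skb_copy(skb, GFP_ATOMIC)
			   : skb_clone(skb, GFP_ATOMIC);
}
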
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index ce841d59bc72..bcba579e292f 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -273,6 +273,10 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
if (!sock_flag(sk, SOCK_ZAPPED))
goto out;
+ if (!addr->sllc_arphrd)
+ addr->sllc_arphrd = ARPHRD_ETHER;
+ if (addr->sllc_arphrd != ARPHRD_ETHER)
+ goto out;
rc = -ENODEV;
if (sk->sk_bound_dev_if) {
llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
@@ -330,15 +334,15 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
goto out;
rc = -EAFNOSUPPORT;
- if (unlikely(addr->sllc_family != AF_LLC))
+ if (!addr->sllc_arphrd)
+ addr->sllc_arphrd = ARPHRD_ETHER;
+ if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER))
goto out;
rc = -ENODEV;
rcu_read_lock();
if (sk->sk_bound_dev_if) {
llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
if (llc->dev) {
- if (!addr->sllc_arphrd)
- addr->sllc_arphrd = llc->dev->type;
if (is_zero_ether_addr(addr->sllc_mac))
memcpy(addr->sllc_mac, llc->dev->dev_addr,
IFHWADDRLEN);
@@ -781,7 +785,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
}
/* Well, if we have backlog, try to process it now yet. */
- if (copied >= target && !sk->sk_backlog.tail)
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break;
if (copied) {
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index cb7076d9a769..6804cdd43bef 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -664,7 +664,8 @@ void sta_set_rate_info_tx(struct sta_info *sta,
u16 brate;
sband = ieee80211_get_sband(sta->sdata);
- if (sband) {
+ WARN_ON_ONCE(sband && !sband->bitrates);
+ if (sband && sband->bitrates) {
brate = sband->bitrates[rate->idx].bitrate;
rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
}
@@ -1547,8 +1548,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
}
if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- sta->sdata->u.vlan.sta)
+ sta->sdata->u.vlan.sta) {
+ ieee80211_clear_fast_rx(sta);
RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
+ }
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
ieee80211_vif_dec_num_mcast(sta->sdata);
@@ -2011,6 +2014,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
ieee80211_stop_mesh(sdata);
mutex_lock(&sdata->local->mtx);
ieee80211_vif_release_channel(sdata);
+ kfree(sdata->u.mesh.ie);
mutex_unlock(&sdata->local->mtx);
return 0;
@@ -2775,14 +2779,14 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
continue;
for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
- if (~sdata->rc_rateidx_mcs_mask[i][j]) {
+ if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) {
sdata->rc_has_mcs_mask[i] = true;
break;
}
}
for (j = 0; j < NL80211_VHT_NSS_MAX; j++) {
- if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) {
+ if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) {
sdata->rc_has_vht_mcs_mask[i] = true;
break;
}
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index f783d1377d9a..9f0f437a09b9 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -128,8 +128,11 @@ int drv_sta_state(struct ieee80211_local *local,
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
ret = drv_sta_add(local, sdata, &sta->sta);
- if (ret == 0)
+ if (ret == 0) {
sta->uploaded = true;
+ if (rcu_access_pointer(sta->sta.rates))
+ drv_sta_rate_tbl_update(local, sdata, &sta->sta);
+ }
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
drv_sta_remove(local, sdata, &sta->sta);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index f0f5fedb8caa..fa13eef25f2c 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -1861,6 +1861,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
/* remove beacon */
kfree(sdata->u.ibss.ie);
+ sdata->u.ibss.ie = NULL;
+ sdata->u.ibss.ie_len = 0;
/* on the next join, re-program HT parameters */
memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index a879d8071712..6c9d9c94983b 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -53,12 +53,6 @@ struct ieee80211_local;
#define IEEE80211_ENCRYPT_HEADROOM 8
#define IEEE80211_ENCRYPT_TAILROOM 18
-/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent
- * reception of at least three fragmented frames. This limit can be increased
- * by changing this define, at the cost of slower frame reassembly and
- * increased memory use (about 2 kB of RAM per entry). */
-#define IEEE80211_FRAGMENT_MAX 4
-
/* power level hasn't been configured (or set to automatic) */
#define IEEE80211_UNSET_POWER_LEVEL INT_MIN
@@ -91,18 +85,6 @@ extern const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS];
#define IEEE80211_MAX_NAN_INSTANCE_ID 255
-struct ieee80211_fragment_entry {
- struct sk_buff_head skb_list;
- unsigned long first_frag_time;
- u16 seq;
- u16 extra_len;
- u16 last_frag;
- u8 rx_queue;
- bool check_sequential_pn; /* needed for CCMP/GCMP */
- u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
-};
-
-
struct ieee80211_bss {
u32 device_ts_beacon, device_ts_presp;
@@ -243,8 +225,15 @@ struct ieee80211_rx_data {
*/
int security_idx;
- u32 tkip_iv32;
- u16 tkip_iv16;
+ union {
+ struct {
+ u32 iv32;
+ u16 iv16;
+ } tkip;
+ struct {
+ u8 pn[IEEE80211_CCMP_PN_LEN];
+ } ccm_gcm;
+ };
};
struct ieee80211_csa_settings {
@@ -884,9 +873,7 @@ struct ieee80211_sub_if_data {
char name[IFNAMSIZ];
- /* Fragment table for host-based reassembly */
- struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
- unsigned int fragment_next;
+ struct ieee80211_fragment_cache frags;
/* TID bitmap for NoAck policy */
u16 noack_map;
@@ -1051,6 +1038,7 @@ enum queue_stop_reason {
IEEE80211_QUEUE_STOP_REASON_FLUSH,
IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN,
IEEE80211_QUEUE_STOP_REASON_RESERVE_TID,
+ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE,
IEEE80211_QUEUE_STOP_REASONS,
};
@@ -2203,4 +2191,7 @@ extern const struct ethtool_ops ieee80211_ethtool_ops;
#define debug_noinline
#endif
+void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache);
+void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache);
+
#endif /* IEEE80211_I_H */
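
The ieee80211_rx_data change folds the TKIP and CCMP/GCMP per-frame replay state into an anonymous union, since a frame is only ever protected by one cipher at a time, and replaces the open-coded fragment array with a dedicated ieee80211_fragment_cache. A tiny sketch of the union layout, with invented field names:

#include <linux/types.h>

struct demo_rx_crypto {
	int security_idx;
	union {				/* only one cipher is active per frame */
		struct {
			u32 iv32;
			u16 iv16;
		} tkip;
		struct {
			u8 pn[6];	/* CCMP/GCMP packet number */
		} ccm_gcm;
	};
};
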
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 152d4365f961..358028a09ce4 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -7,7 +7,7 @@
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1111,16 +1111,12 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
*/
static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
{
- int i;
-
/* free extra data */
ieee80211_free_keys(sdata, false);
ieee80211_debugfs_remove_netdev(sdata);
- for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
- __skb_queue_purge(&sdata->fragments[i].skb_list);
- sdata->fragment_next = 0;
+ ieee80211_destroy_frag_cache(&sdata->frags);
if (ieee80211_vif_is_mesh(&sdata->vif))
ieee80211_mesh_teardown_sdata(sdata);
@@ -1542,6 +1538,10 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
if (ret)
return ret;
+ ieee80211_stop_vif_queues(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
+ synchronize_net();
+
ieee80211_do_stop(sdata, false);
ieee80211_teardown_sdata(sdata);
@@ -1562,6 +1562,8 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
err = ieee80211_do_open(&sdata->wdev, false);
WARN(err, "type change: do_open returned %d", err);
+ ieee80211_wake_vif_queues(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
return ret;
}
@@ -1826,8 +1828,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sdata->wdev.wiphy = local->hw.wiphy;
sdata->local = local;
- for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
- skb_queue_head_init(&sdata->fragments[i].skb_list);
+ ieee80211_init_frag_cache(&sdata->frags);
INIT_LIST_HEAD(&sdata->key_list);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index f20bb39f492d..6775d6cb7d3d 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -653,6 +653,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
+ static atomic_t key_color = ATOMIC_INIT(0);
struct ieee80211_local *local = sdata->local;
struct ieee80211_key *old_key;
int idx = key->conf.keyidx;
@@ -688,6 +689,12 @@ int ieee80211_key_link(struct ieee80211_key *key,
key->sdata = sdata;
key->sta = sta;
+ /*
+ * Assign a unique ID to every key so we can easily prevent mixed
+ * key and fragment cache attacks.
+ */
+ key->color = atomic_inc_return(&key_color);
+
increment_tailroom_need_count(sdata);
ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
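
ieee80211_key_link() now stamps every key with a "color" drawn from a static atomic counter, so the RX path can later tell whether a cached fragment was decrypted with the same key instance. Handing out such one-per-object IDs is just atomic_inc_return() on a static counter:

#include <linux/atomic.h>

/* Each call returns the next ID; safe against concurrent callers. */
static unsigned int demo_next_id(void)
{
	static atomic_t demo_id_counter = ATOMIC_INIT(0);

	return atomic_inc_return(&demo_id_counter);
}
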
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index ebdb80b85dc3..d8e187bcb751 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -127,6 +127,8 @@ struct ieee80211_key {
} debugfs;
#endif
+ unsigned int color;
+
/*
* key config, must be last because it contains key
* material as variable length member
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 68db2a356443..e8c4e9c0c5a0 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -931,8 +931,19 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
continue;
if (!dflt_chandef.chan) {
+ /*
+ * Assign the first enabled channel to dflt_chandef
+ * from the list of channels
+ */
+ for (i = 0; i < sband->n_channels; i++)
+ if (!(sband->channels[i].flags &
+ IEEE80211_CHAN_DISABLED))
+ break;
+ /* if none found then use the first anyway */
+ if (i == sband->n_channels)
+ i = 0;
cfg80211_chandef_create(&dflt_chandef,
- &sband->channels[0],
+ &sband->channels[i],
NL80211_CHAN_NO_HT);
/* init channel we're on */
if (!local->use_chanctx && !local->_oper_chandef.chan) {
@@ -1069,8 +1080,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (local->hw.wiphy->max_scan_ie_len)
local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
- WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
- local->hw.n_cipher_schemes));
+ if (WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
+ local->hw.n_cipher_schemes))) {
+ result = -EINVAL;
+ goto fail_workqueue;
+ }
result = ieee80211_init_cipher_suites(local);
if (result < 0)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 433d136282de..18158855d98c 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -355,7 +355,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
*/
tx_time = (device_constant + 10 * test_frame_len / rate);
estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
- result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
+ result = ((u64)tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
return (u32)result;
}
@@ -1088,7 +1088,14 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
target_flags, mpath->dst, mpath->sn, da, 0,
ttl, lifetime, 0, ifmsh->preq_id++, sdata);
+
+ spin_lock_bh(&mpath->state_lock);
+ if (mpath->flags & MESH_PATH_DELETED) {
+ spin_unlock_bh(&mpath->state_lock);
+ goto enddiscovery;
+ }
mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
+ spin_unlock_bh(&mpath->state_lock);
enddiscovery:
rcu_read_unlock();
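
The airtime metric fix casts one operand to u64 before the multiplication so the product is computed in 64 bits; with two 32-bit operands the multiply itself wraps long before the right shift can help. A self-contained illustration of the difference:

#include <linux/types.h>

static u32 demo_scale_bad(u32 tx_time, u32 retx, int shift)
{
	return (tx_time * retx) >> shift;	/* 32-bit product can wrap */
}

static u32 demo_scale_good(u32 tx_time, u32 retx, int shift)
{
	return ((u64)tx_time * retx) >> shift;	/* product computed in 64 bits */
}
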
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index ac1f5db52994..6dc5f93b1e4d 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -63,6 +63,7 @@ static struct mesh_table *mesh_table_alloc(void)
atomic_set(&newtbl->entries, 0);
spin_lock_init(&newtbl->gates_lock);
spin_lock_init(&newtbl->walk_lock);
+ rhashtable_init(&newtbl->rhead, &mesh_rht_params);
return newtbl;
}
@@ -532,6 +533,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
del_timer_sync(&mpath->timer);
atomic_dec(&sdata->u.mesh.mpaths);
atomic_dec(&tbl->entries);
+ mesh_path_flush_pending(mpath);
kfree_rcu(mpath, rcu);
}
@@ -785,9 +787,6 @@ int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
goto free_path;
}
- rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
- rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);
-
sdata->u.mesh.mesh_paths = tbl_path;
sdata->u.mesh.mpp_paths = tbl_mpp;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b0667467337d..cbcb60face2c 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1185,6 +1185,11 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
sdata->vif.csa_active = false;
ifmgd->csa_waiting_bcn = false;
+ /*
+ * If the CSA IE is still present on the beacon after the switch,
+ * we need to consider it as a new CSA (possibly to self).
+ */
+ ifmgd->beacon_crc_valid = false;
ret = drv_post_channel_switch(sdata);
if (ret) {
@@ -2384,7 +2389,7 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_is_data(hdr->frame_control))
return;
- if (ieee80211_is_nullfunc(hdr->frame_control) &&
+ if (ieee80211_is_any_nullfunc(hdr->frame_control) &&
sdata->u.mgd.probe_send_count > 0) {
if (ack)
ieee80211_sta_reset_conn_monitor(sdata);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 76f303fda3ed..954b932fd7b8 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -941,7 +941,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
if (old)
kfree_rcu(old, rcu_head);
- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
+ if (sta->uploaded)
+ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta));
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 07fb219327d6..1f3c420c01c7 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -276,7 +276,7 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
success = !!(info->flags & IEEE80211_TX_STAT_ACK);
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
- if (ar[i].idx < 0)
+ if (ar[i].idx < 0 || !ar[i].count)
break;
ndx = rix_to_ndx(mi, ar[i].idx);
@@ -289,12 +289,6 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
mi->r[ndx].stats.success += success;
}
- if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0))
- mi->sample_packets++;
-
- if (mi->sample_deferred > 0)
- mi->sample_deferred--;
-
if (time_after(jiffies, mi->last_stats_update +
(mp->update_interval * HZ) / 1000))
minstrel_update_stats(mp, mi);
@@ -373,7 +367,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
return;
delta = (mi->total_packets * sampling_ratio / 100) -
- (mi->sample_packets + mi->sample_deferred / 2);
+ mi->sample_packets;
/* delta < 0: no sampling required */
prev_sample = mi->prev_sample;
@@ -382,7 +376,6 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
return;
if (mi->total_packets >= 10000) {
- mi->sample_deferred = 0;
mi->sample_packets = 0;
mi->total_packets = 0;
} else if (delta > mi->n_rates * 2) {
@@ -407,19 +400,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
* rate sampling method should be used.
* Respect such rates that are not sampled for 20 iterations.
*/
- if (mrr_capable &&
- msr->perfect_tx_time > mr->perfect_tx_time &&
- msr->stats.sample_skipped < 20) {
- /* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark
- * packets that have the sampling rate deferred to the
- * second MRR stage. Increase the sample counter only
- * if the deferred sample rate was actually used.
- * Use the sample_deferred counter to make sure that
- * the sampling is not done in large bursts */
- info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
- rate++;
- mi->sample_deferred++;
- } else {
+ if (msr->perfect_tx_time < mr->perfect_tx_time ||
+ msr->stats.sample_skipped >= 20) {
if (!msr->sample_limit)
return;
@@ -439,6 +421,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
rate->idx = mi->r[ndx].rix;
rate->count = minstrel_get_retry_count(&mi->r[ndx], info);
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
}
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index be6c3f35f48b..d60413adb215 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -98,7 +98,6 @@ struct minstrel_sta_info {
u8 max_prob_rate;
unsigned int total_packets;
unsigned int sample_packets;
- int sample_deferred;
unsigned int sample_row;
unsigned int sample_column;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c7c456c86b0d..2ba19decb126 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -5,7 +5,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1373,8 +1373,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
if (ieee80211_is_ctl(hdr->frame_control) ||
- ieee80211_is_nullfunc(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control) ||
+ ieee80211_is_any_nullfunc(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1))
return RX_CONTINUE;
@@ -1753,8 +1752,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
* Drop (qos-)data::nullfunc frames silently, since they
* are used only to control station power saving mode.
*/
- if (ieee80211_is_nullfunc(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+ if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
/*
@@ -2011,19 +2009,34 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
return result;
}
+void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
+ skb_queue_head_init(&cache->entries[i].skb_list);
+}
+
+void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
+ __skb_queue_purge(&cache->entries[i].skb_list);
+}
+
static inline struct ieee80211_fragment_entry *
-ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
+ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
unsigned int frag, unsigned int seq, int rx_queue,
struct sk_buff **skb)
{
struct ieee80211_fragment_entry *entry;
- entry = &sdata->fragments[sdata->fragment_next++];
- if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
- sdata->fragment_next = 0;
+ entry = &cache->entries[cache->next++];
+ if (cache->next >= IEEE80211_FRAGMENT_MAX)
+ cache->next = 0;
- if (!skb_queue_empty(&entry->skb_list))
- __skb_queue_purge(&entry->skb_list);
+ __skb_queue_purge(&entry->skb_list);
__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
*skb = NULL;
@@ -2038,14 +2051,14 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
}
static inline struct ieee80211_fragment_entry *
-ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
+ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
unsigned int frag, unsigned int seq,
int rx_queue, struct ieee80211_hdr *hdr)
{
struct ieee80211_fragment_entry *entry;
int i, idx;
- idx = sdata->fragment_next;
+ idx = cache->next;
for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
struct ieee80211_hdr *f_hdr;
@@ -2053,7 +2066,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
if (idx < 0)
idx = IEEE80211_FRAGMENT_MAX - 1;
- entry = &sdata->fragments[idx];
+ entry = &cache->entries[idx];
if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
entry->rx_queue != rx_queue ||
entry->last_frag + 1 != frag)
@@ -2080,15 +2093,27 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
return NULL;
}
+static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc)
+{
+ return rx->key &&
+ (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
+ ieee80211_has_protected(fc);
+}
+
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
{
+ struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
struct ieee80211_hdr *hdr;
u16 sc;
__le16 fc;
unsigned int frag, seq;
struct ieee80211_fragment_entry *entry;
struct sk_buff *skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
hdr = (struct ieee80211_hdr *)rx->skb->data;
fc = hdr->frame_control;
@@ -2104,6 +2129,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
goto out_no_led;
}
+ if (rx->sta)
+ cache = &rx->sta->frags;
+
if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
goto out;
@@ -2122,20 +2150,17 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (frag == 0) {
/* This is the first fragment of a new frame. */
- entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
+ entry = ieee80211_reassemble_add(cache, frag, seq,
rx->seqno_idx, &(rx->skb));
- if (rx->key &&
- (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
- rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
- rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
- rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
- ieee80211_has_protected(fc)) {
+ if (requires_sequential_pn(rx, fc)) {
int queue = rx->security_idx;
/* Store CCMP/GCMP PN so that we can verify that the
* next fragment has a sequential PN value.
*/
entry->check_sequential_pn = true;
+ entry->is_protected = true;
+ entry->key_color = rx->key->color;
memcpy(entry->last_pn,
rx->key->u.ccmp.rx_pn[queue],
IEEE80211_CCMP_PN_LEN);
@@ -2147,6 +2172,11 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
sizeof(rx->key->u.gcmp.rx_pn[queue]));
BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
IEEE80211_GCMP_PN_LEN);
+ } else if (rx->key &&
+ (ieee80211_has_protected(fc) ||
+ (status->flag & RX_FLAG_DECRYPTED))) {
+ entry->is_protected = true;
+ entry->key_color = rx->key->color;
}
return RX_QUEUED;
}
@@ -2154,7 +2184,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
/* This is a fragment for a frame that should already be pending in
* the fragment cache. Add this fragment to the end of the pending entry.
*/
- entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
+ entry = ieee80211_reassemble_find(cache, frag, seq,
rx->seqno_idx, hdr);
if (!entry) {
I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
@@ -2169,25 +2199,39 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (entry->check_sequential_pn) {
int i;
u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
- int queue;
- if (!rx->key ||
- (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
+ if (!requires_sequential_pn(rx, fc))
+ return RX_DROP_UNUSABLE;
+
+ /* Prevent mixed key and fragment cache attacks */
+ if (entry->key_color != rx->key->color)
return RX_DROP_UNUSABLE;
+
memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
pn[i]++;
if (pn[i])
break;
}
- queue = rx->security_idx;
- rpn = rx->key->u.ccmp.rx_pn[queue];
+
+ rpn = rx->ccm_gcm.pn;
if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
return RX_DROP_UNUSABLE;
memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
+ } else if (entry->is_protected &&
+ (!rx->key ||
+ (!ieee80211_has_protected(fc) &&
+ !(status->flag & RX_FLAG_DECRYPTED)) ||
+ rx->key->color != entry->key_color)) {
+ /* Drop this as a mixed key or fragment cache attack, even
+ * if for TKIP Michael MIC should protect us, and WEP is a
+ * lost cause anyway.
+ */
+ return RX_DROP_UNUSABLE;
+ } else if (entry->is_protected && rx->key &&
+ entry->key_color != rx->key->color &&
+ (status->flag & RX_FLAG_DECRYPTED)) {
+ return RX_DROP_UNUSABLE;
}
skb_pull(rx->skb, ieee80211_hdrlen(fc));
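Editor's note on the check_sequential_pn branch above: a follow-up fragment is accepted only if its CCMP/GCMP packet number is exactly last_pn + 1, where the 6-byte PN is treated as a big-endian counter incremented with carry from the last byte. A compact user-space sketch of that comparison, with illustrative names and fixed example values:

#include <stdio.h>
#include <string.h>

#define PN_LEN 6

static void pn_increment(unsigned char pn[PN_LEN])
{
	int i;

	for (i = PN_LEN - 1; i >= 0; i--) {
		pn[i]++;
		if (pn[i])	/* stop once a byte did not wrap to 0 */
			break;
	}
}

int main(void)
{
	unsigned char last_pn[PN_LEN] = { 0, 0, 0, 0, 0x01, 0xff };
	unsigned char rx_pn[PN_LEN]   = { 0, 0, 0, 0, 0x02, 0x00 };
	unsigned char expect[PN_LEN];

	memcpy(expect, last_pn, PN_LEN);
	pn_increment(expect);

	/* the fragment is kept only if its PN is exactly last_pn + 1 */
	printf("sequential: %s\n",
	       memcmp(expect, rx_pn, PN_LEN) == 0 ? "yes" : "no");
	return 0;
}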
@@ -2232,6 +2276,7 @@ static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
{
+ struct ieee80211_hdr *hdr = (void *)rx->skb->data;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -2242,9 +2287,34 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
if (status->flag & RX_FLAG_DECRYPTED)
return 0;
+ /* check mesh EAPOL frames first */
+ if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
+ ieee80211_is_data(fc))) {
+ struct ieee80211s_hdr *mesh_hdr;
+ u16 hdr_len = ieee80211_hdrlen(fc);
+ u16 ethertype_offset;
+ __be16 ethertype;
+
+ if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
+ goto drop_check;
+
+ /* make sure fixed part of mesh header is there, also checks skb len */
+ if (!pskb_may_pull(rx->skb, hdr_len + 6))
+ goto drop_check;
+
+ mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
+ ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
+ sizeof(rfc1042_header);
+
+ if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
+ ethertype == rx->sdata->control_port_protocol)
+ return 0;
+ }
+
+drop_check:
/* Drop unencrypted frames if key is set. */
if (unlikely(!ieee80211_has_protected(fc) &&
- !ieee80211_is_nullfunc(fc) &&
+ !ieee80211_is_any_nullfunc(fc) &&
ieee80211_is_data(fc) && rx->key))
return -EACCES;
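Editor's note on the mesh branch added above: it locates the ethertype of a mesh data frame at the 802.11 header length plus the variable mesh header length plus the 6-byte RFC 1042 SNAP prefix, and lets the frame bypass the drop check only when that ethertype equals the control port protocol (EAPOL). A quick offset calculation with hypothetical header sizes:

#include <stdio.h>

int main(void)
{
	unsigned int hdr_len = 32;	/* hypothetical: QoS data, 4 addresses */
	unsigned int mesh_hdrlen = 6;	/* hypothetical: shortest mesh header */
	unsigned int rfc1042_len = 6;	/* LLC/SNAP bytes before the ethertype */
	unsigned int ethertype_offset = hdr_len + mesh_hdrlen + rfc1042_len;

	/* the patch reads 2 bytes at this offset and compares them with
	 * the control port protocol (0x888e for EAPOL) */
	printf("ethertype sits at byte offset %u\n", ethertype_offset);
	return 0;
}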
@@ -2347,13 +2417,13 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
/*
- * Allow EAPOL frames to us/the PAE group address regardless
- * of whether the frame was encrypted or not.
+ * Allow EAPOL frames to us/the PAE group address regardless of
+ * whether the frame was encrypted or not, and always disallow
+ * all other destination addresses for them.
*/
- if (ehdr->h_proto == rx->sdata->control_port_protocol &&
- (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
- ether_addr_equal(ehdr->h_dest, pae_group_addr)))
- return true;
+ if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol))
+ return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
+ ether_addr_equal(ehdr->h_dest, pae_group_addr);
if (ieee80211_802_1x_port_control(rx) ||
ieee80211_drop_unencrypted(rx, fc))
@@ -2377,8 +2447,28 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
cfg80211_rx_control_port(dev, skb, noencrypt);
dev_kfree_skb(skb);
} else {
+ struct ethhdr *ehdr = (void *)skb_mac_header(skb);
+
memset(skb->cb, 0, sizeof(skb->cb));
+ /*
+ * 802.1X over 802.11 requires that the authenticator address
+ * be used for EAPOL frames. However, 802.1X allows the use of
+ * the PAE group address instead. If the interface is part of
+ * a bridge and we pass the frame with the PAE group address,
+ * then the bridge will forward it to the network (even if the
+ * client was not associated yet), which isn't supposed to
+ * happen.
+ * To avoid that, rewrite the destination address to our own
+ * address, so that the authenticator (e.g. hostapd) will see
+ * the frame, but the bridge won't forward it anywhere else. Note
+ * that due to earlier filtering, the only other address can
+ * be the PAE group address.
+ */
+ if (unlikely(skb->protocol == sdata->control_port_protocol &&
+ !ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
+ ether_addr_copy(ehdr->h_dest, sdata->vif.addr);
+
/* deliver to local stack */
if (rx->napi)
napi_gro_receive(rx->napi, skb);
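Editor's note on the rewrite above: EAPOL frames addressed to the PAE group address are retargeted to the interface address before local delivery, so a bridge stacked on the AP interface cannot flood them. A user-space sketch of just that rewrite decision; the addresses and variable names are made up:

#include <stdio.h>
#include <string.h>

static const unsigned char pae_group_addr[6] = {
	0x01, 0x80, 0xc2, 0x00, 0x00, 0x03
};

int main(void)
{
	unsigned char vif_addr[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	unsigned char h_dest[6];
	unsigned int h_proto = 0x888e;	/* EAPOL / control port protocol */

	memcpy(h_dest, pae_group_addr, sizeof(h_dest));

	/* mirror of: protocol is the control port protocol and the
	 * destination is not our own address -> rewrite it */
	if (h_proto == 0x888e && memcmp(h_dest, vif_addr, sizeof(h_dest)) != 0)
		memcpy(h_dest, vif_addr, sizeof(h_dest));

	printf("delivered to own address: %d\n",
	       memcmp(h_dest, vif_addr, sizeof(h_dest)) == 0);
	return 0;
}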
@@ -2418,6 +2508,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
if ((sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
+ ehdr->h_proto != rx->sdata->control_port_protocol &&
(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
if (is_multicast_ether_addr(ehdr->h_dest) &&
ieee80211_vif_get_num_mcast_if(sdata) != 0) {
@@ -2526,7 +2617,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
rx->sdata->vif.addr,
rx->sdata->vif.type,
- data_offset))
+ data_offset, true))
return RX_DROP_UNUSABLE;
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
@@ -2583,6 +2674,23 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (is_multicast_ether_addr(hdr->addr1))
return RX_DROP_UNUSABLE;
+ if (rx->key) {
+ /*
+ * We should not receive A-MSDUs on pre-HT connections,
+ * and HT connections cannot use old ciphers. Thus drop
+ * them, as in those cases we couldn't even have SPP
+ * A-MSDUs or such.
+ */
+ switch (rx->key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ return RX_DROP_UNUSABLE;
+ default:
+ break;
+ }
+ }
+
return __ieee80211_rx_h_amsdu(rx, 0);
}
@@ -3979,6 +4087,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
rcu_read_lock();
key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+ if (!key)
+ key = rcu_dereference(sdata->default_unicast_key);
if (key) {
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_TKIP:
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index ec2e83272f9d..71c6a462277f 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -3,7 +3,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -244,6 +244,24 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
*/
void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
{
+ /*
+ * If we had used sta_info_pre_move_state() then we might not
+ * have gone through the state transitions down again, so do
+ * it here now (and warn if it's inserted).
+ *
+ * This will clear state such as fast TX/RX that may have been
+ * allocated during state transitions.
+ */
+ while (sta->sta_state > IEEE80211_STA_NONE) {
+ int ret;
+
+ WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED));
+
+ ret = sta_info_move_state(sta, sta->sta_state - 1);
+ if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret))
+ break;
+ }
+
if (sta->rate_ctrl)
rate_control_free_sta(sta);
@@ -351,6 +369,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
u64_stats_init(&sta->rx_stats.syncp);
+ ieee80211_init_frag_cache(&sta->frags);
+
sta->sta_state = IEEE80211_STA_NONE;
/* Mark TID as unreserved */
@@ -616,7 +636,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
out_drop_sta:
local->num_sta--;
synchronize_net();
- __cleanup_single_sta(sta);
+ cleanup_single_sta(sta);
out_err:
mutex_unlock(&local->sta_mtx);
kfree(sinfo);
@@ -635,19 +655,13 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
err = sta_info_insert_check(sta);
if (err) {
+ sta_info_free(local, sta);
mutex_unlock(&local->sta_mtx);
rcu_read_lock();
- goto out_free;
+ return err;
}
- err = sta_info_insert_finish(sta);
- if (err)
- goto out_free;
-
- return 0;
- out_free:
- sta_info_free(local, sta);
- return err;
+ return sta_info_insert_finish(sta);
}
int sta_info_insert(struct sta_info *sta)
@@ -979,7 +993,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
might_sleep();
lockdep_assert_held(&local->sta_mtx);
- while (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
+ if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
WARN_ON_ONCE(ret);
}
@@ -1020,6 +1034,8 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
rate_control_remove_sta_debugfs(sta);
ieee80211_sta_debugfs_remove(sta);
+ ieee80211_destroy_frag_cache(&sta->frags);
+
cleanup_single_sta(sta);
}
@@ -2009,6 +2025,10 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
int rate_idx = STA_STATS_GET(LEGACY_IDX, rate);
sband = local->hw.wiphy->bands[band];
+
+ if (WARN_ON_ONCE(!sband->bitrates))
+ break;
+
brate = sband->bitrates[rate_idx].bitrate;
if (rinfo->bw == RATE_INFO_BW_5)
shift = 2;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 9a04327d71d1..c33bc5fc0f2d 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -2,6 +2,7 @@
* Copyright 2002-2005, Devicescape Software, Inc.
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * Copyright(c) 2020-2021 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -412,6 +413,34 @@ struct ieee80211_sta_rx_stats {
};
/*
+ * IEEE 802.11-2016 (10.6 "Defragmentation") recommends support for "concurrent
+ * reception of at least one MSDU per access category per associated STA"
+ * on APs, or "at least one MSDU per access category" on other interface types.
+ *
+ * This limit can be increased by changing this define, at the cost of slower
+ * frame reassembly and increased memory use while fragments are pending.
+ */
+#define IEEE80211_FRAGMENT_MAX 4
+
+struct ieee80211_fragment_entry {
+ struct sk_buff_head skb_list;
+ unsigned long first_frag_time;
+ u16 seq;
+ u16 extra_len;
+ u16 last_frag;
+ u8 rx_queue;
+ u8 check_sequential_pn:1, /* needed for CCMP/GCMP */
+ is_protected:1;
+ u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
+ unsigned int key_color;
+};
+
+struct ieee80211_fragment_cache {
+ struct ieee80211_fragment_entry entries[IEEE80211_FRAGMENT_MAX];
+ unsigned int next;
+};
+
+/*
* The bandwidth threshold below which the per-station CoDel parameters will be
* scaled to be more lenient (to prevent starvation of slow stations). This
* value will be scaled by the number of active stations when it is being
@@ -482,6 +511,7 @@ struct ieee80211_sta_rx_stats {
* @pcpu_rx_stats: per-CPU RX statistics, assigned only if the driver needs
* this (by advertising the USES_RSS hw flag)
* @status_stats: TX status statistics
+ * @frags: fragment cache
*/
struct sta_info {
/* General information, mostly static */
@@ -583,6 +613,8 @@ struct sta_info {
struct cfg80211_chan_def tdls_chandef;
+ struct ieee80211_fragment_cache frags;
+
/* keep last! */
struct ieee80211_sta sta;
};
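Editor's note on the sta_info.h additions above: the former per-interface fragment array becomes a struct ieee80211_fragment_cache that is also embedded in each station, with IEEE80211_FRAGMENT_MAX entries reused round-robin (see ieee80211_reassemble_add() in the rx.c hunks earlier). A tiny sketch of that ring-style reuse, using illustrative types:

#include <stdio.h>

#define FRAGMENT_MAX 4		/* mirrors IEEE80211_FRAGMENT_MAX */

struct frag_entry {
	unsigned int seq;	/* stands in for the real reassembly state */
};

struct frag_cache {
	struct frag_entry entries[FRAGMENT_MAX];
	unsigned int next;
};

/* pick the next slot, wrapping around and recycling the oldest entry */
static struct frag_entry *frag_cache_add(struct frag_cache *cache,
					 unsigned int seq)
{
	struct frag_entry *entry = &cache->entries[cache->next++];

	if (cache->next >= FRAGMENT_MAX)
		cache->next = 0;
	entry->seq = seq;	/* the real code purges and refills an skb list */
	return entry;
}

int main(void)
{
	struct frag_cache cache = { 0 };
	unsigned int seq;

	for (seq = 100; seq < 106; seq++)
		printf("seq %u -> slot %ld\n", seq,
		       (long)(frag_cache_add(&cache, seq) - cache.entries));
	return 0;
}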
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index f895c656407b..aeb51e385f25 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -487,8 +487,7 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
rcu_read_lock();
sdata = ieee80211_sdata_from_skb(local, skb);
if (sdata) {
- if (ieee80211_is_nullfunc(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control))
+ if (ieee80211_is_any_nullfunc(hdr->frame_control))
cfg80211_probe_status(sdata->dev, hdr->addr1,
cookie, acked,
info->status.ack_signal,
@@ -867,7 +866,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
I802_DEBUG_INC(local->dot11FailedCount);
}
- if ((ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
+ if (ieee80211_is_any_nullfunc(fc) &&
ieee80211_has_pm(fc) &&
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 84639363173b..98d048630ad2 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -300,7 +300,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
!ieee80211_is_probe_req(hdr->frame_control) &&
- !ieee80211_is_nullfunc(hdr->frame_control))
+ !ieee80211_is_any_nullfunc(hdr->frame_control))
/*
* When software scanning only nullfunc frames (to notify
* the sleep state to the AP) and probe requests (for the
@@ -1908,19 +1908,24 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
/* device xmit handlers */
+enum ieee80211_encrypt {
+ ENCRYPT_NO,
+ ENCRYPT_MGMT,
+ ENCRYPT_DATA,
+};
+
static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
- int head_need, bool may_encrypt)
+ int head_need,
+ enum ieee80211_encrypt encrypt)
{
struct ieee80211_local *local = sdata->local;
- struct ieee80211_hdr *hdr;
bool enc_tailroom;
int tail_need = 0;
- hdr = (struct ieee80211_hdr *) skb->data;
- enc_tailroom = may_encrypt &&
- (sdata->crypto_tx_tailroom_needed_cnt ||
- ieee80211_is_mgmt(hdr->frame_control));
+ enc_tailroom = encrypt == ENCRYPT_MGMT ||
+ (encrypt == ENCRYPT_DATA &&
+ sdata->crypto_tx_tailroom_needed_cnt);
if (enc_tailroom) {
tail_need = IEEE80211_ENCRYPT_TAILROOM;
@@ -1952,23 +1957,29 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
int headroom;
- bool may_encrypt;
+ enum ieee80211_encrypt encrypt;
- may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
+ if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)
+ encrypt = ENCRYPT_NO;
+ else if (ieee80211_is_mgmt(hdr->frame_control))
+ encrypt = ENCRYPT_MGMT;
+ else
+ encrypt = ENCRYPT_DATA;
headroom = local->tx_headroom;
- if (may_encrypt)
+ if (encrypt != ENCRYPT_NO)
headroom += sdata->encrypt_headroom;
headroom -= skb_headroom(skb);
headroom = max_t(int, 0, headroom);
- if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
+ if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) {
ieee80211_free_txskb(&local->hw, skb);
return;
}
+ /* reload after potential resize */
hdr = (struct ieee80211_hdr *) skb->data;
info->control.vif = &sdata->vif;
@@ -2751,7 +2762,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
head_need += sdata->encrypt_headroom;
head_need += local->tx_headroom;
head_need = max_t(int, 0, head_need);
- if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
+ if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) {
ieee80211_free_txskb(&local->hw, skb);
skb = NULL;
return ERR_PTR(-ENOMEM);
@@ -3414,7 +3425,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
if (unlikely(ieee80211_skb_resize(sdata, skb,
max_t(int, extra_head + hw_headroom -
skb_headroom(skb), 0),
- false))) {
+ ENCRYPT_NO))) {
kfree_skb(skb);
return true;
}
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 259325cbcc31..d691c2f2e92e 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -170,10 +170,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
/* take some capabilities as-is */
cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info);
vht_cap->cap = cap_info;
- vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 |
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_RXLDPC |
+ vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_VHT_TXOP_PS |
IEEE80211_VHT_CAP_HTC_VHT |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
@@ -182,6 +179,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
+ vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK,
+ own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK);
+
/* and some based on our own capabilities */
switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
@@ -421,12 +421,18 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
* IEEE80211-2016 specification makes higher bandwidth operation
* possible on the TDLS link if the peers have wider bandwidth
* capability.
+ *
+ * However, in this case, and only if the TDLS peer is authorized,
+ * limit to the tdls_chandef so that the configuration here isn't
+ * wider than what's actually requested on the channel context.
*/
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
- test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
- return bw;
-
- bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
+ test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) &&
+ test_sta_flag(sta, WLAN_STA_AUTHORIZED) &&
+ sta->tdls_chandef.chan)
+ bw = min(bw, ieee80211_chan_width_to_rx_bw(sta->tdls_chandef.width));
+ else
+ bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
return bw;
}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 5dd48f0a4b1b..dbeccccf0fdf 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -2,6 +2,7 @@
* Copyright 2002-2004, Instant802 Networks, Inc.
* Copyright 2008, Jouni Malinen <j@w1.fi>
* Copyright (C) 2016-2017 Intel Deutschland GmbH
+ * Copyright (C) 2020-2021 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -170,8 +171,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
update_iv:
/* update IV in key information to be able to detect replays */
- rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32;
- rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16;
+ rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip.iv32;
+ rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip.iv16;
return RX_CONTINUE;
@@ -297,8 +298,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
key, skb->data + hdrlen,
skb->len - hdrlen, rx->sta->sta.addr,
hdr->addr1, hwaccel, rx->security_idx,
- &rx->tkip_iv32,
- &rx->tkip_iv16);
+ &rx->tkip.iv32,
+ &rx->tkip.iv16);
if (res != TKIP_DECRYPT_OK)
return RX_DROP_UNUSABLE;
@@ -556,6 +557,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
}
memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
+ if (unlikely(ieee80211_is_frag(hdr)))
+ memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
}
/* Remove CCMP header and MIC */
@@ -784,6 +787,8 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
}
memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
+ if (unlikely(ieee80211_is_frag(hdr)))
+ memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
}
/* Remove GCMP header and MIC */
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 2fb703d70803..d742e635ad07 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -160,7 +160,7 @@ err_tfm0:
crypto_free_skcipher(key->tfm0);
err_tfm:
for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
- if (key->tfm[i])
+ if (!IS_ERR_OR_NULL(key->tfm[i]))
crypto_free_aead(key->tfm[i]);
kzfree(key);
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index bcd1a5e6ebf4..2f873a0dc583 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -42,11 +42,11 @@ void ieee802154_xmit_worker(struct work_struct *work)
if (res)
goto err_tx;
- ieee802154_xmit_complete(&local->hw, skb, false);
-
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
+ ieee802154_xmit_complete(&local->hw, skb, false);
+
return;
err_tx:
@@ -86,6 +86,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
/* async is priority, otherwise sync is fallback */
if (local->ops->xmit_async) {
+ unsigned int len = skb->len;
+
ret = drv_xmit_async(local, skb);
if (ret) {
ieee802154_wake_queue(&local->hw);
@@ -93,7 +95,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
}
dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += len;
} else {
local->tx_skb = skb;
queue_work(local->workqueue, &local->tx_work);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index d5a4db5b3fe7..7623d9aec636 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -618,16 +618,15 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net,
struct net_device *dev;
struct dst_entry *dst;
struct flowi6 fl6;
- int err;
if (!ipv6_stub)
return ERR_PTR(-EAFNOSUPPORT);
memset(&fl6, 0, sizeof(fl6));
memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
- err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
- if (err)
- return ERR_PTR(err);
+ dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
+ if (IS_ERR(dst))
+ return ERR_CAST(dst);
dev = dst->dev;
dev_hold(dev);
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index b4da6d8e8632..2129856b5933 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -18,6 +18,7 @@
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+#include <net/mpls.h>
static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
netdev_features_t features)
@@ -31,6 +32,8 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
skb_reset_network_header(skb);
mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
+ if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN))
+ goto out;
if (unlikely(!pskb_may_pull(skb, mpls_hlen)))
goto out;
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 091284760d21..9fd20fa90000 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -84,13 +84,20 @@ static void ncsi_channel_monitor(struct timer_list *t)
monitor_state = nc->monitor.state;
spin_unlock_irqrestore(&nc->lock, flags);
- if (!enabled || chained) {
- ncsi_stop_channel_monitor(nc);
- return;
- }
+ if (!enabled)
+ return; /* expected race disabling timer */
+ if (WARN_ON_ONCE(chained))
+ goto bad_state;
+
if (state != NCSI_CHANNEL_INACTIVE &&
state != NCSI_CHANNEL_ACTIVE) {
- ncsi_stop_channel_monitor(nc);
+bad_state:
+ netdev_warn(ndp->ndev.dev,
+ "Bad NCSI monitor state channel %d 0x%x %s queue\n",
+ nc->id, state, chained ? "on" : "off");
+ spin_lock_irqsave(&nc->lock, flags);
+ nc->monitor.enabled = false;
+ spin_unlock_irqrestore(&nc->lock, flags);
return;
}
@@ -117,10 +124,9 @@ static void ncsi_channel_monitor(struct timer_list *t)
ndp->flags |= NCSI_DEV_RESHUFFLE;
}
- ncsi_stop_channel_monitor(nc);
-
ncm = &nc->modes[NCSI_MODE_LINK];
spin_lock_irqsave(&nc->lock, flags);
+ nc->monitor.enabled = false;
nc->state = NCSI_CHANNEL_INVISIBLE;
ncm->data[2] &= ~0x1;
spin_unlock_irqrestore(&nc->lock, flags);
@@ -1484,9 +1490,6 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
ndp->ptype.dev = dev;
dev_add_pack(&ndp->ptype);
- /* Set up generic netlink interface */
- ncsi_init_netlink(dev);
-
return nd;
}
EXPORT_SYMBOL_GPL(ncsi_register_dev);
@@ -1566,8 +1569,6 @@ void ncsi_unregister_dev(struct ncsi_dev *nd)
#endif
spin_unlock_irqrestore(&ncsi_dev_lock, flags);
- ncsi_unregister_netlink(nd->dev);
-
kfree(ndp);
}
EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 45f33d6dedf7..a2f4280e2889 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -397,24 +397,8 @@ static struct genl_family ncsi_genl_family __ro_after_init = {
.n_ops = ARRAY_SIZE(ncsi_ops),
};
-int ncsi_init_netlink(struct net_device *dev)
+static int __init ncsi_init_netlink(void)
{
- int rc;
-
- rc = genl_register_family(&ncsi_genl_family);
- if (rc)
- netdev_err(dev, "ncsi: failed to register netlink family\n");
-
- return rc;
-}
-
-int ncsi_unregister_netlink(struct net_device *dev)
-{
- int rc;
-
- rc = genl_unregister_family(&ncsi_genl_family);
- if (rc)
- netdev_err(dev, "ncsi: failed to unregister netlink family\n");
-
- return rc;
+ return genl_register_family(&ncsi_genl_family);
}
+subsys_initcall(ncsi_init_netlink);
diff --git a/net/ncsi/ncsi-netlink.h b/net/ncsi/ncsi-netlink.h
index 91a5c256f8c4..6c55a6775157 100644
--- a/net/ncsi/ncsi-netlink.h
+++ b/net/ncsi/ncsi-netlink.h
@@ -14,7 +14,4 @@
#include "internal.h"
-int ncsi_init_netlink(struct net_device *dev);
-int ncsi_unregister_netlink(struct net_device *dev);
-
#endif /* __NCSI_NETLINK_H__ */
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 930c1d3796f0..a43c9a44f870 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -949,7 +949,7 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
int payload, i, ret;
/* Find the NCSI device */
- nd = ncsi_find_dev(dev);
+ nd = ncsi_find_dev(orig_dev);
ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
if (!ndp)
return -ENODEV;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 21eb53f6d4fe..0427e66bc478 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -385,6 +385,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
if (!add_extension(id, cadt_flags, tb))
continue;
+ if (align < ip_set_extensions[id].align)
+ align = ip_set_extensions[id].align;
len = ALIGN(len, ip_set_extensions[id].align);
set->offset[id] = len;
set->extensions |= ip_set_extensions[id].type;
@@ -486,13 +488,14 @@ ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext,
if (SET_WITH_COUNTER(set)) {
struct ip_set_counter *counter = ext_counter(data, set);
+ ip_set_update_counter(counter, ext, flags);
+
if (flags & IPSET_FLAG_MATCH_COUNTERS &&
!(ip_set_match_counter(ip_set_get_packets(counter),
mext->packets, mext->packets_op) &&
ip_set_match_counter(ip_set_get_bytes(counter),
mext->bytes, mext->bytes_op)))
return false;
- ip_set_update_counter(counter, ext, flags);
}
if (SET_WITH_SKBINFO(set))
ip_set_get_skbinfo(ext_skbinfo(data, set),
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index ddfe06d7530b..154b40f2f2a8 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -115,20 +115,6 @@ htable_size(u8 hbits)
return hsize * sizeof(struct hbucket *) + sizeof(struct htable);
}
-/* Compute htable_bits from the user input parameter hashsize */
-static u8
-htable_bits(u32 hashsize)
-{
- /* Assume that hashsize == 2^htable_bits */
- u8 bits = fls(hashsize - 1);
-
- if (jhash_size(bits) != hashsize)
- /* Round up to the first 2^n value */
- bits = fls(hashsize);
-
- return bits;
-}
-
#ifdef IP_SET_HASH_WITH_NETS
#if IPSET_NET_COUNT > 1
#define __CIDR(cidr, i) (cidr[i])
@@ -1287,7 +1273,11 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
if (!h)
return -ENOMEM;
- hbits = htable_bits(hashsize);
+ /* Compute htable_bits from the user input parameter hashsize.
+ * Assume that hashsize == 2^htable_bits,
+ * otherwise round up to the first 2^n value.
+ */
+ hbits = fls(hashsize - 1);
hsize = htable_size(hbits);
if (hsize == 0) {
kfree(h);
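Editor's note on the inlined computation above, which replaces the removed htable_bits() helper: fls(hashsize - 1) gives the number of bits needed, which equals log2(hashsize) when hashsize is already a power of two and otherwise rounds up to the next power of two. A quick check with a local stand-in for fls() (the kernel's fls() is a bit-scan builtin):

#include <stdio.h>

/* portable stand-in: index (1-based) of the highest set bit, 0 for 0 */
static int fls_demo(unsigned int x)
{
	int bits = 0;

	while (x) {
		bits++;
		x >>= 1;
	}
	return bits;
}

int main(void)
{
	unsigned int sizes[] = { 1024, 1000, 1025 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("hashsize=%u -> htable_bits=%d\n",
		       sizes[i], fls_demo(sizes[i] - 1));
	/* prints 10, 10 and 11 respectively */
	return 0;
}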
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 8da228da53ae..993ce04e6ea0 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -63,7 +63,7 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
/* Don't lookup sub-counters at all */
opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS;
if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
- opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
+ opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE;
list_for_each_entry_rcu(e, &map->members, list) {
ret = ip_set_test(e->id, skb, par, opt);
if (ret <= 0)
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index a71f777d1353..acaeeaf81441 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -725,12 +725,12 @@ static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af,
struct dst_entry *dst = skb_dst(skb);
if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
- ip6_route_me_harder(ipvs->net, skb) != 0)
+ ip6_route_me_harder(ipvs->net, skb->sk, skb) != 0)
return 1;
} else
#endif
if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
- ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0)
+ ip_route_me_harder(ipvs->net, skb->sk, skb, RTN_LOCAL) != 0)
return 1;
return 0;
@@ -1928,14 +1928,14 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
- bool uses_ct = false, resched = false;
+ bool old_ct = false, resched = false;
if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
unlikely(!atomic_read(&cp->dest->weight))) {
resched = true;
- uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
+ old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
} else if (is_new_conn_expected(cp, conn_reuse_mode)) {
- uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
+ old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
if (!atomic_read(&cp->n_control)) {
resched = true;
} else {
@@ -1943,15 +1943,17 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
* that uses conntrack while it is still
* referenced by controlled connection(s).
*/
- resched = !uses_ct;
+ resched = !old_ct;
}
}
if (resched) {
+ if (!old_ct)
+ cp->flags &= ~IP_VS_CONN_F_NFCT;
if (!atomic_read(&cp->n_control))
ip_vs_conn_expire_now(cp);
__ip_vs_conn_put(cp);
- if (uses_ct)
+ if (old_ct)
return NF_DROP;
cp = NULL;
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c339b5e386b7..3ad1de081e3c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2393,6 +2393,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
/* Set timeout values for (tcp tcpfin udp) */
ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg);
goto out_unlock;
+ } else if (!len) {
+ /* No more commands with len == 0 below */
+ ret = -EINVAL;
+ goto out_unlock;
}
usvc_compat = (struct ip_vs_service_user *)arg;
@@ -2469,9 +2473,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
break;
case IP_VS_SO_SET_DELDEST:
ret = ip_vs_del_dest(svc, &udest);
- break;
- default:
- ret = -EINVAL;
}
out_unlock:
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 5acd99f83166..f6af13c16cf5 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1717,6 +1717,8 @@ static int sync_thread_backup(void *data)
{
struct ip_vs_sync_thread_data *tinfo = data;
struct netns_ipvs *ipvs = tinfo->ipvs;
+ struct sock *sk = tinfo->sock->sk;
+ struct udp_sock *up = udp_sk(sk);
int len;
pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
@@ -1724,12 +1726,14 @@ static int sync_thread_backup(void *data)
ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id);
while (!kthread_should_stop()) {
- wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
- !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
- || kthread_should_stop());
+ wait_event_interruptible(*sk_sleep(sk),
+ !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+ !skb_queue_empty_lockless(&up->reader_queue) ||
+ kthread_should_stop());
/* do we have data now? */
- while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
+ while (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+ !skb_queue_empty_lockless(&up->reader_queue)) {
len = ip_vs_receive(tinfo->sock, tinfo->buf,
ipvs->bcfg.sync_maxlen);
if (len <= 0) {
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 3f75cd947045..11f7c546e57b 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -586,6 +586,8 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
if (ret == NF_ACCEPT) {
nf_reset(skb);
skb_forward_csum(skb);
+ if (skb->dev)
+ skb->tstamp = 0;
}
return ret;
}
@@ -626,6 +628,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
if (!local) {
skb_forward_csum(skb);
+ if (skb->dev)
+ skb->tstamp = 0;
NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
NULL, skb_dst(skb)->dev, dst_output);
} else
@@ -646,6 +650,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
if (!local) {
ip_vs_drop_early_demux_sk(skb);
skb_forward_csum(skb);
+ if (skb->dev)
+ skb->tstamp = 0;
NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
NULL, skb_dst(skb)->dev, dst_output);
} else
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index c6073d17c324..1dceda3c0e75 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1063,7 +1063,8 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
* Let nf_ct_resolve_clash() deal with this later.
*/
if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
+ nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
continue;
NF_CT_STAT_INC_ATOMIC(net, found);
@@ -1352,9 +1353,9 @@ __nf_conntrack_alloc(struct net *net,
*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
ct->status = 0;
write_pnet(&ct->ct_net, net);
- memset(&ct->__nfct_init_offset[0], 0,
+ memset(&ct->__nfct_init_offset, 0,
offsetof(struct nf_conn, proto) -
- offsetof(struct nf_conn, __nfct_init_offset[0]));
+ offsetof(struct nf_conn, __nfct_init_offset));
nf_ct_zone_add(ct, zone);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 31fa94064a62..15c9fbcd32f2 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1129,6 +1129,8 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
if (!tb[CTA_TUPLE_IP])
return -EINVAL;
+ if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
+ return -EOPNOTSUPP;
tuple->src.l3num = l3num;
err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
@@ -2654,6 +2656,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
memset(&m, 0xFF, sizeof(m));
memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
m.src.u.all = mask->src.u.all;
+ m.src.l3num = tuple->src.l3num;
m.dst.protonum = tuple->dst.protonum;
nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 11562f2a08bb..203107ce2455 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -71,24 +71,32 @@ EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn);
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
/* PptpControlMessageType names */
-const char *const pptp_msg_name[] = {
- "UNKNOWN_MESSAGE",
- "START_SESSION_REQUEST",
- "START_SESSION_REPLY",
- "STOP_SESSION_REQUEST",
- "STOP_SESSION_REPLY",
- "ECHO_REQUEST",
- "ECHO_REPLY",
- "OUT_CALL_REQUEST",
- "OUT_CALL_REPLY",
- "IN_CALL_REQUEST",
- "IN_CALL_REPLY",
- "IN_CALL_CONNECT",
- "CALL_CLEAR_REQUEST",
- "CALL_DISCONNECT_NOTIFY",
- "WAN_ERROR_NOTIFY",
- "SET_LINK_INFO"
+static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = {
+ [0] = "UNKNOWN_MESSAGE",
+ [PPTP_START_SESSION_REQUEST] = "START_SESSION_REQUEST",
+ [PPTP_START_SESSION_REPLY] = "START_SESSION_REPLY",
+ [PPTP_STOP_SESSION_REQUEST] = "STOP_SESSION_REQUEST",
+ [PPTP_STOP_SESSION_REPLY] = "STOP_SESSION_REPLY",
+ [PPTP_ECHO_REQUEST] = "ECHO_REQUEST",
+ [PPTP_ECHO_REPLY] = "ECHO_REPLY",
+ [PPTP_OUT_CALL_REQUEST] = "OUT_CALL_REQUEST",
+ [PPTP_OUT_CALL_REPLY] = "OUT_CALL_REPLY",
+ [PPTP_IN_CALL_REQUEST] = "IN_CALL_REQUEST",
+ [PPTP_IN_CALL_REPLY] = "IN_CALL_REPLY",
+ [PPTP_IN_CALL_CONNECT] = "IN_CALL_CONNECT",
+ [PPTP_CALL_CLEAR_REQUEST] = "CALL_CLEAR_REQUEST",
+ [PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY",
+ [PPTP_WAN_ERROR_NOTIFY] = "WAN_ERROR_NOTIFY",
+ [PPTP_SET_LINK_INFO] = "SET_LINK_INFO"
};
+
+const char *pptp_msg_name(u_int16_t msg)
+{
+ if (msg > PPTP_MSG_MAX)
+ return pptp_msg_name_array[0];
+
+ return pptp_msg_name_array[msg];
+}
EXPORT_SYMBOL(pptp_msg_name);
#endif
@@ -275,7 +283,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound;
msg = ntohs(ctlh->messageType);
- pr_debug("inbound control message %s\n", pptp_msg_name[msg]);
+ pr_debug("inbound control message %s\n", pptp_msg_name(msg));
switch (msg) {
case PPTP_START_SESSION_REPLY:
@@ -310,7 +318,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
pcid = pptpReq->ocack.peersCallID;
if (info->pns_call_id != pcid)
goto invalid;
- pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg],
+ pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg),
ntohs(cid), ntohs(pcid));
if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) {
@@ -327,7 +335,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
goto invalid;
cid = pptpReq->icreq.callID;
- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
info->cstate = PPTP_CALL_IN_REQ;
info->pac_call_id = cid;
break;
@@ -346,7 +354,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
if (info->pns_call_id != pcid)
goto invalid;
- pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid));
+ pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid));
info->cstate = PPTP_CALL_IN_CONF;
/* we expect a GRE connection from PAC to PNS */
@@ -356,7 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
case PPTP_CALL_DISCONNECT_NOTIFY:
/* server confirms disconnect */
cid = pptpReq->disc.callID;
- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
info->cstate = PPTP_CALL_NONE;
/* untrack this call id, unexpect GRE packets */
@@ -383,7 +391,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
invalid:
pr_debug("invalid %s: type=%d cid=%u pcid=%u "
"cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
+ pptp_msg_name(msg),
msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
ntohs(info->pns_call_id), ntohs(info->pac_call_id));
return NF_ACCEPT;
@@ -403,7 +411,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;
msg = ntohs(ctlh->messageType);
- pr_debug("outbound control message %s\n", pptp_msg_name[msg]);
+ pr_debug("outbound control message %s\n", pptp_msg_name(msg));
switch (msg) {
case PPTP_START_SESSION_REQUEST:
@@ -425,7 +433,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
info->cstate = PPTP_CALL_OUT_REQ;
/* track PNS call id */
cid = pptpReq->ocreq.callID;
- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
info->pns_call_id = cid;
break;
@@ -439,7 +447,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
pcid = pptpReq->icack.peersCallID;
if (info->pac_call_id != pcid)
goto invalid;
- pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg],
+ pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg),
ntohs(cid), ntohs(pcid));
if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) {
@@ -479,7 +487,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
invalid:
pr_debug("invalid %s: type=%d cid=%u pcid=%u "
"cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
+ pptp_msg_name(msg),
msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
ntohs(info->pns_call_id), ntohs(info->pac_call_id));
return NF_ACCEPT;
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 7d7e30ea0ecf..a937d4f75613 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -65,6 +65,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
[SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
};
+#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
+
#define sNO SCTP_CONNTRACK_NONE
#define sCL SCTP_CONNTRACK_CLOSED
#define sCW SCTP_CONNTRACK_COOKIE_WAIT
@@ -288,6 +290,7 @@ static int sctp_packet(struct nf_conn *ct,
u_int32_t offset, count;
unsigned int *timeouts;
unsigned long map[256 / sizeof(unsigned long)] = { 0 };
+ bool ignore = false;
sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
if (sh == NULL)
@@ -332,15 +335,39 @@ static int sctp_packet(struct nf_conn *ct,
/* Sec 8.5.1 (D) */
if (sh->vtag != ct->proto.sctp.vtag[dir])
goto out_unlock;
- } else if (sch->type == SCTP_CID_HEARTBEAT ||
- sch->type == SCTP_CID_HEARTBEAT_ACK) {
+ } else if (sch->type == SCTP_CID_HEARTBEAT) {
+ if (ct->proto.sctp.vtag[dir] == 0) {
+ pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
+ ct->proto.sctp.vtag[dir] = sh->vtag;
+ } else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
+ if (test_bit(SCTP_CID_DATA, map) || ignore)
+ goto out_unlock;
+
+ ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ ct->proto.sctp.last_dir = dir;
+ ignore = true;
+ continue;
+ } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ }
+ } else if (sch->type == SCTP_CID_HEARTBEAT_ACK) {
if (ct->proto.sctp.vtag[dir] == 0) {
pr_debug("Setting vtag %x for dir %d\n",
sh->vtag, dir);
ct->proto.sctp.vtag[dir] = sh->vtag;
} else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
- pr_debug("Verification tag check failed\n");
- goto out_unlock;
+ if (test_bit(SCTP_CID_DATA, map) || ignore)
+ goto out_unlock;
+
+ if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 ||
+ ct->proto.sctp.last_dir == dir)
+ goto out_unlock;
+
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ ct->proto.sctp.vtag[dir] = sh->vtag;
+ ct->proto.sctp.vtag[!dir] = 0;
+ } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
}
}
@@ -375,6 +402,10 @@ static int sctp_packet(struct nf_conn *ct,
}
spin_unlock_bh(&ct->lock);
+ /* allow but do not refresh timeout */
+ if (ignore)
+ return NF_ACCEPT;
+
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = sctp_pernet(nf_ct_net(ct))->timeouts;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 7011ab27c437..40f8a1252394 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -549,13 +549,20 @@ static bool tcp_in_window(const struct nf_conn *ct,
swin = win << sender->td_scale;
sender->td_maxwin = (swin == 0 ? 1 : swin);
sender->td_maxend = end + sender->td_maxwin;
- /*
- * We haven't seen traffic in the other direction yet
- * but we have to tweak window tracking to pass III
- * and IV until that happens.
- */
- if (receiver->td_maxwin == 0)
+ if (receiver->td_maxwin == 0) {
+ /* We haven't seen traffic in the other
+ * direction yet but we have to tweak window
+ * tracking to pass III and IV until that
+ * happens.
+ */
receiver->td_end = receiver->td_maxend = sack;
+ } else if (sack == receiver->td_end + 1) {
+ /* Likely a reply to a keepalive.
+ * Needed for III.
+ */
+ receiver->td_end++;
+ }
+
}
} else if (((state->state == TCP_CONNTRACK_SYN_SENT
&& dir == IP_CT_DIR_ORIGINAL)
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 4c94f3ba2ae4..da0c9fa381d2 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -262,6 +262,7 @@ static const char* l4proto_name(u16 proto)
case IPPROTO_GRE: return "gre";
case IPPROTO_SCTP: return "sctp";
case IPPROTO_UDPLITE: return "udplite";
+ case IPPROTO_ICMPV6: return "icmpv6";
}
return "unknown";
@@ -500,6 +501,9 @@ nf_conntrack_hash_sysctl(struct ctl_table *table, int write,
{
int ret;
+ /* module_param hashsize could have changed value */
+ nf_conntrack_htable_size_user = nf_conntrack_htable_size;
+
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret < 0 || !write)
return ret;
@@ -590,8 +594,11 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
if (net->user_ns != &init_user_ns)
table[0].procname = NULL;
- if (!net_eq(&init_net, net))
+ if (!net_eq(&init_net, net)) {
+ table[0].mode = 0444;
table[2].mode = 0444;
+ table[5].mode = 0444;
+ }
net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table);
if (!net->ct.sysctl_header)
diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
index f4a566e67213..98d117f3340c 100644
--- a/net/netfilter/nf_dup_netdev.c
+++ b/net/netfilter/nf_dup_netdev.c
@@ -21,6 +21,7 @@ static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
skb_push(skb, skb->mac_len);
skb->dev = dev;
+ skb->tstamp = 0;
dev_queue_xmit(skb);
}
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 890799c16aa4..b3957fe7eced 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -360,7 +360,7 @@ static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
return -1;
tcph = (void *)(skb_network_header(skb) + thoff);
- inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);
+ inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
return 0;
}
@@ -377,7 +377,7 @@ static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
udph = (void *)(skb_network_header(skb) + thoff);
if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace2(&udph->check, skb, port,
- new_port, true);
+ new_port, false);
if (!udph->check)
udph->check = CSUM_MANGLED_0;
}
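Both hunks above flip the final inet_proto_csum_replace2() argument to false because the TCP/UDP port being rewritten is not part of the pseudo-header; with true the helper also adjusts the skb's accumulated checksum state, corrupting checksums on packets forwarded through the flowtable. The 16-bit field update itself is the usual RFC 1624 incremental form, sketched below with arbitrary example values (csum_update16 is a stand-in for the kernel helper's arithmetic):

#include <stdint.h>
#include <stdio.h>

/* Incremental Internet-checksum update after replacing one 16-bit
 * field (RFC 1624, eqn. 3): HC' = ~(~HC + ~m + m').  The pseudohdr
 * flag does not change this math; it only decides whether additional
 * checksum state in the skb has to be touched as well.
 */
static uint16_t csum_update16(uint16_t check, uint16_t old, uint16_t new)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old;
	sum += new;
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t check = 0x1c46;	/* hypothetical TCP checksum */
	uint16_t old_port = 0x0050;	/* 80 */
	uint16_t new_port = 0x1f90;	/* 8080 */

	printf("updated check: 0x%04x\n",
	       csum_update16(check, old_port, new_port));
	return 0;
}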
diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
index a8c5c846aec1..b164a0e1e053 100644
--- a/net/netfilter/nf_log_common.c
+++ b/net/netfilter/nf_log_common.c
@@ -176,6 +176,18 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
}
EXPORT_SYMBOL_GPL(nf_log_dump_packet_common);
+void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb)
+{
+ u16 vid;
+
+ if (!skb_vlan_tag_present(skb))
+ return;
+
+ vid = skb_vlan_tag_get(skb);
+ nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid);
+}
+EXPORT_SYMBOL_GPL(nf_log_dump_vlan);
+
/* bridge and netdev logging families share this code. */
void nf_log_l2packet(struct net *net, u_int8_t pf,
__be16 protocol,
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 2268b10a9dcf..c31df6a76504 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -1068,6 +1068,7 @@ static int __init nf_nat_init(void)
ret = register_pernet_subsys(&nat_net_ops);
if (ret < 0) {
nf_ct_extend_unregister(&nat_extend);
+ kvfree(nf_nat_bysource);
return ret;
}
diff --git a/net/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c
index 5790f70a83b2..d85c31c2433c 100644
--- a/net/netfilter/nf_nat_proto_udp.c
+++ b/net/netfilter/nf_nat_proto_udp.c
@@ -66,15 +66,14 @@ static bool udp_manip_pkt(struct sk_buff *skb,
enum nf_nat_manip_type maniptype)
{
struct udphdr *hdr;
- bool do_csum;
if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return false;
hdr = (struct udphdr *)(skb->data + hdroff);
- do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL;
+ __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype,
+ !!hdr->check);
- __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, do_csum);
return true;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5881f6668817..9cc8e92f4b00 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -532,7 +532,8 @@ static void nft_request_module(struct net *net, const char *fmt, ...)
static void lockdep_nfnl_nft_mutex_not_held(void)
{
#ifdef CONFIG_PROVE_LOCKING
- WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
+ if (debug_locks)
+ WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
#endif
}
@@ -718,11 +719,11 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk,
nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
family, table);
if (err < 0)
- goto err;
+ goto err_fill_table_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_table_info:
kfree_skb(skb2);
return err;
}
@@ -1383,11 +1384,11 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0,
family, table, chain);
if (err < 0)
- goto err;
+ goto err_fill_chain_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_chain_info:
kfree_skb(skb2);
return err;
}
@@ -2488,11 +2489,11 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
family, table, chain, rule);
if (err < 0)
- goto err;
+ goto err_fill_rule_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_rule_info:
kfree_skb(skb2);
return err;
}
@@ -3204,7 +3205,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
goto nla_put_failure;
}
- if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
+ if (set->udata &&
+ nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
goto nla_put_failure;
desc = nla_nest_start(skb, NFTA_SET_DESC);
@@ -3376,11 +3378,11 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
if (err < 0)
- goto err;
+ goto err_fill_set_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_set_info:
kfree_skb(skb2);
return err;
}
@@ -3450,7 +3452,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
NFT_SET_INTERVAL | NFT_SET_TIMEOUT |
NFT_SET_MAP | NFT_SET_EVAL |
NFT_SET_OBJECT))
- return -EINVAL;
+ return -EOPNOTSUPP;
/* Only one of these operations is supported */
if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
(NFT_SET_MAP | NFT_SET_OBJECT))
@@ -3488,7 +3490,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE]));
if (objtype == NFT_OBJECT_UNSPEC ||
objtype > NFT_OBJECT_MAX)
- return -EINVAL;
+ return -EOPNOTSUPP;
} else if (flags & NFT_SET_OBJECT)
return -EINVAL;
else
@@ -4156,24 +4158,18 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
err = -ENOMEM;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb == NULL)
- goto err1;
+ return err;
err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid,
NFT_MSG_NEWSETELEM, 0, set, &elem);
if (err < 0)
- goto err2;
+ goto err_fill_setelem;
- err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT);
- /* This avoids a loop in nfnetlink. */
- if (err < 0)
- goto err1;
+ return nfnetlink_unicast(skb, ctx->net, ctx->portid);
- return 0;
-err2:
+err_fill_setelem:
kfree_skb(skb);
-err1:
- /* this avoids a loop in nfnetlink. */
- return err == -EAGAIN ? -ENOBUFS : err;
+ return err;
}
/* called with rcu_read_lock held */
@@ -5272,10 +5268,11 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
family, table, obj, reset);
if (err < 0)
- goto err;
+ goto err_fill_obj_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_obj_info:
kfree_skb(skb2);
return err;
}
@@ -5932,10 +5929,11 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
NFT_MSG_NEWFLOWTABLE, 0, family,
flowtable);
if (err < 0)
- goto err;
+ goto err_fill_flowtable_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_flowtable_info:
kfree_skb(skb2);
return err;
}
@@ -6096,10 +6094,11 @@ static int nf_tables_getgen(struct net *net, struct sock *nlsk,
err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
nlh->nlmsg_seq);
if (err < 0)
- goto err;
+ goto err_fill_gen_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_gen_info:
kfree_skb(skb2);
return err;
}
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 7f2c1915763f..9bacddc761ba 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -148,10 +148,15 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
- int flags)
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid)
{
- return netlink_unicast(net->nfnl, skb, portid, flags);
+ int err;
+
+ err = nlmsg_unicast(net->nfnl, skb, portid);
+ if (err == -EAGAIN)
+ err = -ENOBUFS;
+
+ return err;
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);
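With the -EAGAIN handling folded into nfnetlink_unicast() above, the nf_tables GET handlers can simply return its value instead of open-coding the translation that previously lived only in nft_get_set_elem(). The point of the mapping, roughly, is that a full receiver socket makes the netlink core return -EAGAIN, which the nfnetlink dispatcher would treat as a request to replay the message; reporting -ENOBUFS instead surfaces the overrun to user space. A trivial sketch of the translation (unicast_status is an invented name):

#include <errno.h>
#include <stdio.h>

/* Map the netlink core's "receiver queue full" error to one that the
 * nfnetlink request dispatcher will not replay in a loop.
 */
static int unicast_status(int netlink_err)
{
	return netlink_err == -EAGAIN ? -ENOBUFS : netlink_err;
}

int main(void)
{
	printf("%d -> %d\n", -EAGAIN, unicast_status(-EAGAIN));
	printf("%d -> %d\n", -ENOENT, unicast_status(-ENOENT));
	return 0;
}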
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 66154dafa305..ddcb1b607474 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -106,7 +106,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
if (help->helper->data_len == 0)
return -EINVAL;
- nla_memcpy(help->data, nla_data(attr), sizeof(help->data));
+ nla_memcpy(help->data, attr, sizeof(help->data));
return 0;
}
@@ -242,6 +242,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
ret = -ENOMEM;
goto err2;
}
+ helper->data_len = size;
helper->flags |= NF_CT_HELPER_F_USERSPACE;
memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple));
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 332c69d27b47..25298b3eb854 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -359,8 +359,7 @@ __nfulnl_send(struct nfulnl_instance *inst)
goto out;
}
}
- nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
- MSG_DONTWAIT);
+ nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid);
out:
inst->qlen = 0;
inst->skb = NULL;
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index b0bc130947c9..917f06110c82 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -170,12 +170,12 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
const struct sk_buff *skb,
const struct iphdr *ip,
- unsigned char *opts)
+ unsigned char *opts,
+ struct tcphdr *_tcph)
{
const struct tcphdr *tcp;
- struct tcphdr _tcph;
- tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph);
+ tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph);
if (!tcp)
return NULL;
@@ -191,6 +191,8 @@ static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
ctx->optp = skb_header_pointer(skb, ip_hdrlen(skb) +
sizeof(struct tcphdr), ctx->optsize, opts);
+ if (!ctx->optp)
+ return NULL;
}
return tcp;
@@ -210,10 +212,11 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family,
int fmatch = FMATCH_WRONG;
struct nf_osf_hdr_ctx ctx;
const struct tcphdr *tcp;
+ struct tcphdr _tcph;
memset(&ctx, 0, sizeof(ctx));
- tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
+ tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
if (!tcp)
return false;
@@ -270,10 +273,11 @@ const char *nf_osf_find(const struct sk_buff *skb,
struct nf_osf_hdr_ctx ctx;
const struct tcphdr *tcp;
const char *genre = NULL;
+ struct tcphdr _tcph;
memset(&ctx, 0, sizeof(ctx));
- tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
+ tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
if (!tcp)
return NULL;
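The reworked nf_osf_hdr_ctx_init() above takes the struct tcphdr scratch buffer from its callers because skb_header_pointer() may return the supplied buffer itself; with the buffer declared locally, callers could be left holding a pointer into a stack frame that had already been torn down. The added ctx->optp check likewise bails out when the TCP options cannot be read. A small sketch of the caller-owned-buffer pattern; struct hdr and parse_hdr() are invented for illustration:

#include <stdio.h>
#include <string.h>

struct hdr {
	int field;
};

/* Returning a pointer that may alias the helper's own local buffer is
 * the bug being fixed: that storage dies when the helper returns.
 * Letting the caller pass the scratch buffer keeps it alive for as
 * long as the returned pointer is used.
 */
static const struct hdr *parse_hdr(const void *wire, struct hdr *scratch)
{
	memcpy(scratch, wire, sizeof(*scratch));
	return scratch;			/* caller-owned, still valid */
}

int main(void)
{
	unsigned char wire[sizeof(struct hdr)] = { 7 };
	struct hdr scratch;
	const struct hdr *h = parse_hdr(wire, &scratch);

	printf("field = %d\n", h->field);
	return 0;
}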
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index d33094f4ec41..f81a3ce0fe48 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -685,7 +685,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
*packet_id_ptr = htonl(entry->id);
/* nfnetlink_unicast will either free the nskb or add it to a socket */
- err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
+ err = nfnetlink_unicast(nskb, net, queue->peer_portid);
if (err < 0) {
if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
failopen = 1;
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index eb7f9a5f2aeb..4e544044fc2d 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -213,8 +213,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPR,
priv->expr->ops->size);
if (set->flags & NFT_SET_TIMEOUT) {
- if (timeout || set->timeout)
+ if (timeout || set->timeout) {
+ nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT);
nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
+ }
}
priv->timeout = timeout;
diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
index 649edbe77a20..10a12e094929 100644
--- a/net/netfilter/nft_fwd_netdev.c
+++ b/net/netfilter/nft_fwd_netdev.c
@@ -129,6 +129,7 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr,
return;
skb->dev = dev;
+ skb->tstamp = 0;
neigh_xmit(neigh_table, dev, addr, skb);
out:
regs->verdict.code = verdict;
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 72f13a1144dd..a7bdc532479a 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -79,13 +79,13 @@ static int nft_limit_init(struct nft_limit *limit,
return -EOVERFLOW;
if (pkts) {
- tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
+ tokens = div64_u64(limit->nsecs, limit->rate) * limit->burst;
} else {
		/* The token bucket size limits the number of tokens that can be
* accumulated. tokens_max specifies the bucket size.
* tokens_max = unit * (rate + burst) / rate.
*/
- tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
+ tokens = div64_u64(limit->nsecs * (limit->rate + limit->burst),
limit->rate);
}
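div_u64() divides a u64 by a 32-bit divisor, but limit->rate is a u64 that user space can set above 2^32; the truncated divisor then yields nonsense token counts and, when its low 32 bits happen to be zero, a divide error. div64_u64() keeps the full 64-bit divisor. A standalone illustration of the truncation, with arbitrary values:

#include <stdint.h>
#include <stdio.h>

/* div_u64(dividend, divisor) takes a 32-bit divisor; a rate above
 * 2^32 is silently truncated before the division.
 */
int main(void)
{
	uint64_t nsecs = 1000000000ULL;		/* 1 s in ns */
	uint64_t rate  = 0x100000001ULL;	/* just above 2^32 */

	uint64_t truncated = nsecs / (uint32_t)rate;	/* old behaviour */
	uint64_t full = nsecs / rate;			/* div64_u64 */

	printf("truncated divisor: %llu\n", (unsigned long long)truncated);
	printf("full divisor:      %llu\n", (unsigned long long)full);
	return 0;
}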
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index c15807d10b91..3e82a7d0df2a 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -135,7 +135,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
priv->type = NF_NAT_MANIP_DST;
break;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (tb[NFTA_NAT_FAMILY] == NULL)
@@ -202,7 +202,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
if (tb[NFTA_NAT_FLAGS]) {
priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS]));
if (priv->flags & ~NF_NAT_RANGE_MASK)
- return -EINVAL;
+ return -EOPNOTSUPP;
}
return nf_ct_netns_get(ctx->net, family);
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 19446a89a2a8..b1a9f330a51f 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -79,7 +79,9 @@ static void nft_payload_eval(const struct nft_expr *expr,
u32 *dest = &regs->data[priv->dreg];
int offset;
- dest[priv->len / NFT_REG32_SIZE] = 0;
+ if (priv->len % NFT_REG32_SIZE)
+ dest[priv->len / NFT_REG32_SIZE] = 0;
+
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
if (!skb_mac_header_was_set(skb))
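The hunk above stops nft_payload_eval() from unconditionally writing dest[priv->len / NFT_REG32_SIZE] = 0: when the copy length is an exact multiple of the 4-byte register size, that index is the register after the copied data, so only a partially filled final register should be pre-cleared. A userspace sketch of the fill rule; load_regs() and the buffer sizes are illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REG32_SIZE 4

/* Copy 'len' payload bytes into 32-bit registers.  Only a partially
 * filled final register needs zero padding; when len is a multiple of
 * four, touching dest[len / 4] would spill into the next register.
 */
static void load_regs(uint32_t *dest, const uint8_t *src, unsigned int len)
{
	if (len % REG32_SIZE)
		dest[len / REG32_SIZE] = 0;
	memcpy(dest, src, len);
}

int main(void)
{
	uint32_t regs[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff };
	const uint8_t payload[6] = { 1, 2, 3, 4, 5, 6 };

	load_regs(regs, payload, sizeof(payload));
	/* regs[1] holds bytes 5-6 plus zero padding; regs[2] and regs[3]
	 * are left untouched.
	 */
	printf("reg[1] = 0x%08x\n", (unsigned)regs[1]);
	return 0;
}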
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 05118e03c3e4..dbc4ed643b4b 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -392,9 +392,17 @@ static void nft_rhash_destroy(const struct nft_set *set)
(void *)set);
}
+/* Number of buckets is stored in u32, so cap our result to 1U<<31 */
+#define NFT_MAX_BUCKETS (1U << 31)
+
static u32 nft_hash_buckets(u32 size)
{
- return roundup_pow_of_two(size * 4 / 3);
+ u64 val = div_u64((u64)size * 4, 3);
+
+ if (val >= NFT_MAX_BUCKETS)
+ return NFT_MAX_BUCKETS;
+
+ return roundup_pow_of_two(val);
}
static bool nft_rhash_estimate(const struct nft_set_desc *desc, u32 features,
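nft_hash_buckets() above now does the size * 4 / 3 step in 64 bits and clamps the result: a u32 multiply wraps once the set size passes roughly one billion elements, and roundup_pow_of_two() on the wrapped value produces a bogus bucket count. A userspace sketch of the same computation, with the power-of-two rounding open-coded instead of the kernel helper:

#include <stdint.h>
#include <stdio.h>

#define MAX_BUCKETS (1U << 31)

static uint32_t buckets(uint32_t size)
{
	uint64_t val = (uint64_t)size * 4 / 3;

	if (val >= MAX_BUCKETS)
		return MAX_BUCKETS;

	/* round up to the next power of two (valid for 1 <= val < 2^31) */
	val--;
	val |= val >> 1;
	val |= val >> 2;
	val |= val >> 4;
	val |= val >> 8;
	val |= val >> 16;
	return (uint32_t)(val + 1);
}

int main(void)
{
	uint32_t size = 0x80000000u;		/* 2^31 elements */
	uint32_t wrapped = size * 4 / 3;	/* u32 math wraps to 0 */

	printf("u32 math: %u buckets\n", (unsigned)wrapped);
	printf("u64 math: %u buckets\n", (unsigned)buckets(size));
	return 0;
}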
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 0221510328d4..84d317418d18 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -36,6 +36,11 @@ static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
(*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}
+static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
+{
+ return !nft_rbtree_interval_end(rbe);
+}
+
static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
const struct nft_rbtree_elem *interval)
{
@@ -67,7 +72,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
if (interval &&
nft_rbtree_equal(set, this, interval) &&
nft_rbtree_interval_end(rbe) &&
- !nft_rbtree_interval_end(interval))
+ nft_rbtree_interval_start(interval))
continue;
interval = rbe;
} else if (d > 0)
@@ -92,7 +97,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
nft_set_elem_active(&interval->ext, genmask) &&
- !nft_rbtree_interval_end(interval)) {
+ nft_rbtree_interval_start(interval)) {
*ext = &interval->ext;
return true;
}
@@ -221,9 +226,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
p = &parent->rb_right;
else {
if (nft_rbtree_interval_end(rbe) &&
- !nft_rbtree_interval_end(new)) {
+ nft_rbtree_interval_start(new)) {
p = &parent->rb_left;
- } else if (!nft_rbtree_interval_end(rbe) &&
+ } else if (nft_rbtree_interval_start(rbe) &&
nft_rbtree_interval_end(new)) {
p = &parent->rb_right;
} else if (nft_set_elem_active(&rbe->ext, genmask)) {
@@ -314,10 +319,10 @@ static void *nft_rbtree_deactivate(const struct net *net,
parent = parent->rb_right;
else {
if (nft_rbtree_interval_end(rbe) &&
- !nft_rbtree_interval_end(this)) {
+ nft_rbtree_interval_start(this)) {
parent = parent->rb_left;
continue;
- } else if (!nft_rbtree_interval_end(rbe) &&
+ } else if (nft_rbtree_interval_start(rbe) &&
nft_rbtree_interval_end(this)) {
parent = parent->rb_right;
continue;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 3bab89dbc371..5dd6f6ce92e6 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -335,6 +335,7 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
const struct xt_match *m;
int have_rev = 0;
+ mutex_lock(&xt[af].mutex);
list_for_each_entry(m, &xt[af].match, list) {
if (strcmp(m->name, name) == 0) {
if (m->revision > *bestp)
@@ -343,6 +344,7 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
have_rev = 1;
}
}
+ mutex_unlock(&xt[af].mutex);
if (af != NFPROTO_UNSPEC && !have_rev)
return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
@@ -355,6 +357,7 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
const struct xt_target *t;
int have_rev = 0;
+ mutex_lock(&xt[af].mutex);
list_for_each_entry(t, &xt[af].target, list) {
if (strcmp(t->name, name) == 0) {
if (t->revision > *bestp)
@@ -363,6 +366,7 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
have_rev = 1;
}
}
+ mutex_unlock(&xt[af].mutex);
if (af != NFPROTO_UNSPEC && !have_rev)
return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
@@ -376,12 +380,10 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
{
int have_rev, best = -1;
- mutex_lock(&xt[af].mutex);
if (target == 1)
have_rev = target_revfn(af, name, revision, &best);
else
have_rev = match_revfn(af, name, revision, &best);
- mutex_unlock(&xt[af].mutex);
/* Nothing at all? Return 0 to try loading module. */
if (best == -1) {
@@ -736,7 +738,7 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
{
const struct xt_match *match = m->u.kernel.match;
struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
- int pad, off = xt_compat_match_offset(match);
+ int off = xt_compat_match_offset(match);
u_int16_t msize = cm->u.user.match_size;
char name[sizeof(m->u.user.name)];
@@ -746,9 +748,6 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
match->compat_from_user(m->data, cm->data);
else
memcpy(m->data, cm->data, msize - sizeof(*cm));
- pad = XT_ALIGN(match->matchsize) - match->matchsize;
- if (pad > 0)
- memset(m->data + match->matchsize, 0, pad);
msize += off;
m->u.user.match_size = msize;
@@ -1119,7 +1118,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
{
const struct xt_target *target = t->u.kernel.target;
struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
- int pad, off = xt_compat_target_offset(target);
+ int off = xt_compat_target_offset(target);
u_int16_t tsize = ct->u.user.target_size;
char name[sizeof(t->u.user.name)];
@@ -1129,9 +1128,6 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
target->compat_from_user(t->data, ct->data);
else
memcpy(t->data, ct->data, tsize - sizeof(*ct));
- pad = XT_ALIGN(target->targetsize) - target->targetsize;
- if (pad > 0)
- memset(t->data + target->targetsize, 0, pad);
tsize += off;
t->u.user.target_size = tsize;
@@ -1392,7 +1388,7 @@ xt_replace_table(struct xt_table *table,
table->private = newinfo;
/* make sure all cpus see new ->private value */
- smp_wmb();
+ smp_mb();
/*
* Even though table entries have now been swapped, other CPU's
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 9e05c86ba5c4..932c0ae99bdc 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -118,6 +118,9 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
} cfg;
int ret;
+ if (strnlen(info->name, sizeof(est->name)) >= sizeof(est->name))
+ return -ENAMETOOLONG;
+
net_get_random_once(&jhash_rnd, sizeof(jhash_rnd));
mutex_lock(&xn->hash_lock);
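The strnlen() check above rejects an estimator name that is not NUL-terminated within est->name before the name is copied and later used for hash lookups, so the target can no longer be made to read past the buffer. Roughly the same check in isolation; EST_NAME_LEN stands in for sizeof(est->name):

#include <stdio.h>
#include <string.h>

#define EST_NAME_LEN 16

/* strnlen() == buffer size means no terminator was found in range, so
 * any later copy or compare would run off the end of the buffer.
 */
static int check_name(const char *name)
{
	if (strnlen(name, EST_NAME_LEN) >= EST_NAME_LEN)
		return -1;	/* -ENAMETOOLONG in the kernel */
	return 0;
}

int main(void)
{
	char bad[32];

	memset(bad, 'A', sizeof(bad));	/* no NUL within the first 16 bytes */
	printf("good: %d\n", check_name("myestimator"));
	printf("bad:  %d\n", check_name(bad));
	return 0;
}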
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index 4ad5fe27e08b..097534dbc622 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -30,10 +30,9 @@ MODULE_ALIAS("ip6t_SECMARK");
static u8 mode;
static unsigned int
-secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
+secmark_tg(struct sk_buff *skb, const struct xt_secmark_target_info_v1 *info)
{
u32 secmark = 0;
- const struct xt_secmark_target_info *info = par->targinfo;
BUG_ON(info->mode != mode);
@@ -49,7 +48,7 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
-static int checkentry_lsm(struct xt_secmark_target_info *info)
+static int checkentry_lsm(struct xt_secmark_target_info_v1 *info)
{
int err;
@@ -81,15 +80,15 @@ static int checkentry_lsm(struct xt_secmark_target_info *info)
return 0;
}
-static int secmark_tg_check(const struct xt_tgchk_param *par)
+static int
+secmark_tg_check(const char *table, struct xt_secmark_target_info_v1 *info)
{
- struct xt_secmark_target_info *info = par->targinfo;
int err;
- if (strcmp(par->table, "mangle") != 0 &&
- strcmp(par->table, "security") != 0) {
+ if (strcmp(table, "mangle") != 0 &&
+ strcmp(table, "security") != 0) {
pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
- par->table);
+ table);
return -EINVAL;
}
@@ -124,25 +123,76 @@ static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
}
}
-static struct xt_target secmark_tg_reg __read_mostly = {
- .name = "SECMARK",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .checkentry = secmark_tg_check,
- .destroy = secmark_tg_destroy,
- .target = secmark_tg,
- .targetsize = sizeof(struct xt_secmark_target_info),
- .me = THIS_MODULE,
+static int secmark_tg_check_v0(const struct xt_tgchk_param *par)
+{
+ struct xt_secmark_target_info *info = par->targinfo;
+ struct xt_secmark_target_info_v1 newinfo = {
+ .mode = info->mode,
+ };
+ int ret;
+
+ memcpy(newinfo.secctx, info->secctx, SECMARK_SECCTX_MAX);
+
+ ret = secmark_tg_check(par->table, &newinfo);
+ info->secid = newinfo.secid;
+
+ return ret;
+}
+
+static unsigned int
+secmark_tg_v0(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_secmark_target_info *info = par->targinfo;
+ struct xt_secmark_target_info_v1 newinfo = {
+ .secid = info->secid,
+ };
+
+ return secmark_tg(skb, &newinfo);
+}
+
+static int secmark_tg_check_v1(const struct xt_tgchk_param *par)
+{
+ return secmark_tg_check(par->table, par->targinfo);
+}
+
+static unsigned int
+secmark_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ return secmark_tg(skb, par->targinfo);
+}
+
+static struct xt_target secmark_tg_reg[] __read_mostly = {
+ {
+ .name = "SECMARK",
+ .revision = 0,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = secmark_tg_check_v0,
+ .destroy = secmark_tg_destroy,
+ .target = secmark_tg_v0,
+ .targetsize = sizeof(struct xt_secmark_target_info),
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "SECMARK",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = secmark_tg_check_v1,
+ .destroy = secmark_tg_destroy,
+ .target = secmark_tg_v1,
+ .targetsize = sizeof(struct xt_secmark_target_info_v1),
+ .usersize = offsetof(struct xt_secmark_target_info_v1, secid),
+ .me = THIS_MODULE,
+ },
};
static int __init secmark_tg_init(void)
{
- return xt_register_target(&secmark_tg_reg);
+ return xt_register_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
}
static void __exit secmark_tg_exit(void)
{
- xt_unregister_target(&secmark_tg_reg);
+ xt_unregister_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
}
module_init(secmark_tg_init);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 570144507df1..cb58bc7ae30d 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -155,7 +155,8 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
/*
 * Drop entries with timestamps older than 'time'.
*/
-static void recent_entry_reap(struct recent_table *t, unsigned long time)
+static void recent_entry_reap(struct recent_table *t, unsigned long time,
+ struct recent_entry *working, bool update)
{
struct recent_entry *e;
@@ -165,6 +166,12 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time)
e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
/*
+	 * Do not reap the entry which is going to be updated.
+ */
+ if (e == working && update)
+ return;
+
+ /*
* The last time stamp is the most recent.
*/
if (time_after(time, e->stamps[e->index-1]))
@@ -306,7 +313,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
/* info->seconds must be non-zero */
if (info->check_set & XT_RECENT_REAP)
- recent_entry_reap(t, time);
+ recent_entry_reap(t, time, e,
+ info->check_set & XT_RECENT_UPDATE && ret);
}
if (info->check_set & XT_RECENT_SET ||
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 9aacf2da3d98..3e3494c8d42f 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -581,6 +581,7 @@ list_start:
break;
}
+ cipso_v4_doi_putdef(doi_def);
rcu_read_unlock();
genlmsg_end(ans_skb, data);
@@ -589,12 +590,14 @@ list_start:
list_retry:
/* XXX - this limit is a guesstimate */
if (nlsze_mult < 4) {
+ cipso_v4_doi_putdef(doi_def);
rcu_read_unlock();
kfree_skb(ans_skb);
nlsze_mult *= 2;
goto list_start;
}
list_failure_lock:
+ cipso_v4_doi_putdef(doi_def);
rcu_read_unlock();
list_failure:
kfree_skb(ans_skb);
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 41d0e95d171e..b1a1718495f3 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -99,6 +99,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
kfree(netlbl_domhsh_addr6_entry(iter6));
}
#endif /* IPv6 */
+ kfree(ptr->def.addrsel);
}
kfree(ptr->domain);
kfree(ptr);
@@ -550,6 +551,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
goto add_return;
}
#endif /* IPv6 */
+ /* cleanup the new entry since we've moved everything over */
+ netlbl_domhsh_free_entry(&entry->rcu);
} else
ret_val = -EINVAL;
@@ -593,6 +596,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
{
int ret_val = 0;
struct audit_buffer *audit_buf;
+ struct netlbl_af4list *iter4;
+ struct netlbl_domaddr4_map *map4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct netlbl_af6list *iter6;
+ struct netlbl_domaddr6_map *map6;
+#endif /* IPv6 */
if (entry == NULL)
return -ENOENT;
@@ -610,6 +619,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
ret_val = -ENOENT;
spin_unlock(&netlbl_domhsh_lock);
+ if (ret_val)
+ return ret_val;
+
audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
if (audit_buf != NULL) {
audit_log_format(audit_buf,
@@ -619,40 +631,29 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
audit_log_end(audit_buf);
}
- if (ret_val == 0) {
- struct netlbl_af4list *iter4;
- struct netlbl_domaddr4_map *map4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct netlbl_af6list *iter6;
- struct netlbl_domaddr6_map *map6;
-#endif /* IPv6 */
-
- switch (entry->def.type) {
- case NETLBL_NLTYPE_ADDRSELECT:
- netlbl_af4list_foreach_rcu(iter4,
- &entry->def.addrsel->list4) {
- map4 = netlbl_domhsh_addr4_entry(iter4);
- cipso_v4_doi_putdef(map4->def.cipso);
- }
+ switch (entry->def.type) {
+ case NETLBL_NLTYPE_ADDRSELECT:
+ netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) {
+ map4 = netlbl_domhsh_addr4_entry(iter4);
+ cipso_v4_doi_putdef(map4->def.cipso);
+ }
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach_rcu(iter6,
- &entry->def.addrsel->list6) {
- map6 = netlbl_domhsh_addr6_entry(iter6);
- calipso_doi_putdef(map6->def.calipso);
- }
+ netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) {
+ map6 = netlbl_domhsh_addr6_entry(iter6);
+ calipso_doi_putdef(map6->def.calipso);
+ }
#endif /* IPv6 */
- break;
- case NETLBL_NLTYPE_CIPSOV4:
- cipso_v4_doi_putdef(entry->def.cipso);
- break;
+ break;
+ case NETLBL_NLTYPE_CIPSOV4:
+ cipso_v4_doi_putdef(entry->def.cipso);
+ break;
#if IS_ENABLED(CONFIG_IPV6)
- case NETLBL_NLTYPE_CALIPSO:
- calipso_doi_putdef(entry->def.calipso);
- break;
+ case NETLBL_NLTYPE_CALIPSO:
+ calipso_doi_putdef(entry->def.calipso);
+ break;
#endif /* IPv6 */
- }
- call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
}
+ call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
return ret_val;
}
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index ee3e5b6471a6..15fe2120b310 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -748,6 +748,12 @@ int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap,
if ((off & (BITS_PER_LONG - 1)) != 0)
return -EINVAL;
+ /* a null catmap is equivalent to an empty one */
+ if (!catmap) {
+ *offset = (u32)-1;
+ return 0;
+ }
+
if (off < catmap->startbit) {
off = catmap->startbit;
*offset = off;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index c92894c3e40a..0067f472367b 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1179,12 +1179,13 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
struct netlbl_unlhsh_walk_arg cb_arg;
u32 skip_bkt = cb->args[0];
u32 skip_chain = cb->args[1];
- u32 iter_bkt;
- u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;
+ u32 skip_addr4 = cb->args[2];
+ u32 iter_bkt, iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;
struct netlbl_unlhsh_iface *iface;
struct list_head *iter_list;
struct netlbl_af4list *addr4;
#if IS_ENABLED(CONFIG_IPV6)
+ u32 skip_addr6 = cb->args[3];
struct netlbl_af6list *addr6;
#endif
@@ -1195,7 +1196,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
rcu_read_lock();
for (iter_bkt = skip_bkt;
iter_bkt < rcu_dereference(netlbl_unlhsh)->size;
- iter_bkt++, iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0) {
+ iter_bkt++) {
iter_list = &rcu_dereference(netlbl_unlhsh)->tbl[iter_bkt];
list_for_each_entry_rcu(iface, iter_list, list) {
if (!iface->valid ||
@@ -1203,7 +1204,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
continue;
netlbl_af4list_foreach_rcu(addr4,
&iface->addr4_list) {
- if (iter_addr4++ < cb->args[2])
+ if (iter_addr4++ < skip_addr4)
continue;
if (netlbl_unlabel_staticlist_gen(
NLBL_UNLABEL_C_STATICLIST,
@@ -1216,10 +1217,12 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
goto unlabel_staticlist_return;
}
}
+ iter_addr4 = 0;
+ skip_addr4 = 0;
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(addr6,
&iface->addr6_list) {
- if (iter_addr6++ < cb->args[3])
+ if (iter_addr6++ < skip_addr6)
continue;
if (netlbl_unlabel_staticlist_gen(
NLBL_UNLABEL_C_STATICLIST,
@@ -1232,8 +1235,12 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
goto unlabel_staticlist_return;
}
}
+ iter_addr6 = 0;
+ skip_addr6 = 0;
#endif /* IPv6 */
}
+ iter_chain = 0;
+ skip_chain = 0;
}
unlabel_staticlist_return:
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index f0ec068e1d02..ede73ecfb1f5 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -961,60 +961,11 @@ static struct genl_family genl_ctrl __ro_after_init = {
.netnsok = true,
};
-static int genl_bind(struct net *net, int group)
-{
- struct genl_family *f;
- int err = -ENOENT;
- unsigned int id;
-
- down_read(&cb_lock);
-
- idr_for_each_entry(&genl_fam_idr, f, id) {
- if (group >= f->mcgrp_offset &&
- group < f->mcgrp_offset + f->n_mcgrps) {
- int fam_grp = group - f->mcgrp_offset;
-
- if (!f->netnsok && net != &init_net)
- err = -ENOENT;
- else if (f->mcast_bind)
- err = f->mcast_bind(net, fam_grp);
- else
- err = 0;
- break;
- }
- }
- up_read(&cb_lock);
-
- return err;
-}
-
-static void genl_unbind(struct net *net, int group)
-{
- struct genl_family *f;
- unsigned int id;
-
- down_read(&cb_lock);
-
- idr_for_each_entry(&genl_fam_idr, f, id) {
- if (group >= f->mcgrp_offset &&
- group < f->mcgrp_offset + f->n_mcgrps) {
- int fam_grp = group - f->mcgrp_offset;
-
- if (f->mcast_unbind)
- f->mcast_unbind(net, fam_grp);
- break;
- }
- }
- up_read(&cb_lock);
-}
-
static int __net_init genl_pernet_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = genl_rcv,
.flags = NL_CFG_F_NONROOT_RECV,
- .bind = genl_bind,
- .unbind = genl_unbind,
};
/* we'll bump the group number right afterwards */
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index b76aa668a94b..53ced34a1fdd 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -211,6 +211,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
/* refcount initialized at 1 */
spin_unlock_bh(&nr_node_list_lock);
+ nr_neigh_put(nr_neigh);
return 0;
}
nr_node_lock(nr_node);
diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
index 4f9a973988b2..1eed0cf59190 100644
--- a/net/nfc/digital_dep.c
+++ b/net/nfc/digital_dep.c
@@ -1285,6 +1285,8 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
}
rc = nfc_tm_data_received(ddev->nfc_dev, resp);
+ if (rc)
+ resp = NULL;
exit:
kfree_skb(ddev->chaining_skb);
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 7d766350c08e..59de4f54dd18 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -120,11 +120,15 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
llcp_sock->service_name_len,
GFP_KERNEL);
if (!llcp_sock->service_name) {
+ nfc_llcp_local_put(llcp_sock->local);
+ llcp_sock->local = NULL;
ret = -ENOMEM;
goto put_dev;
}
llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ nfc_llcp_local_put(llcp_sock->local);
+ llcp_sock->local = NULL;
kfree(llcp_sock->service_name);
llcp_sock->service_name = NULL;
ret = -EADDRINUSE;
@@ -683,6 +687,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
ret = -EISCONN;
goto error;
}
+ if (sk->sk_state == LLCP_CONNECTING) {
+ ret = -EINPROGRESS;
+ goto error;
+ }
dev = nfc_get_device(addr->dev_idx);
if (dev == NULL) {
@@ -714,6 +722,8 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
llcp_sock->local = nfc_llcp_local_get(local);
llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ nfc_llcp_local_put(llcp_sock->local);
+ llcp_sock->local = NULL;
ret = -ENOMEM;
goto put_dev;
}
@@ -751,8 +761,12 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
sock_unlink:
nfc_llcp_put_ssap(local, llcp_sock->ssap);
+ nfc_llcp_local_put(llcp_sock->local);
+ llcp_sock->local = NULL;
nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
+ kfree(llcp_sock->service_name);
+ llcp_sock->service_name = NULL;
put_dev:
nfc_put_device(dev);
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 074960154993..33c23af6709d 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -1187,6 +1187,7 @@ EXPORT_SYMBOL(nci_allocate_device);
void nci_free_device(struct nci_dev *ndev)
{
nfc_free_device(ndev->nfc_dev);
+ nci_hci_deallocate(ndev);
kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index c0d323b58e73..c972c212e7ca 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -807,3 +807,8 @@ struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev)
return hdev;
}
+
+void nci_hci_deallocate(struct nci_dev *ndev)
+{
+ kfree(ndev->hci_dev);
+}
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index a65a5a5f434b..4884e1987562 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -871,6 +871,7 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
if (!dev->polling) {
device_unlock(&dev->dev);
+ nfc_put_device(dev);
return -EINVAL;
}
@@ -1235,7 +1236,7 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
u32 idx;
char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index e2188deb08dc..57a07ab80d92 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -117,7 +117,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
if (addr->target_idx > dev->target_next_idx - 1 ||
addr->target_idx < dev->target_next_idx - dev->n_targets) {
rc = -EINVAL;
- goto error;
+ goto put_dev;
}
rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
@@ -344,10 +344,13 @@ static int rawsock_create(struct net *net, struct socket *sock,
if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW))
return -ESOCKTNOSUPPORT;
- if (sock->type == SOCK_RAW)
+ if (sock->type == SOCK_RAW) {
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
sock->ops = &rawsock_raw_ops;
- else
+ } else {
sock->ops = &rawsock_ops;
+ }
sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
if (!sk)
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 8211e8e97c96..849fcf973c74 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -892,17 +892,17 @@ static void ovs_fragment(struct net *net, struct vport *vport,
}
if (key->eth.type == htons(ETH_P_IP)) {
- struct dst_entry ovs_dst;
+ struct rtable ovs_rt = { 0 };
unsigned long orig_dst;
prepare_frag(vport, skb, orig_network_offset,
ovs_key_mac_proto(key));
- dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
+ dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
DST_OBSOLETE_NONE, DST_NOCOUNT);
- ovs_dst.dev = vport->dev;
+ ovs_rt.dst.dev = vport->dev;
orig_dst = skb->_skb_refdst;
- skb_dst_set_noref(skb, &ovs_dst);
+ skb_dst_set_noref(skb, &ovs_rt.dst);
IPCB(skb)->frag_max_size = mru;
ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 6dcb59f272e1..fb13fcfedaf4 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -283,10 +283,6 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
ovs_ct_update_key(skb, NULL, key, false, false);
}
-#define IN6_ADDR_INITIALIZER(ADDR) \
- { (ADDR).s6_addr32[0], (ADDR).s6_addr32[1], \
- (ADDR).s6_addr32[2], (ADDR).s6_addr32[3] }
-
int ovs_ct_put_key(const struct sw_flow_key *swkey,
const struct sw_flow_key *output, struct sk_buff *skb)
{
@@ -308,24 +304,30 @@ int ovs_ct_put_key(const struct sw_flow_key *swkey,
if (swkey->ct_orig_proto) {
if (swkey->eth.type == htons(ETH_P_IP)) {
- struct ovs_key_ct_tuple_ipv4 orig = {
- output->ipv4.ct_orig.src,
- output->ipv4.ct_orig.dst,
- output->ct.orig_tp.src,
- output->ct.orig_tp.dst,
- output->ct_orig_proto,
- };
+ struct ovs_key_ct_tuple_ipv4 orig;
+
+ memset(&orig, 0, sizeof(orig));
+ orig.ipv4_src = output->ipv4.ct_orig.src;
+ orig.ipv4_dst = output->ipv4.ct_orig.dst;
+ orig.src_port = output->ct.orig_tp.src;
+ orig.dst_port = output->ct.orig_tp.dst;
+ orig.ipv4_proto = output->ct_orig_proto;
+
if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
sizeof(orig), &orig))
return -EMSGSIZE;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
- struct ovs_key_ct_tuple_ipv6 orig = {
- IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.src),
- IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.dst),
- output->ct.orig_tp.src,
- output->ct.orig_tp.dst,
- output->ct_orig_proto,
- };
+ struct ovs_key_ct_tuple_ipv6 orig;
+
+ memset(&orig, 0, sizeof(orig));
+ memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32,
+ sizeof(orig.ipv6_src));
+ memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32,
+ sizeof(orig.ipv6_dst));
+ orig.src_port = output->ct.orig_tp.src;
+ orig.dst_port = output->ct.orig_tp.dst;
+ orig.ipv6_proto = output->ct_orig_proto;
+
if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
sizeof(orig), &orig))
return -EMSGSIZE;
@@ -897,15 +899,19 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
}
err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);
- if (err == NF_ACCEPT &&
- ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
- if (maniptype == NF_NAT_MANIP_SRC)
- maniptype = NF_NAT_MANIP_DST;
- else
- maniptype = NF_NAT_MANIP_SRC;
-
- err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
- maniptype);
+ if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
+ if (ct->status & IPS_SRC_NAT) {
+ if (maniptype == NF_NAT_MANIP_SRC)
+ maniptype = NF_NAT_MANIP_DST;
+ else
+ maniptype = NF_NAT_MANIP_SRC;
+
+ err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
+ maniptype);
+ } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+ err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL,
+ NF_NAT_MANIP_SRC);
+ }
}
/* Mark NAT done if successful and update the flow key. */
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index c038e021a591..9b0c54f0702c 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -255,8 +255,8 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
*
* Start with a full bucket.
*/
- band->bucket = (band->burst_size + band->rate) * 1000;
- band_max_delta_t = band->bucket / band->rate;
+ band->bucket = (band->burst_size + band->rate) * 1000ULL;
+ band_max_delta_t = div_u64(band->bucket, band->rate);
if (band_max_delta_t > meter->max_delta_t)
meter->max_delta_t = band_max_delta_t;
band++;
@@ -464,6 +464,14 @@ bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,
spin_lock(&meter->lock);
long_delta_ms = (now_ms - meter->used); /* ms */
+ if (long_delta_ms < 0) {
+ /* This condition means that we have several threads fighting
+ * for a meter lock, and the one who received the packets a
+ * bit later wins. Assuming that all racing threads received
+		 * bit later wins. Assume that all racing threads received
+		 * packets at the same time to avoid overflow.
+ long_delta_ms = 0;
+ }
/* Make sure delta_ms will not be too large, so that bucket will not
* wrap around below.
diff --git a/net/openvswitch/meter.h b/net/openvswitch/meter.h
index 964ace2650f8..970557ed5b5b 100644
--- a/net/openvswitch/meter.h
+++ b/net/openvswitch/meter.h
@@ -26,7 +26,7 @@ struct dp_meter_band {
u32 type;
u32 rate;
u32 burst_size;
- u32 bucket; /* 1/1000 packets, or in bits */
+ u64 bucket; /* 1/1000 packets, or in bits */
struct ovs_flow_stats stats;
};
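The bucket field is widened to u64 above (with the fill computed using a 1000ULL multiplier and div_u64()) because (burst_size + rate) * 1000 overflows a u32 once the configured sum passes roughly 4.29 million, giving a far-too-small bucket and a wrong max_delta_t. A standalone illustration with made-up rates:

#include <stdint.h>
#include <stdio.h>

/* bucket is kept in 1/1000ths of a packet, or in bits. */
int main(void)
{
	uint32_t rate = 10000000, burst = 10000000;	/* e.g. 10M each */

	uint32_t bucket32 = (burst + rate) * 1000;	/* u32 math wraps */
	uint64_t bucket64 = (burst + rate) * 1000ULL;	/* patched math */

	printf("u32 bucket: %u\n", (unsigned)bucket32);
	printf("u64 bucket: %llu\n", (unsigned long long)bucket64);
	printf("max_delta_t: %llu\n",
	       (unsigned long long)(bucket64 / rate));
	return 0;
}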
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 0e029aefa707..16b745d254fe 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -949,6 +949,7 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
}
static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
+ __releases(&pkc->blk_fill_in_prog_lock)
{
struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
atomic_dec(&pkc->blk_fill_in_prog);
@@ -996,6 +997,7 @@ static void prb_fill_curr_block(char *curr,
struct tpacket_kbdq_core *pkc,
struct tpacket_block_desc *pbd,
unsigned int len)
+ __acquires(&pkc->blk_fill_in_prog_lock)
{
struct tpacket3_hdr *ppd;
@@ -2160,7 +2162,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
int skb_len = skb->len;
unsigned int snaplen, res;
unsigned long status = TP_STATUS_USER;
- unsigned short macoff, netoff, hdrlen;
+ unsigned short macoff, hdrlen;
+ unsigned int netoff;
struct sk_buff *copy_skb = NULL;
struct timespec ts;
__u32 ts_status;
@@ -2223,6 +2226,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
macoff = netoff - maclen;
}
+ if (netoff > USHRT_MAX) {
+ spin_lock(&sk->sk_receive_queue.lock);
+ po->stats.stats1.tp_drops++;
+ spin_unlock(&sk->sk_receive_queue.lock);
+ goto drop_n_restore;
+ }
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
if (po->copy_thresh &&
@@ -2272,8 +2281,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (do_vnet &&
virtio_net_hdr_from_skb(skb, h.raw + macoff -
sizeof(struct virtio_net_hdr),
- vio_le(), true, 0))
+ vio_le(), true, 0)) {
+ if (po->tp_version == TPACKET_V3)
+ prb_clear_blk_fill_status(&po->rx_ring);
goto drop_n_account;
+ }
if (po->tp_version <= TPACKET_V2) {
packet_increment_rx_head(po, &po->rx_ring);
@@ -2379,7 +2391,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
__clear_bit(slot_id, po->rx_ring.rx_owner_map);
spin_unlock(&sk->sk_receive_queue.lock);
sk->sk_data_ready(sk);
- } else {
+ } else if (po->tp_version == TPACKET_V3) {
prb_clear_blk_fill_status(&po->rx_ring);
}
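tpacket_rcv() above now computes netoff as an unsigned int and drops the frame when it exceeds USHRT_MAX: tp_reserve is a user-controlled u32, so the old unsigned short could silently wrap, after which macoff = netoff - maclen pointed outside the ring frame. A toy illustration of the wrap, with invented sizes (the real offset calculation also involves alignment and the optional virtio_net header):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int hdrlen = 48, reserve = 65000, maclen = 1000;
	unsigned int netoff = hdrlen + reserve + maclen;	/* 66048 */

	unsigned short wrapped = (unsigned short)netoff;	/* old type */

	printf("netoff = %u, as u16 = %u\n", netoff, wrapped);
	if (netoff > USHRT_MAX)
		printf("drop: offset does not fit in the frame header\n");
	return 0;
}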
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 82d38f93c81a..43f63d0606ec 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -185,7 +185,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
{
struct qrtr_hdr_v1 *hdr;
size_t len = skb->len;
- int rc = -ENODEV;
+ int rc;
hdr = skb_push(skb, sizeof(*hdr));
hdr->version = cpu_to_le32(QRTR_PROTO_VER_1);
@@ -194,7 +194,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
hdr->src_port_id = cpu_to_le32(from->sq_port);
if (to->sq_port == QRTR_PORT_CTRL) {
hdr->dst_node_id = cpu_to_le32(node->nid);
- hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
+ hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
} else {
hdr->dst_node_id = cpu_to_le32(to->sq_node);
hdr->dst_port_id = cpu_to_le32(to->sq_port);
@@ -203,15 +203,17 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
hdr->size = cpu_to_le32(len);
hdr->confirm_rx = 0;
- skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
-
- mutex_lock(&node->ep_lock);
- if (node->ep)
- rc = node->ep->xmit(node->ep, skb);
- else
- kfree_skb(skb);
- mutex_unlock(&node->ep_lock);
+ rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
+ if (!rc) {
+ mutex_lock(&node->ep_lock);
+ rc = -ENODEV;
+ if (node->ep)
+ rc = node->ep->xmit(node->ep, skb);
+ else
+ kfree_skb(skb);
+ mutex_unlock(&node->ep_lock);
+ }
return rc;
}
@@ -266,10 +268,10 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
unsigned int ver;
size_t hdrlen;
- if (len & 3)
+ if (len == 0 || len & 3)
return -EINVAL;
- skb = netdev_alloc_skb(NULL, len);
+ skb = __netdev_alloc_skb(NULL, len, GFP_ATOMIC | __GFP_NOWARN);
if (!skb)
return -ENOMEM;
@@ -280,6 +282,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
switch (ver) {
case QRTR_PROTO_VER_1:
+ if (len < sizeof(*v1))
+ goto err;
v1 = data;
hdrlen = sizeof(*v1);
@@ -293,6 +297,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
size = le32_to_cpu(v1->size);
break;
case QRTR_PROTO_VER_2:
+ if (len < sizeof(*v2))
+ goto err;
v2 = data;
hdrlen = sizeof(*v2) + v2->optlen;
@@ -550,23 +556,25 @@ static void qrtr_port_remove(struct qrtr_sock *ipc)
*/
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
+ u32 min_port;
int rc;
mutex_lock(&qrtr_port_lock);
if (!*port) {
- rc = idr_alloc(&qrtr_ports, ipc,
- QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
- GFP_ATOMIC);
- if (rc >= 0)
- *port = rc;
+ min_port = QRTR_MIN_EPH_SOCKET;
+ rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, GFP_ATOMIC);
+ if (!rc)
+ *port = min_port;
} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
rc = -EACCES;
} else if (*port == QRTR_PORT_CTRL) {
- rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
+ min_port = 0;
+ rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC);
} else {
- rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
- if (rc >= 0)
- *port = rc;
+ min_port = *port;
+ rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC);
+ if (!rc)
+ *port = min_port;
}
mutex_unlock(&qrtr_port_lock);
@@ -718,7 +726,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
}
mutex_unlock(&qrtr_node_lock);
- qrtr_local_enqueue(node, skb, type, from, to);
+ qrtr_local_enqueue(NULL, skb, type, from, to);
return 0;
}
@@ -769,27 +777,30 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
node = NULL;
if (addr->sq_node == QRTR_NODE_BCAST) {
- enqueue_fn = qrtr_bcast_enqueue;
- if (addr->sq_port != QRTR_PORT_CTRL) {
+ if (addr->sq_port != QRTR_PORT_CTRL &&
+ qrtr_local_nid != QRTR_NODE_BCAST) {
release_sock(sk);
return -ENOTCONN;
}
+ enqueue_fn = qrtr_bcast_enqueue;
} else if (addr->sq_node == ipc->us.sq_node) {
enqueue_fn = qrtr_local_enqueue;
} else {
- enqueue_fn = qrtr_node_enqueue;
node = qrtr_node_lookup(addr->sq_node);
if (!node) {
release_sock(sk);
return -ECONNRESET;
}
+ enqueue_fn = qrtr_node_enqueue;
}
plen = (len + 3) & ~3;
skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
msg->msg_flags & MSG_DONTWAIT, &rc);
- if (!skb)
+ if (!skb) {
+ rc = -ENOMEM;
goto out_node;
+ }
skb_reserve(skb, QRTR_HDR_MAX_SIZE);
@@ -857,6 +868,11 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
rc = copied;
if (addr) {
+ /* There is an anonymous 2-byte hole after sq_family,
+ * make sure to clear it.
+ */
+ memset(addr, 0, sizeof(*addr));
+
cb = (struct qrtr_cb *)skb->cb;
addr->sq_family = AF_QIPCRTR;
addr->sq_node = cb->src_node;
@@ -1008,6 +1024,7 @@ static int qrtr_release(struct socket *sock)
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
+ sock_orphan(sk);
sock->sk = NULL;
if (!sock_flag(sk, SOCK_ZAPPED))
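The memset() added to qrtr_recvmsg() above matters because struct sockaddr_qrtr has an implicit 2-byte hole after its 16-bit sq_family member; copying the struct out without clearing it would leak kernel stack bytes through that hole. A layout sketch using a stand-in struct of the same shape (sockaddr_qrtr_like is invented; the real struct uses __kernel_sa_family_t):

#include <stdint.h>
#include <stdio.h>

struct sockaddr_qrtr_like {
	uint16_t sq_family;
	/* 2 bytes of implicit padding here on common ABIs */
	uint32_t sq_node;
	uint32_t sq_port;
};

int main(void)
{
	size_t members = sizeof(uint16_t) + 2 * sizeof(uint32_t);

	printf("sizeof = %zu, members = %zu, padding = %zu\n",
	       sizeof(struct sockaddr_qrtr_like), members,
	       sizeof(struct sockaddr_qrtr_like) - members);
	return 0;
}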
diff --git a/net/qrtr/tun.c b/net/qrtr/tun.c
index e35869e81766..cb425e216d46 100644
--- a/net/qrtr/tun.c
+++ b/net/qrtr/tun.c
@@ -31,6 +31,7 @@ static int qrtr_tun_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
static int qrtr_tun_open(struct inode *inode, struct file *filp)
{
struct qrtr_tun *tun;
+ int ret;
tun = kzalloc(sizeof(*tun), GFP_KERNEL);
if (!tun)
@@ -43,7 +44,16 @@ static int qrtr_tun_open(struct inode *inode, struct file *filp)
filp->private_data = tun;
- return qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO);
+ ret = qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ filp->private_data = NULL;
+ kfree(tun);
+ return ret;
}
static ssize_t qrtr_tun_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -80,6 +90,12 @@ static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from)
ssize_t ret;
void *kbuf;
+ if (!len)
+ return -EINVAL;
+
+ if (len > KMALLOC_MAX_SIZE)
+ return -ENOMEM;
+
kbuf = kzalloc(len, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index e1965d9cbcf8..9882cebfcad6 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -531,6 +531,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args,
if (args->nr_local == 0)
return -EINVAL;
+ if (args->nr_local > UIO_MAXIOV)
+ return -EMSGSIZE;
+
iov->iov = kcalloc(args->nr_local,
sizeof(struct rds_iovec),
GFP_KERNEL);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index c0b945516cdb..3ca278988b52 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -455,12 +455,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
struct rds_notifier *notifier;
- struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
+ struct rds_rdma_notify cmsg;
unsigned int count = 0, max_messages = ~0U;
unsigned long flags;
LIST_HEAD(copy);
int err = 0;
+ memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */
/* put_cmsg copies to user space and thus may sleep. We can't do this
* with rs_lock held, so first grab as many notifications as we can stuff
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 094a6621f8e8..c318e5c9f6df 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -99,10 +99,19 @@ static void rose_loopback_timer(struct timer_list *unused)
}
if (frametype == ROSE_CALL_REQUEST) {
- if ((dev = rose_dev_get(dest)) != NULL) {
- if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0)
- kfree_skb(skb);
- } else {
+ if (!rose_loopback_neigh->dev) {
+ kfree_skb(skb);
+ continue;
+ }
+
+ dev = rose_dev_get(dest);
+ if (!dev) {
+ kfree_skb(skb);
+ continue;
+ }
+
+ if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) {
+ dev_put(dev);
kfree_skb(skb);
}
} else {
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 57f835d2442e..fb7e3fffcb5e 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -1010,7 +1010,7 @@ static int __init af_rxrpc_init(void)
goto error_security;
}
- ret = register_pernet_subsys(&rxrpc_net_ops);
+ ret = register_pernet_device(&rxrpc_net_ops);
if (ret)
goto error_pernet;
@@ -1055,7 +1055,7 @@ error_key_type:
error_sock:
proto_unregister(&rxrpc_proto);
error_proto:
- unregister_pernet_subsys(&rxrpc_net_ops);
+ unregister_pernet_device(&rxrpc_net_ops);
error_pernet:
rxrpc_exit_security();
error_security:
@@ -1077,7 +1077,7 @@ static void __exit af_rxrpc_exit(void)
unregister_key_type(&key_type_rxrpc);
sock_unregister(PF_RXRPC);
proto_unregister(&rxrpc_proto);
- unregister_pernet_subsys(&rxrpc_net_ops);
+ unregister_pernet_device(&rxrpc_net_ops);
ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index c5566bc4aaca..7ecfbff1f0d0 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -26,6 +26,11 @@
#include <net/ip.h>
#include "ar-internal.h"
+static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
+ unsigned long user_call_ID)
+{
+}
+
/*
* Preallocate a single service call, connection and peer and, if possible,
* give them a user ID and attach the user's side of the ID to them.
@@ -206,6 +211,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
tail = b->peer_backlog_tail;
while (CIRC_CNT(head, tail, size) > 0) {
struct rxrpc_peer *peer = b->peer_backlog[tail];
+ rxrpc_put_local(peer->local);
kfree(peer);
tail = (tail + 1) & (size - 1);
}
@@ -232,6 +238,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
if (rx->discard_new_call) {
_debug("discard %lx", call->user_call_ID);
rx->discard_new_call(call, call->user_call_ID);
+ if (call->notify_rx)
+ call->notify_rx = rxrpc_dummy_notify;
rxrpc_put_call(call, rxrpc_call_put_kernel);
}
rxrpc_call_completed(call);
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 964c4e45de11..39f5fa3501ff 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -290,7 +290,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
*/
ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
if (ret < 0)
- goto error;
+ goto error_attached_to_socket;
trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
here, NULL);
@@ -310,18 +310,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
error_dup_user_ID:
write_unlock(&rx->call_lock);
release_sock(&rx->sk);
- ret = -EEXIST;
-
-error:
__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
- RX_CALL_DEAD, ret);
+ RX_CALL_DEAD, -EEXIST);
trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
- here, ERR_PTR(ret));
+ here, ERR_PTR(-EEXIST));
rxrpc_release_call(rx, call);
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put);
- _leave(" = %d", ret);
- return ERR_PTR(ret);
+ _leave(" = -EEXIST");
+ return ERR_PTR(-EEXIST);
+
+ /* We got an error, but the call is attached to the socket and is in
+	 * need of release. However, we might now race with recvmsg() because
+	 * completing the call queues it. Return 0 from sys_sendmsg() and
+ * leave the error to recvmsg() to deal with.
+ */
+error_attached_to_socket:
+ trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
+ here, ERR_PTR(ret));
+ set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
+ __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
+ RX_CALL_DEAD, ret);
+ _leave(" = c=%08x [err]", call->debug_id);
+ return call;
}
/*
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 126154a97a59..04213afd7710 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -342,18 +342,18 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
return ret;
spin_lock(&conn->channel_lock);
- spin_lock(&conn->state_lock);
+ spin_lock_bh(&conn->state_lock);
if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
conn->state = RXRPC_CONN_SERVICE;
- spin_unlock(&conn->state_lock);
+ spin_unlock_bh(&conn->state_lock);
for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
rxrpc_call_is_secure(
rcu_dereference_protected(
conn->channels[loop].call,
lockdep_is_held(&conn->channel_lock)));
} else {
- spin_unlock(&conn->state_lock);
+ spin_unlock_bh(&conn->state_lock);
}
spin_unlock(&conn->channel_lock);
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index c4c4450891e0..2adb7c5c8966 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -215,9 +215,11 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
call->peer->cong_cwnd = call->cong_cwnd;
- spin_lock_bh(&conn->params.peer->lock);
- hlist_del_rcu(&call->error_link);
- spin_unlock_bh(&conn->params.peer->lock);
+ if (!hlist_unhashed(&call->error_link)) {
+ spin_lock_bh(&call->peer->lock);
+ hlist_del_rcu(&call->error_link);
+ spin_unlock_bh(&call->peer->lock);
+ }
if (rxrpc_is_client_call(call))
return rxrpc_disconnect_client_call(call);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index d9beb28fc32f..40711f410828 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -446,7 +446,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
if (state >= RXRPC_CALL_COMPLETE)
return;
- if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+ if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
unsigned long timo = READ_ONCE(call->next_req_timo);
unsigned long now, expect_req_by;
@@ -735,13 +735,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
rwind, ntohl(ackinfo->jumbo_max));
+ if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+ rwind = RXRPC_RXTX_BUFF_SIZE - 1;
if (call->tx_winsize != rwind) {
- if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
- rwind = RXRPC_RXTX_BUFF_SIZE - 1;
if (rwind > call->tx_winsize)
wake = true;
- trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
- ntohl(ackinfo->rwind), wake);
+ trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
call->tx_winsize = rwind;
}
@@ -815,6 +814,30 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
}
/*
+ * Return true if the ACK is valid - ie. it doesn't appear to have regressed
+ * with respect to the ack state conveyed by preceding ACKs.
+ */
+static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
+ rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt)
+{
+ rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq);
+
+ if (after(first_pkt, base))
+ return true; /* The window advanced */
+
+ if (before(first_pkt, base))
+ return false; /* firstPacket regressed */
+
+ if (after_eq(prev_pkt, call->ackr_prev_seq))
+ return true; /* previousPacket hasn't regressed. */
+
+ /* Some rx implementations put a serial number in previousPacket. */
+ if (after_eq(prev_pkt, base + call->tx_winsize))
+ return false;
+ return true;
+}
+
+/*
* Process an ACK packet.
*
* ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
@@ -878,9 +901,12 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
}
/* Discard any out-of-order or duplicate ACKs (outside lock). */
- if (before(first_soft_ack, call->ackr_first_seq) ||
- before(prev_pkt, call->ackr_prev_seq))
+ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
+ trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+ first_soft_ack, call->ackr_first_seq,
+ prev_pkt, call->ackr_prev_seq);
return;
+ }
buf.info.rxMTU = 0;
ioffset = offset + nr_acks + 3;
@@ -891,9 +917,12 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
spin_lock(&call->input_lock);
/* Discard any out-of-order or duplicate ACKs (inside lock). */
- if (before(first_soft_ack, call->ackr_first_seq) ||
- before(prev_pkt, call->ackr_prev_seq))
+ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
+ trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+ first_soft_ack, call->ackr_first_seq,
+ prev_pkt, call->ackr_prev_seq);
goto out;
+ }
call->acks_latest_ts = skb->tstamp;
call->acks_latest = sp->hdr.serial;
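The new rxrpc_is_ack_valid() check leans on wraparound-safe before()/after() sequence comparisons. A minimal standalone sketch of the same idea (helper names here are illustrative, not rxrpc's):

#include <stdbool.h>
#include <stdint.h>

/* Wraparound-safe 32-bit sequence comparison: "a before b" iff the signed
 * difference is negative - the usual TCP/rxrpc-style idiom. */
static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool seq_after(uint32_t a, uint32_t b)  { return (int32_t)(a - b) > 0; }

/* Accept an ACK only if its firstPacket does not move the window backwards
 * relative to what earlier ACKs already reported. */
static bool ack_window_ok(uint32_t first_pkt, uint32_t window_base)
{
	if (seq_after(first_pkt, window_base))
		return true;   /* window advanced */
	if (seq_before(first_pkt, window_base))
		return false;  /* firstPacket regressed: discard */
	return true;           /* same base: fall back to further checks */
}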
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index e7f6b8823eb6..9be6b35fd9b2 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -35,7 +35,7 @@ static void rxrpc_free_preparse_s(struct key_preparsed_payload *);
static void rxrpc_destroy(struct key *);
static void rxrpc_destroy_s(struct key *);
static void rxrpc_describe(const struct key *, struct seq_file *);
-static long rxrpc_read(const struct key *, char __user *, size_t);
+static long rxrpc_read(const struct key *, char *, size_t);
/*
* rxrpc defined keys take an arbitrary string as the description and an
@@ -905,7 +905,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen)
_enter("");
- if (optlen <= 0 || optlen > PAGE_SIZE - 1)
+ if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->securities)
return -EINVAL;
description = memdup_user_nul(optval, optlen);
@@ -1044,12 +1044,12 @@ EXPORT_SYMBOL(rxrpc_get_null_key);
* - this returns the result in XDR form
*/
static long rxrpc_read(const struct key *key,
- char __user *buffer, size_t buflen)
+ char *buffer, size_t buflen)
{
const struct rxrpc_key_token *token;
const struct krb5_principal *princ;
size_t size;
- __be32 __user *xdr, *oldxdr;
+ __be32 *xdr, *oldxdr;
u32 cnlen, toksize, ntoks, tok, zero;
u16 toksizes[AFSTOKEN_MAX];
int loop;
@@ -1075,7 +1075,7 @@ static long rxrpc_read(const struct key *key,
switch (token->security_index) {
case RXRPC_SECURITY_RXKAD:
- toksize += 9 * 4; /* viceid, kvno, key*2 + len, begin,
+ toksize += 8 * 4; /* viceid, kvno, key*2, begin,
* end, primary, tktlen */
toksize += RND(token->kad->ticket_len);
break;
@@ -1110,8 +1110,9 @@ static long rxrpc_read(const struct key *key,
break;
default: /* we have a ticket we can't encode */
- BUG();
- continue;
+ pr_err("Unsupported key token type (%u)\n",
+ token->security_index);
+ return -ENOPKG;
}
_debug("token[%u]: toksize=%u", ntoks, toksize);
@@ -1126,30 +1127,33 @@ static long rxrpc_read(const struct key *key,
if (!buffer || buflen < size)
return size;
- xdr = (__be32 __user *) buffer;
+ xdr = (__be32 *)buffer;
zero = 0;
#define ENCODE(x) \
do { \
- __be32 y = htonl(x); \
- if (put_user(y, xdr++) < 0) \
- goto fault; \
+ *xdr++ = htonl(x); \
} while(0)
#define ENCODE_DATA(l, s) \
do { \
u32 _l = (l); \
ENCODE(l); \
- if (copy_to_user(xdr, (s), _l) != 0) \
- goto fault; \
- if (_l & 3 && \
- copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \
- goto fault; \
+ memcpy(xdr, (s), _l); \
+ if (_l & 3) \
+ memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \
+ xdr += (_l + 3) >> 2; \
+ } while(0)
+#define ENCODE_BYTES(l, s) \
+ do { \
+ u32 _l = (l); \
+ memcpy(xdr, (s), _l); \
+ if (_l & 3) \
+ memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \
xdr += (_l + 3) >> 2; \
} while(0)
#define ENCODE64(x) \
do { \
__be64 y = cpu_to_be64(x); \
- if (copy_to_user(xdr, &y, 8) != 0) \
- goto fault; \
+ memcpy(xdr, &y, 8); \
xdr += 8 >> 2; \
} while(0)
#define ENCODE_STR(s) \
@@ -1173,7 +1177,7 @@ static long rxrpc_read(const struct key *key,
case RXRPC_SECURITY_RXKAD:
ENCODE(token->kad->vice_id);
ENCODE(token->kad->kvno);
- ENCODE_DATA(8, token->kad->session_key);
+ ENCODE_BYTES(8, token->kad->session_key);
ENCODE(token->kad->start);
ENCODE(token->kad->expiry);
ENCODE(token->kad->primary_flag);
@@ -1223,8 +1227,9 @@ static long rxrpc_read(const struct key *key,
break;
default:
- BUG();
- break;
+ pr_err("Unsupported key token type (%u)\n",
+ token->security_index);
+ return -ENOPKG;
}
ASSERTCMP((unsigned long)xdr - (unsigned long)oldxdr, ==,
@@ -1240,8 +1245,4 @@ static long rxrpc_read(const struct key *key,
ASSERTCMP((char __user *) xdr - buffer, ==, size);
_leave(" = %zu", size);
return size;
-
-fault:
- _leave(" = -EFAULT");
- return -EFAULT;
}
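With ->read() now writing into a kernel buffer, the ENCODE_DATA/ENCODE_BYTES macros reduce to memcpy() plus XDR's 4-byte padding rule. A rough standalone equivalent of that padding step (buffer sizing left to the caller, as in rxrpc_read()):

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

/* Append an XDR opaque<> field: 4-byte big-endian length, the bytes
 * themselves, then zero padding up to the next 4-byte boundary.
 * Returns the advanced write pointer. */
static uint32_t *xdr_encode_opaque(uint32_t *xdr, const void *data, uint32_t len)
{
	*xdr++ = htonl(len);
	memcpy(xdr, data, len);
	if (len & 3)
		memset((uint8_t *)xdr + len, 0, 4 - (len & 3));
	return xdr + ((len + 3) >> 2);
}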
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 4c0087a48e87..fe190a691872 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -169,15 +169,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
goto error;
}
- /* we want to set the don't fragment bit */
- opt = IPV6_PMTUDISC_DO;
- ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
- (char *) &opt, sizeof(opt));
- if (ret < 0) {
- _debug("setsockopt failed");
- goto error;
- }
-
/* Fall through and set IPv4 options too otherwise we don't get
* errors from IPv4 packets sent through the IPv6 socket.
*/
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index b0aa08e3796d..da8a555ec3f6 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -480,41 +480,21 @@ send_fragmentable:
skb->tstamp = ktime_get_real();
switch (conn->params.local->srx.transport.family) {
+ case AF_INET6:
case AF_INET:
opt = IP_PMTUDISC_DONT;
- ret = kernel_setsockopt(conn->params.local->socket,
- SOL_IP, IP_MTU_DISCOVER,
- (char *)&opt, sizeof(opt));
- if (ret == 0) {
- ret = kernel_sendmsg(conn->params.local->socket, &msg,
- iov, 2, len);
- conn->params.peer->last_tx_at = ktime_get_seconds();
-
- opt = IP_PMTUDISC_DO;
- kernel_setsockopt(conn->params.local->socket, SOL_IP,
- IP_MTU_DISCOVER,
- (char *)&opt, sizeof(opt));
- }
- break;
-
-#ifdef CONFIG_AF_RXRPC_IPV6
- case AF_INET6:
- opt = IPV6_PMTUDISC_DONT;
- ret = kernel_setsockopt(conn->params.local->socket,
- SOL_IPV6, IPV6_MTU_DISCOVER,
- (char *)&opt, sizeof(opt));
- if (ret == 0) {
- ret = kernel_sendmsg(conn->params.local->socket, &msg,
- iov, 2, len);
- conn->params.peer->last_tx_at = ktime_get_seconds();
-
- opt = IPV6_PMTUDISC_DO;
- kernel_setsockopt(conn->params.local->socket,
- SOL_IPV6, IPV6_MTU_DISCOVER,
- (char *)&opt, sizeof(opt));
- }
+ kernel_setsockopt(conn->params.local->socket,
+ SOL_IP, IP_MTU_DISCOVER,
+ (char *)&opt, sizeof(opt));
+ ret = kernel_sendmsg(conn->params.local->socket, &msg,
+ iov, 2, len);
+ conn->params.peer->last_tx_at = ktime_get_seconds();
+
+ opt = IP_PMTUDISC_DO;
+ kernel_setsockopt(conn->params.local->socket,
+ SOL_IP, IP_MTU_DISCOVER,
+ (char *)&opt, sizeof(opt));
break;
-#endif
default:
BUG();
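The rewritten send_fragmentable branch folds the v4 and v6 cases into one sequence: clear path-MTU discovery on the transport socket, send, then switch it back on. A rough userspace analogue of that toggle, with error handling omitted for brevity:

#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Send one datagram with the DF bit cleared, then restore DF so normal
 * traffic keeps probing the path MTU. */
static ssize_t send_allow_fragmentation(int fd, const struct msghdr *msg)
{
	int opt = IP_PMTUDISC_DONT;
	ssize_t ret;

	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &opt, sizeof(opt));
	ret = sendmsg(fd, msg, 0);
	opt = IP_PMTUDISC_DO;
	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &opt, sizeof(opt));
	return ret;
}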
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 9805e3b85c36..81a765dd8c9b 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -72,7 +72,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
"Proto Local "
" Remote "
" SvID ConnID CallID End Use State Abort "
- " UserID TxSeq TW RxSeq RW RxSerial RxTimo\n");
+ " DebugId TxSeq TW RxSeq RW RxSerial RxTimo\n");
return 0;
}
@@ -104,7 +104,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
rx_hard_ack = READ_ONCE(call->rx_hard_ack);
seq_printf(seq,
"UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
- " %-8.8s %08x %lx %08x %02x %08x %02x %08x %06lx\n",
+ " %-8.8s %08x %08x %08x %02x %08x %02x %08x %06lx\n",
lbuff,
rbuff,
call->service_id,
@@ -114,7 +114,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
atomic_read(&call->usage),
rxrpc_call_states[call->state],
call->abort_code,
- call->user_call_ID,
+ call->debug_id,
tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,
rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack,
call->rx_serial,
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 0374b0623c8b..e4fde33b887e 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -453,7 +453,7 @@ try_again:
list_empty(&rx->recvmsg_q) &&
rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
release_sock(&rx->sk);
- return -ENODATA;
+ return -EAGAIN;
}
if (list_empty(&rx->recvmsg_q)) {
@@ -530,7 +530,7 @@ try_again:
goto error_unlock_call;
}
- if (msg->msg_name) {
+ if (msg->msg_name && call->peer) {
struct sockaddr_rxrpc *srx = msg->msg_name;
size_t len = sizeof(call->peer->srx);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index cea16838d588..dce7bdc73de4 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -1118,7 +1118,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key,
&expiry, _abort_code);
if (ret < 0)
- goto temporary_error_free_resp;
+ goto temporary_error_free_ticket;
/* use the session key from inside the ticket to decrypt the
* response */
@@ -1200,7 +1200,6 @@ protocol_error:
temporary_error_free_ticket:
kfree(ticket);
-temporary_error_free_resp:
kfree(response);
temporary_error:
/* Ignore the response packet if we got a temporary error such as
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 250d3dae8af4..edd76c41765f 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -278,7 +278,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
/* this should be in poll */
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
- if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
return -EPIPE;
more = msg->msg_flags & MSG_MORE;
@@ -654,6 +654,9 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
if (IS_ERR(call))
return PTR_ERR(call);
/* ... and we have the call lock. */
+ ret = 0;
+ if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE)
+ goto out_put_unlock;
} else {
switch (READ_ONCE(call->state)) {
case RXRPC_CALL_UNINITIALISED:
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f2c4bfc79663..26710b297dcb 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -900,6 +900,9 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
return ERR_PTR(-EINVAL);
}
+ if (!bind && ovr && err == ACT_P_CREATED)
+ refcount_set(&a->tcfa_refcnt, 2);
+
return a;
err_mod:
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 538dedd84e21..6485f421c091 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -46,17 +46,20 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
tcf_lastuse_update(&ca->tcf_tm);
bstats_update(&ca->tcf_bstats, skb);
- if (skb->protocol == htons(ETH_P_IP)) {
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
if (skb->len < sizeof(struct iphdr))
goto out;
proto = NFPROTO_IPV4;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ break;
+ case htons(ETH_P_IPV6):
if (skb->len < sizeof(struct ipv6hdr))
goto out;
proto = NFPROTO_IPV6;
- } else {
+ break;
+ default:
goto out;
}
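Several of the act_* and cls_* hunks in this series swap skb->protocol / tc_skb_protocol() for skb_protocol(skb, true), i.e. they want the network-layer ethertype even when the packet still carries VLAN tags. A hedged, standalone illustration of that lookup on a raw Ethernet frame (not the kernel helper itself):

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

/* Return the ethertype of the payload, skipping any 802.1Q/802.1ad tags,
 * or 0 if the frame is too short.  This mirrors what "skip_vlan = true"
 * asks for: classify on the inner IP/IPv6 header, not on the tag. */
static uint16_t inner_ethertype(const uint8_t *frame, size_t len)
{
	size_t off = 12;                      /* skip dst + src MAC */

	for (;;) {
		uint16_t be, proto;

		if (off + 2 > len)
			return 0;
		memcpy(&be, frame + off, 2);
		proto = ntohs(be);
		if (proto != ETH_P_8021Q && proto != ETH_P_8021AD)
			return proto;
		off += 4;                     /* TPID + TCI */
	}
}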
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 9ecbf8edcf39..24ad4ceaca27 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -577,7 +577,7 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
goto drop;
update_flags = params->update_flags;
- protocol = tc_skb_protocol(skb);
+ protocol = skb_protocol(skb, false);
again:
switch (protocol) {
case cpu_to_be16(ETH_P_IP):
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index a80179c1075f..06648b79968e 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -51,7 +51,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
int wlen = skb_network_offset(skb);
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
wlen += sizeof(struct iphdr);
if (!pskb_may_pull(skb, wlen))
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index e4fc6b2bc29d..f43234be5695 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -314,7 +314,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
0, flags,
- key_id, 0);
+ key_id, opts_len);
} else {
NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
ret = -EINVAL;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index e217ebc693f8..184c20b86393 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -969,7 +969,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
reclassify:
#endif
for (; tp; tp = rcu_dereference_bh(tp->next)) {
- __be16 protocol = tc_skb_protocol(skb);
+ __be16 protocol = skb_protocol(skb, false);
int err;
if (tp->protocol != protocol &&
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 2bb043cd436b..55bf75cb1f16 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -84,7 +84,7 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
if (dst)
return ntohl(dst);
- return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
+ return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}
static u32 flow_get_proto(const struct sk_buff *skb,
@@ -108,7 +108,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb,
if (flow->ports.ports)
return ntohs(flow->ports.dst);
- return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
+ return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}
static u32 flow_get_iif(const struct sk_buff *skb)
@@ -155,7 +155,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
static u32 flow_get_nfct_src(const struct sk_buff *skb,
const struct flow_keys *flow)
{
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
return ntohl(CTTUPLE(skb, src.u3.ip));
case htons(ETH_P_IPV6):
@@ -168,7 +168,7 @@ fallback:
static u32 flow_get_nfct_dst(const struct sk_buff *skb,
const struct flow_keys *flow)
{
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
return ntohl(CTTUPLE(skb, dst.u3.ip));
case htons(ETH_P_IPV6):
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 44ca31f8538d..208436eb107c 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -203,7 +203,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
/* skb_flow_dissect() does not set n_proto in case an unknown
* protocol, so do it rather here.
*/
- skb_key.basic.n_proto = skb->protocol;
+ skb_key.basic.n_proto = skb_protocol(skb, false);
skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 0d7a0aac8dbb..e41bc5ecaa09 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -339,9 +339,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
if (tb[TCA_TCINDEX_MASK])
cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
- if (tb[TCA_TCINDEX_SHIFT])
+ if (tb[TCA_TCINDEX_SHIFT]) {
cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
-
+ if (cp->shift > 16) {
+ err = -EINVAL;
+ goto errout;
+ }
+ }
if (!cp->hash) {
/* Hash not specified, use perfect hash if the upper limit
* of the hashing index is below the threshold.
diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c
index c1b23e3060b8..ef3b6b66c26a 100644
--- a/net/sched/em_ipset.c
+++ b/net/sched/em_ipset.c
@@ -62,7 +62,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
};
int ret, network_offset;
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
state.pf = NFPROTO_IPV4;
if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index d6e97115500b..e36fa9272259 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -199,7 +199,7 @@ META_COLLECTOR(int_priority)
META_COLLECTOR(int_protocol)
{
/* Let userspace take care of the byte ordering */
- dst->value = tc_skb_protocol(skb);
+ dst->value = skb_protocol(skb, false);
}
META_COLLECTOR(int_pkttype)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 39e319d04bb8..1f12be9f0207 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -398,7 +398,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
{
struct qdisc_rate_table *rtab;
- if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
+ if (tab == NULL || r->rate == 0 ||
+ r->cell_log == 0 || r->cell_log >= 32 ||
nla_len(tab) != TC_RTAB_SIZE) {
NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
return NULL;
@@ -2047,7 +2048,7 @@ static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
struct tcmsg *tcm, struct netlink_callback *cb,
- int *t_p, int s_t)
+ int *t_p, int s_t, bool recur)
{
struct Qdisc *q;
int b;
@@ -2058,7 +2059,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
return -1;
- if (!qdisc_dev(root))
+ if (!qdisc_dev(root) || !recur)
return 0;
if (tcm->tcm_parent) {
@@ -2093,13 +2094,13 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
t = 0;
- if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
+ if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0)
goto done;
dev_queue = dev_ingress_queue(dev);
if (dev_queue &&
tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
- &t, s_t) < 0)
+ &t, s_t, false) < 0)
goto done;
done:
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index cd49afca9617..91bd5c839303 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -551,16 +551,16 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
if (!p->link.q)
p->link.q = &noop_qdisc;
pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
+ p->link.vcc = NULL;
+ p->link.sock = NULL;
+ p->link.common.classid = sch->handle;
+ p->link.ref = 1;
err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
extack);
if (err)
return err;
- p->link.vcc = NULL;
- p->link.sock = NULL;
- p->link.common.classid = sch->handle;
- p->link.ref = 1;
tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
return 0;
}
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 824e3c37e5dd..32712e7dcbdc 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -589,7 +589,7 @@ static void cake_update_flowkeys(struct flow_keys *keys,
struct nf_conntrack_tuple tuple = {};
bool rev = !skb->_nfct;
- if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+ if (skb_protocol(skb, true) != htons(ETH_P_IP))
return;
if (!nf_ct_get_tuple_skb(&tuple, skb))
@@ -1508,32 +1508,51 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
return idx + (tin << 16);
}
-static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
{
- int wlen = skb_network_offset(skb);
+ const int offset = skb_network_offset(skb);
+ u16 *buf, buf_;
u8 dscp;
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
- wlen += sizeof(struct iphdr);
- if (!pskb_may_pull(skb, wlen) ||
- skb_try_make_writable(skb, wlen))
+ buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+ if (unlikely(!buf))
return 0;
- dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
- if (wash && dscp)
+ /* ToS is in the second byte of iphdr */
+ dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;
+
+ if (wash && dscp) {
+ const int wlen = offset + sizeof(struct iphdr);
+
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ return 0;
+
ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+ }
+
return dscp;
case htons(ETH_P_IPV6):
- wlen += sizeof(struct ipv6hdr);
- if (!pskb_may_pull(skb, wlen) ||
- skb_try_make_writable(skb, wlen))
+ buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+ if (unlikely(!buf))
return 0;
- dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
- if (wash && dscp)
+ /* Traffic class is in the first and second bytes of ipv6hdr */
+ dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;
+
+ if (wash && dscp) {
+ const int wlen = offset + sizeof(struct ipv6hdr);
+
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ return 0;
+
ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+ }
+
return dscp;
case htons(ETH_P_ARP):
@@ -1550,13 +1569,16 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
{
struct cake_sched_data *q = qdisc_priv(sch);
u32 tin;
+ bool wash;
u8 dscp;
/* Tin selection: Default to diffserv-based selection, allow overriding
- * using firewall marks or skb->priority.
+ * using firewall marks or skb->priority. Call DSCP parsing early if
+ * wash is enabled, otherwise defer to below to skip unneeded parsing.
*/
- dscp = cake_handle_diffserv(skb,
- q->rate_flags & CAKE_FLAG_WASH);
+ wash = !!(q->rate_flags & CAKE_FLAG_WASH);
+ if (wash)
+ dscp = cake_handle_diffserv(skb, wash);
if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
tin = 0;
@@ -1567,6 +1589,8 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
else {
+ if (!wash)
+ dscp = cake_handle_diffserv(skb, wash);
tin = q->tin_index[dscp];
if (unlikely(tin >= q->tin_cnt))
@@ -2630,7 +2654,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
qdisc_watchdog_init(&q->watchdog, sch);
if (opt) {
- int err = cake_change(sch, opt, extack);
+ err = cake_change(sch, opt, extack);
if (err)
return err;
@@ -2944,7 +2968,7 @@ static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
PUT_STAT_S32(BLUE_TIMER_US,
ktime_to_us(
ktime_sub(now,
- flow->cvars.blue_timer)));
+ flow->cvars.blue_timer)));
}
if (flow->cvars.dropping) {
PUT_STAT_S32(DROP_NEXT_US,
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index eafc0d17d174..8b72aed77ed4 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -327,7 +327,8 @@ static void choke_reset(struct Qdisc *sch)
sch->q.qlen = 0;
sch->qstats.backlog = 0;
- memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
+ if (q->tab)
+ memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
q->head = q->tail = 0;
red_restart(&q->vars);
}
@@ -354,6 +355,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
struct sk_buff **old = NULL;
unsigned int mask;
u32 max_P;
+ u8 *stab;
if (opt == NULL)
return -EINVAL;
@@ -369,8 +371,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
ctl = nla_data(tb[TCA_CHOKE_PARMS]);
-
- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+ stab = nla_data(tb[TCA_CHOKE_STAB]);
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
return -EINVAL;
if (ctl->limit > CHOKE_MAX_QUEUE)
@@ -420,7 +422,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
ctl->Plog, ctl->Scell_log,
- nla_data(tb[TCA_CHOKE_STAB]),
+ stab,
max_P);
red_set_vars(&q->vars);
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 84c948c91914..fe030af9272c 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -207,7 +207,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (p->set_tc_index) {
int wlen = skb_network_offset(skb);
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
wlen += sizeof(struct iphdr);
if (!pskb_may_pull(skb, wlen) ||
@@ -300,7 +300,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
index = skb->tc_index & (p->indices - 1);
pr_debug("index %d->%d\n", skb->tc_index, index);
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
p->mv[index].value);
@@ -317,7 +317,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
*/
if (p->mv[index].mask != 0xff || p->mv[index].value)
pr_warn("%s: unsupported protocol %d\n",
- __func__, ntohs(tc_skb_protocol(skb)));
+ __func__, ntohs(skb_protocol(skb, true)));
break;
}
@@ -402,7 +402,8 @@ static void dsmark_reset(struct Qdisc *sch)
struct dsmark_qdisc_data *p = qdisc_priv(sch);
pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
- qdisc_reset(p->q);
+ if (p->q)
+ qdisc_reset(p->q);
sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
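Both the cake and dsmark hunks read or rewrite the DS field, and cake's comments spell out where it lives: the second byte of the IPv4 header and the first twelve bits (split over the first two bytes) of the IPv6 header. A small sketch of the read side, assuming a buffer that starts at the network header:

#include <stdint.h>

/* DSCP is the top six bits of the 8-bit DS field; the low two bits are ECN. */
static uint8_t dscp_from_ipv4(const uint8_t *iph)
{
	return iph[1] >> 2;                   /* ToS byte */
}

static uint8_t dscp_from_ipv6(const uint8_t *ip6h)
{
	/* Traffic class = low nibble of byte 0 plus high nibble of byte 1. */
	uint8_t tclass = (uint8_t)((ip6h[0] << 4) | (ip6h[1] >> 4));

	return tclass >> 2;
}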
diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
index 1538d6fa8165..2278f3d420cd 100644
--- a/net/sched/sch_etf.c
+++ b/net/sched/sch_etf.c
@@ -77,7 +77,7 @@ static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb)
struct sock *sk = nskb->sk;
ktime_t now;
- if (!sk)
+ if (!sk || !sk_fullsock(sk))
return false;
if (!sock_flag(sk, SOCK_TXTIME))
@@ -129,8 +129,9 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code)
struct sock_exterr_skb *serr;
struct sk_buff *clone;
ktime_t txtime = skb->tstamp;
+ struct sock *sk = skb->sk;
- if (!skb->sk || !(skb->sk->sk_txtime_report_errors))
+ if (!sk || !sk_fullsock(sk) || !(sk->sk_txtime_report_errors))
return;
clone = skb_clone(skb, GFP_ATOMIC);
@@ -146,7 +147,7 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code)
serr->ee.ee_data = (txtime >> 32); /* high part of tstamp */
serr->ee.ee_info = txtime; /* low part of tstamp */
- if (sock_queue_err_skb(skb->sk, clone))
+ if (sock_queue_err_skb(sk, clone))
kfree_skb(clone);
}
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 137692cb8b4f..a862d9990be7 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -429,7 +429,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
- q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
+ q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8a4d01e427a2..4e15913e7519 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -487,6 +487,7 @@ void __netdev_watchdog_up(struct net_device *dev)
dev_hold(dev);
}
}
+EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
static void dev_watchdog_up(struct net_device *dev)
{
@@ -1114,29 +1115,41 @@ static void dev_deactivate_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_qdisc_default)
{
+ struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc);
struct Qdisc *qdisc_default = _qdisc_default;
- struct Qdisc *qdisc;
- qdisc = rtnl_dereference(dev_queue->qdisc);
if (qdisc) {
- bool nolock = qdisc->flags & TCQ_F_NOLOCK;
-
- if (nolock)
- spin_lock_bh(&qdisc->seqlock);
- spin_lock_bh(qdisc_lock(qdisc));
-
if (!(qdisc->flags & TCQ_F_BUILTIN))
set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
- qdisc_reset(qdisc);
-
- spin_unlock_bh(qdisc_lock(qdisc));
- if (nolock)
- spin_unlock_bh(&qdisc->seqlock);
}
}
+static void dev_reset_queue(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *_unused)
+{
+ struct Qdisc *qdisc;
+ bool nolock;
+
+ qdisc = dev_queue->qdisc_sleeping;
+ if (!qdisc)
+ return;
+
+ nolock = qdisc->flags & TCQ_F_NOLOCK;
+
+ if (nolock)
+ spin_lock_bh(&qdisc->seqlock);
+ spin_lock_bh(qdisc_lock(qdisc));
+
+ qdisc_reset(qdisc);
+
+ spin_unlock_bh(qdisc_lock(qdisc));
+ if (nolock)
+ spin_unlock_bh(&qdisc->seqlock);
+}
+
static bool some_qdisc_is_busy(struct net_device *dev)
{
unsigned int i;
@@ -1195,12 +1208,20 @@ void dev_deactivate_many(struct list_head *head)
dev_watchdog_down(dev);
}
- /* Wait for outstanding qdisc-less dev_queue_xmit calls.
+ /* Wait for outstanding qdisc-less dev_queue_xmit calls or
+ * outstanding qdisc enqueuing calls.
* This is avoided if all devices are in dismantle phase :
* Caller will call synchronize_net() for us
*/
synchronize_net();
+ list_for_each_entry(dev, head, close_list) {
+ netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
+
+ if (dev_ingress_queue(dev))
+ dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
+ }
+
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list) {
while (some_qdisc_is_busy(dev))
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 4a042abf844c..db7a91a29b59 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -357,7 +357,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
struct gred_sched *table = qdisc_priv(sch);
struct gred_sched_data *q = table->tab[dp];
- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
return -EINVAL;
if (!q) {
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 014a28d8dd4f..02d8d3fd84a5 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -330,7 +330,7 @@ static s64 tabledist(s64 mu, s32 sigma,
/* default uniform distribution */
if (dist == NULL)
- return ((rnd % (2 * sigma)) + mu) - sigma;
+ return ((rnd % (2 * (u32)sigma)) + mu) - sigma;
t = dist->table[rnd % dist->size];
x = (sigma % NETEM_DIST_SCALE) * t;
@@ -787,6 +787,10 @@ static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
q->slot_config.max_packets = INT_MAX;
if (q->slot_config.max_bytes == 0)
q->slot_config.max_bytes = INT_MAX;
+
+ /* capping dist_jitter to the range acceptable by tabledist() */
+ q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));
+
q->slot.packets_left = q->slot_config.max_packets;
q->slot.bytes_left = q->slot_config.max_bytes;
if (q->slot_config.min_delay | q->slot_config.max_delay |
@@ -1011,6 +1015,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_NETEM_SLOT])
get_slot(q, tb[TCA_NETEM_SLOT]);
+ /* capping jitter to the range acceptable by tabledist() */
+ q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
+
return ret;
get_table_failure:
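The tabledist() change and the two jitter caps work together: jitter is clamped to INT_MAX and the modulus is taken on an unsigned copy of sigma, so the 2 * sigma intermediate stays inside unsigned 32-bit range. A compact illustration of the uniform case, with names chosen here for the sketch:

#include <stdint.h>

/* Uniform jitter: pick a value in [mu - sigma, mu + sigma) from a random
 * 32-bit sample.  Widening sigma to u32 keeps 2 * sigma from overflowing
 * signed arithmetic. */
static int64_t uniform_jitter(int64_t mu, int32_t sigma, uint32_t rnd)
{
	if (sigma <= 0)
		return mu;
	return ((int64_t)(rnd % (2 * (uint32_t)sigma)) + mu) - sigma;
}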
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 56c181c3feeb..e42f890fa147 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -199,6 +199,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
struct Qdisc *child = NULL;
int err;
u32 max_P;
+ u8 *stab;
if (opt == NULL)
return -EINVAL;
@@ -214,7 +215,9 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
ctl = nla_data(tb[TCA_RED_PARMS]);
- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+ stab = nla_data(tb[TCA_RED_STAB]);
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
+ ctl->Scell_log, stab))
return -EINVAL;
if (ctl->limit > 0) {
@@ -240,7 +243,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
red_set_parms(&q->parms,
ctl->qth_min, ctl->qth_max, ctl->Wlog,
ctl->Plog, ctl->Scell_log,
- nla_data(tb[TCA_RED_STAB]),
+ stab,
max_P);
red_set_vars(&q->vars);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d483d6ba59b7..1bfdf90fa0cc 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -641,8 +641,17 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
if (ctl->divisor &&
(!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
return -EINVAL;
+
+ /* slot->allot is a short, make sure quantum is not too big. */
+ if (ctl->quantum) {
+ unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);
+
+ if (scaled <= 0 || scaled > SHRT_MAX)
+ return -EINVAL;
+ }
+
if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
- ctl_v1->Wlog))
+ ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
return -EINVAL;
if (ctl_v1 && ctl_v1->qth_min) {
p = kmalloc(sizeof(*p), GFP_KERNEL);
diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
index 52c0b6d8f1d7..3d9de52849bc 100644
--- a/net/sched/sch_skbprio.c
+++ b/net/sched/sch_skbprio.c
@@ -173,6 +173,9 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
{
struct tc_skbprio_qopt *ctl = nla_data(opt);
+ if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
+ return -EINVAL;
+
sch->limit = ctl->limit;
return 0;
}
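The skbprio fix is the usual netlink pattern: verify that the attribute's payload is exactly as large as the structure you are about to read before dereferencing nla_data(). The same guard written out for a generic TLV, purely as a sketch:

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct tlv {            /* illustrative TLV header, not the kernel's nlattr */
	uint16_t len;   /* header + payload length */
	uint16_t type;
	/* payload follows */
};

struct qopt { uint32_t limit; };

/* Reject short (or differently sized) payloads before copying them into a
 * fixed-size structure, instead of trusting the sender's length field. */
static int parse_qopt(const struct tlv *attr, struct qopt *out)
{
	if (attr->len != sizeof(struct tlv) + sizeof(struct qopt))
		return -EINVAL;
	memcpy(out, (const uint8_t *)attr + sizeof(struct tlv), sizeof(*out));
	return 0;
}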
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 93f04cf5cac1..163364124691 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -138,6 +138,9 @@ teql_destroy(struct Qdisc *sch)
struct teql_sched_data *dat = qdisc_priv(sch);
struct teql_master *master = dat->m;
+ if (!master)
+ return;
+
prev = master->slaves;
if (prev) {
do {
@@ -243,7 +246,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
char haddr[MAX_ADDR_LEN];
neigh_ha_snapshot(haddr, n, dev);
- err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)),
+ err = dev_hard_header(skb, dev, ntohs(skb_protocol(skb, false)),
haddr, NULL, skb->len);
if (err < 0)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f68ccd1f4860..d17708800652 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1593,12 +1593,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
enum sctp_scope scope, gfp_t gfp)
{
+ struct sock *sk = asoc->base.sk;
int flags;
/* Use scoping rules to determine the subset of addresses from
* the endpoint.
*/
- flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+ flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+ if (!inet_v6_ipv6only(sk))
+ flags |= SCTP_ADDR4_ALLOWED;
if (asoc->peer.ipv4_address)
flags |= SCTP_ADDR4_PEERSUPP;
if (asoc->peer.ipv6_address)
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 5b537613946f..2bd8c80bd85f 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -515,6 +515,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
out_err:
/* Clean up any successful allocations */
sctp_auth_destroy_hmacs(ep->auth_hmacs);
+ ep->auth_hmacs = NULL;
return -ENOMEM;
}
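Clearing ep->auth_hmacs after the cleanup matters because the endpoint's later teardown path could otherwise free the same array again. The pattern in isolation, as a minimal sketch:

#include <stdlib.h>

struct endpoint { int *hmacs; };

/* Free the array and clear the pointer so that a second teardown (e.g. the
 * endpoint destructor running after an init-time error path) sees NULL and
 * does nothing instead of freeing the memory twice. */
static void endpoint_destroy_hmacs(struct endpoint *ep)
{
	free(ep->hmacs);   /* free(NULL) is a no-op */
	ep->hmacs = NULL;
}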
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 7df3704982f5..38d01cfb313e 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -453,6 +453,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
* well as the remote peer.
*/
if ((((AF_INET == addr->sa.sa_family) &&
+ (flags & SCTP_ADDR4_ALLOWED) &&
(flags & SCTP_ADDR4_PEERSUPP))) ||
(((AF_INET6 == addr->sa.sa_family) &&
(flags & SCTP_ADDR6_ALLOWED) &&
diff --git a/net/sctp/input.c b/net/sctp/input.c
index f64d882c8698..3dd900e42b85 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -461,7 +461,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
else {
if (!mod_timer(&t->proto_unreach_timer,
jiffies + (HZ/20)))
- sctp_association_hold(asoc);
+ sctp_transport_hold(t);
}
} else {
struct net *net = sock_net(sk);
@@ -470,7 +470,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
"encountered!\n", __func__);
if (del_timer(&t->proto_unreach_timer))
- sctp_association_put(asoc);
+ sctp_transport_put(t);
sctp_do_sm(net, SCTP_EVENT_T_OTHER,
SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 7657194f396e..4cc573924493 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -288,7 +288,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
rcu_read_unlock();
- dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
if (!asoc || saddr) {
t->dst = dst;
memcpy(fl, &_fl, sizeof(_fl));
@@ -346,7 +346,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
fl6->saddr = laddr->a.v6.sin6_addr;
fl6->fl6_sport = laddr->a.v6.sin6_port;
final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
- bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
+ bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
if (IS_ERR(bdst))
continue;
@@ -655,8 +655,8 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
if (!(type & IPV6_ADDR_UNICAST))
return 0;
- return sp->inet.freebind || net->ipv6.sysctl.ip_nonlocal_bind ||
- ipv6_chk_addr(net, in6, NULL, 0);
+ return ipv6_can_nonlocal_bind(net, &sp->inet) ||
+ ipv6_chk_addr(net, in6, NULL, 0);
}
/* This function checks if the address is a valid address to be used for
@@ -945,8 +945,7 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
net = sock_net(&opt->inet.sk);
rcu_read_lock();
dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
- if (!dev || !(opt->inet.freebind ||
- net->ipv6.sysctl.ip_nonlocal_bind ||
+ if (!dev || !(ipv6_can_nonlocal_bind(net, &opt->inet) ||
ipv6_chk_addr(net, &addr->v6.sin6_addr,
dev, 0))) {
rcu_read_unlock();
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 7bb8e5603298..d6e83a37a1ad 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -51,6 +51,7 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
+#include <trace/events/sctp.h>
/* Declare internal functions here. */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
@@ -1257,6 +1258,11 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
/* Grab the association's destination address list. */
transport_list = &asoc->peer.transport_addr_list;
+ /* SCTP path tracepoint for congestion control debugging. */
+ list_for_each_entry(transport, transport_list, transports) {
+ trace_sctp_probe_path(transport, asoc);
+ }
+
sack_ctsn = ntohl(sack->cum_tsn_ack);
gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
asoc->stats.gapcnt += gap_ack_blocks;
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index a644292f9faf..84f79ac4b984 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -230,6 +230,12 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
{
struct sctp_ht_iter *iter = seq->private;
+ if (v && v != SEQ_START_TOKEN) {
+ struct sctp_transport *transport = v;
+
+ sctp_transport_put(transport);
+ }
+
sctp_transport_walk_stop(&iter->hti);
}
@@ -237,6 +243,12 @@ static void *sctp_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sctp_ht_iter *iter = seq->private;
+ if (v && v != SEQ_START_TOKEN) {
+ struct sctp_transport *transport = v;
+
+ sctp_transport_put(transport);
+ }
+
++*pos;
return sctp_transport_get_next(seq_file_net(seq), &iter->hti);
@@ -292,8 +304,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
sk->sk_rcvbuf);
seq_printf(seq, "\n");
- sctp_transport_put(transport);
-
return 0;
}
@@ -369,8 +379,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "\n");
}
- sctp_transport_put(transport);
-
return 0;
}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 787c59d798f4..af054f38341b 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -163,7 +163,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
* sock as well as the remote peer.
*/
if (addr->a.sa.sa_family == AF_INET &&
- !(copy_flags & SCTP_ADDR4_PEERSUPP))
+ (!(copy_flags & SCTP_ADDR4_ALLOWED) ||
+ !(copy_flags & SCTP_ADDR4_PEERSUPP)))
continue;
if (addr->a.sa.sa_family == AF_INET6 &&
(!(copy_flags & SCTP_ADDR6_ALLOWED) ||
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index fb546b2d67ca..dc51e14f568e 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -871,7 +871,11 @@ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
struct sctp_chunk *retval;
__u32 ctsn;
- ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
+ if (chunk && chunk->asoc)
+ ctsn = sctp_tsnmap_get_ctsn(&chunk->asoc->peer.tsn_map);
+ else
+ ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
+
shut.cum_tsn_ack = htonl(ctsn);
retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0,
@@ -3144,7 +3148,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
* primary.
*/
if (af->is_any(&addr))
- memcpy(&addr.v4, sctp_source(asconf), sizeof(addr));
+ memcpy(&addr, sctp_source(asconf), sizeof(addr));
if (security_sctp_bind_connect(asoc->ep->base.sk,
SCTP_PARAM_SET_PRIMARY,
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 0234a64b3b19..2a94240eac36 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -434,7 +434,7 @@ void sctp_generate_proto_unreach_event(struct timer_list *t)
/* Try again later. */
if (!mod_timer(&transport->proto_unreach_timer,
jiffies + (HZ/20)))
- sctp_association_hold(asoc);
+ sctp_transport_hold(transport);
goto out_unlock;
}
@@ -450,7 +450,7 @@ void sctp_generate_proto_unreach_event(struct timer_list *t)
out_unlock:
bh_unlock_sock(sk);
- sctp_association_put(asoc);
+ sctp_transport_put(transport);
}
/* Handle the timeout of the RE-CONFIG timer. */
@@ -1537,9 +1537,17 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
timeout = asoc->timeouts[cmd->obj.to];
BUG_ON(!timeout);
- timer->expires = jiffies + timeout;
- sctp_association_hold(asoc);
- add_timer(timer);
+ /*
+ * SCTP has a hard time with timer starts. Because we process
+ * timer starts as side effects, it can be hard to tell if we
+ * have already started a timer or not, which leads to BUG
+ * halts when we call add_timer. So here, instead of just starting
+ * a timer, if the timer is already started, and just mod
+ * the timer with the shorter of the two expiration times
+ */
+ if (!timer_pending(timer))
+ sctp_association_hold(asoc);
+ timer_reduce(timer, jiffies + timeout);
break;
case SCTP_CMD_TIMER_RESTART:
@@ -1607,12 +1615,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
break;
case SCTP_CMD_INIT_FAILED:
- sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
+ sctp_cmd_init_failed(commands, asoc, cmd->obj.u16);
break;
case SCTP_CMD_ASSOC_FAILED:
sctp_cmd_assoc_failed(commands, asoc, event_type,
- subtype, chunk, cmd->obj.u32);
+ subtype, chunk, cmd->obj.u16);
break;
case SCTP_CMD_INIT_COUNTER_INC:
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 9f4d325f3a79..be5ea5e8b19e 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1856,31 +1856,47 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
- repl = sctp_make_cookie_ack(new_asoc, chunk);
+ /* Update the content of current association. */
+ if (sctp_assoc_update((struct sctp_association *)asoc, new_asoc)) {
+ struct sctp_chunk *abort;
+
+ abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
+ if (abort) {
+ sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+ }
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_RSRC_LOW));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ goto nomem;
+ }
+
+ repl = sctp_make_cookie_ack(asoc, chunk);
if (!repl)
goto nomem;
/* Report association restart to upper layer. */
ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
- new_asoc->c.sinit_num_ostreams,
- new_asoc->c.sinit_max_instreams,
+ asoc->c.sinit_num_ostreams,
+ asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem_ev;
- /* Update the content of current association. */
- sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
- if (sctp_state(asoc, SHUTDOWN_PENDING) &&
+ if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
+ sctp_state(asoc, SHUTDOWN_SENT)) &&
(sctp_sstate(asoc->base.sk, CLOSING) ||
sock_flag(asoc->base.sk, SOCK_DEAD))) {
- /* if were currently in SHUTDOWN_PENDING, but the socket
- * has been closed by user, don't transition to ESTABLISHED.
- * Instead trigger SHUTDOWN bundled with COOKIE_ACK.
+ /* If the socket has been closed by user, don't
+ * transition to ESTABLISHED. Instead trigger SHUTDOWN
+ * bundled with COOKIE_ACK.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
- SCTP_ST_CHUNK(0), NULL,
+ SCTP_ST_CHUNK(0), repl,
commands);
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@@ -1932,7 +1948,8 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
- SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+ if (asoc->state < SCTP_STATE_ESTABLISHED)
+ SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -5483,7 +5500,7 @@ enum sctp_disposition sctp_sf_do_9_2_start_shutdown(
* in the Cumulative TSN Ack field the last sequential TSN it
* has received from the peer.
*/
- reply = sctp_make_shutdown(asoc, NULL);
+ reply = sctp_make_shutdown(asoc, arg);
if (!reply)
goto nomem;
@@ -6081,7 +6098,7 @@ enum sctp_disposition sctp_sf_autoclose_timer_expire(
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
- arg, commands);
+ NULL, commands);
}
return disposition;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c93be3ba5df2..1148f6691707 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -375,6 +375,18 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
return af;
}
+static void sctp_auto_asconf_init(struct sctp_sock *sp)
+{
+ struct net *net = sock_net(&sp->inet.sk);
+
+ if (net->sctp.default_auto_asconf) {
+ spin_lock(&net->sctp.addr_wq_lock);
+ list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
+ spin_unlock(&net->sctp.addr_wq_lock);
+ sp->do_auto_asconf = 1;
+ }
+}
+
/* Bind a local address either to an endpoint or to an association. */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
@@ -437,8 +449,10 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
}
/* Refresh ephemeral port. */
- if (!bp->port)
+ if (!bp->port) {
bp->port = inet_sk(sk)->inet_num;
+ sctp_auto_asconf_init(sp);
+ }
/* Add the address to the bind address list.
* Use GFP_ATOMIC since BHs will be disabled.
@@ -1931,7 +1945,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
if (sctp_wspace(asoc) < (int)msg_len)
sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
- if (sctp_wspace(asoc) <= 0) {
+ if (sk_under_memory_pressure(sk))
+ sk_mem_reclaim(sk);
+
+ if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
if (err)
@@ -4773,19 +4790,6 @@ static int sctp_init_sock(struct sock *sk)
sk_sockets_allocated_inc(sk);
sock_prot_inuse_add(net, sk->sk_prot, 1);
- /* Nothing can fail after this block, otherwise
- * sctp_destroy_sock() will be called without addr_wq_lock held
- */
- if (net->sctp.default_auto_asconf) {
- spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
- list_add_tail(&sp->auto_asconf_list,
- &net->sctp.auto_asconf_splist);
- sp->do_auto_asconf = 1;
- spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
- } else {
- sp->do_auto_asconf = 0;
- }
-
local_bh_enable();
return 0;
@@ -7640,8 +7644,6 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
pr_debug("%s: begins, snum:%d\n", __func__, snum);
- local_bh_disable();
-
if (snum == 0) {
/* Search for an available port. */
int low, high, remaining, index;
@@ -7660,20 +7662,21 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
continue;
index = sctp_phashfn(sock_net(sk), rover);
head = &sctp_port_hashtable[index];
- spin_lock(&head->lock);
+ spin_lock_bh(&head->lock);
sctp_for_each_hentry(pp, &head->chain)
if ((pp->port == rover) &&
net_eq(sock_net(sk), pp->net))
goto next;
break;
next:
- spin_unlock(&head->lock);
+ spin_unlock_bh(&head->lock);
+ cond_resched();
} while (--remaining > 0);
/* Exhausted local port range during search? */
ret = 1;
if (remaining <= 0)
- goto fail;
+ return ret;
/* OK, here is the one we will use. HEAD (the port
* hash table list entry) is non-NULL and we hold it's
@@ -7688,7 +7691,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
* port iterator, pp being NULL.
*/
head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
- spin_lock(&head->lock);
+ spin_lock_bh(&head->lock);
sctp_for_each_hentry(pp, &head->chain) {
if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
goto pp_found;
@@ -7770,10 +7773,7 @@ success:
ret = 0;
fail_unlock:
- spin_unlock(&head->lock);
-
-fail:
- local_bh_enable();
+ spin_unlock_bh(&head->lock);
return ret;
}
@@ -8515,7 +8515,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
goto do_error;
if (signal_pending(current))
goto do_interrupted;
- if ((int)msg_len <= sctp_wspace(asoc))
+ if (sk_under_memory_pressure(sk))
+ sk_mem_reclaim(sk);
+ if ((int)msg_len <= sctp_wspace(asoc) &&
+ sk_wmem_schedule(sk, msg_len))
break;
/* Let another process have a go. Since we are going
@@ -8846,6 +8849,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
&oldsp->ep->base.bind_addr, GFP_KERNEL);
+ sctp_auto_asconf_init(newsp);
+
/* Move any messages in the old socket's receive queue that are for the
* peeled off association to the new socket's receive queue.
*/
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 87061a4bb44b..516bc48be5bc 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -97,17 +97,11 @@ static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
return index;
}
-/* Migrates chunks from stream queues to new stream queues if needed,
- * but not across associations. Also, removes those chunks to streams
- * higher than the new max.
- */
-static void sctp_stream_outq_migrate(struct sctp_stream *stream,
- struct sctp_stream *new, __u16 outcnt)
+static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt)
{
struct sctp_association *asoc;
struct sctp_chunk *ch, *temp;
struct sctp_outq *outq;
- int i;
asoc = container_of(stream, struct sctp_association, stream);
outq = &asoc->outqueue;
@@ -131,6 +125,19 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
sctp_chunk_free(ch);
}
+}
+
+/* Migrates chunks from stream queues to new stream queues if needed,
+ * but not across associations. Also, removes those chunks to streams
+ * higher than the new max.
+ */
+static void sctp_stream_outq_migrate(struct sctp_stream *stream,
+ struct sctp_stream *new, __u16 outcnt)
+{
+ int i;
+
+ if (stream->outcnt > outcnt)
+ sctp_stream_shrink_out(stream, outcnt);
if (new) {
/* Here we actually move the old ext stuff into the new
@@ -1136,11 +1143,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
nums = ntohs(addstrm->number_of_streams);
number = stream->outcnt - nums;
- if (result == SCTP_STRRESET_PERFORMED)
+ if (result == SCTP_STRRESET_PERFORMED) {
for (i = number; i < stream->outcnt; i++)
SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
- else
+ } else {
+ sctp_stream_shrink_out(stream, number);
stream->outcnt = number;
+ }
*evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
0, nums, GFP_ATOMIC);
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index c0d55ed62d2e..78302e547b2b 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -148,7 +148,7 @@ void sctp_transport_free(struct sctp_transport *transport)
/* Delete the ICMP proto unreachable timer if it's active. */
if (del_timer(&transport->proto_unreach_timer))
- sctp_association_put(transport->asoc);
+ sctp_transport_put(transport);
sctp_transport_put(transport);
}
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 26dcd02b2d0c..9aab4ab8161b 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1644,6 +1644,9 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
struct smc_sock *smc;
int val, rc;
+ if (level == SOL_TCP && optname == TCP_ULP)
+ return -EOPNOTSUPP;
+
smc = smc_sk(sk);
/* generic setsockopts reaching us here always apply to the
@@ -1665,7 +1668,6 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
lock_sock(sk);
switch (optname) {
- case TCP_ULP:
case TCP_FASTOPEN:
case TCP_FASTOPEN_CONNECT:
case TCP_FASTOPEN_KEY:
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 2c9baf8bf118..e7a6c8dcf6b8 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -770,7 +770,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
return buf_desc;
}
-#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
+#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
bool is_dmb, int bufsize)
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index 2379a02c319d..6c4a7a5938b7 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -169,13 +169,15 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
(req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
!list_empty(&smc->conn.lgr->list)) {
struct smc_connection *conn = &smc->conn;
- struct smcd_diag_dmbinfo dinfo = {
- .linkid = *((u32 *)conn->lgr->id),
- .peer_gid = conn->lgr->peer_gid,
- .my_gid = conn->lgr->smcd->local_gid,
- .token = conn->rmb_desc->token,
- .peer_token = conn->peer_token
- };
+ struct smcd_diag_dmbinfo dinfo;
+
+ memset(&dinfo, 0, sizeof(dinfo));
+
+ dinfo.linkid = *((u32 *)conn->lgr->id);
+ dinfo.peer_gid = conn->lgr->peer_gid;
+ dinfo.my_gid = conn->lgr->smcd->local_gid;
+ dinfo.token = conn->rmb_desc->token;
+ dinfo.peer_token = conn->peer_token;
if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
goto errout;
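
Editor's note: the smc_diag.c hunk replaces a designated-initializer struct with memset() plus field assignments before the structure is handed to nla_put(). Designated initializers are not guaranteed to zero padding bytes, so the memset closes a potential kernel-stack infoleak. The sketch below shows the same zero-then-assign pattern on a hypothetical struct with an alignment hole; it is not the SMC code itself.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical wire struct with a padding hole after 'linkid' on
 * common 64-bit ABIs. */
struct dmb_info {
	uint32_t linkid;
	/* 4 bytes of padding here */
	uint64_t gid;
	uint64_t token;
};

static void fill_info(struct dmb_info *out, uint32_t linkid,
		      uint64_t gid, uint64_t token)
{
	/* Zero everything first so padding bytes never carry stack garbage
	 * when the structure is later copied out verbatim. */
	memset(out, 0, sizeof(*out));
	out->linkid = linkid;
	out->gid = gid;
	out->token = token;
}

int main(void)
{
	struct dmb_info info;

	fill_info(&info, 1, 0x1234, 42);
	printf("struct size %zu, named fields only %zu bytes\n",
	       sizeof(info), sizeof(uint32_t) + 2 * sizeof(uint64_t));
	return 0;
}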
diff --git a/net/socket.c b/net/socket.c
index 1030a612423b..29169045dcfe 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -474,7 +474,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
if (f.file) {
sock = sock_from_file(f.file, err);
if (likely(sock)) {
- *fput_needed = f.flags;
+ *fput_needed = f.flags & FDPUT_FPUT;
return sock;
}
fdput(f);
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 2e0a6f92e563..7404f02702a1 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -81,11 +81,11 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u",
IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id);
- if (unlikely((size_t)rc > sizeof(scopebuf)))
+ if (unlikely((size_t)rc >= sizeof(scopebuf)))
return 0;
len += rc;
- if (unlikely(len > buflen))
+ if (unlikely(len >= buflen))
return 0;
strcat(buf, scopebuf);
@@ -184,7 +184,7 @@ static int rpc_parse_scope_id(struct net *net, const char *buf,
scope_id = dev->ifindex;
dev_put(dev);
} else {
- if (kstrtou32(p, 10, &scope_id) == 0) {
+ if (kstrtou32(p, 10, &scope_id) != 0) {
kfree(p);
return 0;
}
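
Editor's note: both sunrpc/addr.c hunks tighten off-by-one checks: snprintf() returns the length it *wanted* to write, so truncation is rc >= bufsize, and appending must also leave room for the terminating NUL. A small standalone example of the corrected checks, using a hypothetical append_scope() helper:

#include <stdio.h>
#include <string.h>

/* Append a "%<scope_id>" suffix only if it fits, including the NUL.
 * Returns 0 on truncation, the new length otherwise. */
static size_t append_scope(char *buf, size_t buflen, unsigned int scope_id)
{
	char scopebuf[16];
	size_t len = strlen(buf);
	int rc = snprintf(scopebuf, sizeof(scopebuf), "%%%u", scope_id);

	/* snprintf reports the full length it needed; >= means truncated. */
	if (rc < 0 || (size_t)rc >= sizeof(scopebuf))
		return 0;
	if (len + (size_t)rc >= buflen)		/* no room for suffix + NUL */
		return 0;
	strcat(buf, scopebuf);
	return len + (size_t)rc;
}

int main(void)
{
	char addr[64] = "fe80::1";

	if (append_scope(addr, sizeof(addr), 2))
		printf("%s\n", addr);		/* prints fe80::1%2 */
	return 0;
}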
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 8cb7d812ccb8..e61c48c1b37d 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -53,6 +53,7 @@
#include <linux/uaccess.h>
#include <linux/hashtable.h>
+#include "auth_gss_internal.h"
#include "../netns.h"
static const struct rpc_authops authgss_ops;
@@ -147,35 +148,6 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}
-static const void *
-simple_get_bytes(const void *p, const void *end, void *res, size_t len)
-{
- const void *q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- memcpy(res, p, len);
- return q;
-}
-
-static inline const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
-{
- const void *q;
- unsigned int len;
-
- p = simple_get_bytes(p, end, &len, sizeof(len));
- if (IS_ERR(p))
- return p;
- q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- dest->data = kmemdup(p, len, GFP_NOFS);
- if (unlikely(dest->data == NULL))
- return ERR_PTR(-ENOMEM);
- dest->len = len;
- return q;
-}
-
static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h
new file mode 100644
index 000000000000..f6d9631bd9d0
--- /dev/null
+++ b/net/sunrpc/auth_gss/auth_gss_internal.h
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * linux/net/sunrpc/auth_gss/auth_gss_internal.h
+ *
+ * Internal definitions for RPCSEC_GSS client authentication
+ *
+ * Copyright (c) 2000 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ */
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/sunrpc/xdr.h>
+
+static inline const void *
+simple_get_bytes(const void *p, const void *end, void *res, size_t len)
+{
+ const void *q = (const void *)((const char *)p + len);
+ if (unlikely(q > end || q < p))
+ return ERR_PTR(-EFAULT);
+ memcpy(res, p, len);
+ return q;
+}
+
+static inline const void *
+simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
+{
+ const void *q;
+ unsigned int len;
+
+ p = simple_get_bytes(p, end, &len, sizeof(len));
+ if (IS_ERR(p))
+ return p;
+ q = (const void *)((const char *)p + len);
+ if (unlikely(q > end || q < p))
+ return ERR_PTR(-EFAULT);
+ if (len) {
+ dest->data = kmemdup(p, len, GFP_NOFS);
+ if (unlikely(dest->data == NULL))
+ return ERR_PTR(-ENOMEM);
+ } else
+ dest->data = NULL;
+ dest->len = len;
+ return q;
+}
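
Editor's note: the new auth_gss_internal.h consolidates the two private copies of simple_get_bytes()/simple_get_netobj() and makes the netobj variant tolerate a zero-length object instead of allocating for it. The userspace sketch below mirrors that bounds-checked cursor parsing with plain malloc(); names and types are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct netobj { unsigned int len; unsigned char *data; };

/* Bounds-checked cursor read over [p, end); returns new cursor or NULL. */
static const void *get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const char *q = (const char *)p + len;

	if (q > (const char *)end || q < (const char *)p)
		return NULL;
	memcpy(res, p, len);
	return q;
}

static const void *get_netobj(const void *p, const void *end, struct netobj *dest)
{
	unsigned int len;

	p = get_bytes(p, end, &len, sizeof(len));
	if (!p)
		return NULL;
	if ((const char *)p + len > (const char *)end)
		return NULL;
	if (len) {
		dest->data = malloc(len);
		if (!dest->data)
			return NULL;
		memcpy(dest->data, p, len);
	} else {
		dest->data = NULL;	/* zero-length object: no allocation */
	}
	dest->len = len;
	return (const char *)p + len;
}

int main(void)
{
	unsigned char buf[sizeof(unsigned int)] = { 0 };	/* encodes len == 0 */
	struct netobj obj;

	if (get_netobj(buf, buf + sizeof(buf), &obj))
		printf("len=%u data=%p\n", obj.len, (void *)obj.data);
	return 0;
}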
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 7bb2514aadd9..14f2823ad6c2 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -46,6 +46,8 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/gss_krb5_enctypes.h>
+#include "auth_gss_internal.h"
+
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
@@ -187,35 +189,6 @@ get_gss_krb5_enctype(int etype)
return NULL;
}
-static const void *
-simple_get_bytes(const void *p, const void *end, void *res, int len)
-{
- const void *q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- memcpy(res, p, len);
- return q;
-}
-
-static const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
-{
- const void *q;
- unsigned int len;
-
- p = simple_get_bytes(p, end, &len, sizeof(len));
- if (IS_ERR(p))
- return p;
- q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- res->data = kmemdup(p, len, GFP_NOFS);
- if (unlikely(res->data == NULL))
- return ERR_PTR(-ENOMEM);
- res->len = len;
- return q;
-}
-
static inline const void *
get_key(const void *p, const void *end,
struct krb5_ctx *ctx, struct crypto_skcipher **res)
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 5fec3abbe19b..c7d88f979c56 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -61,6 +61,8 @@ gss_mech_free(struct gss_api_mech *gm)
for (i = 0; i < gm->gm_pf_num; i++) {
pf = &gm->gm_pfs[i];
+ if (pf->domain)
+ auth_domain_put(pf->domain);
kfree(pf->auth_domain_name);
pf->auth_domain_name = NULL;
}
@@ -83,6 +85,7 @@ make_auth_domain_name(char *name)
static int
gss_mech_svc_setup(struct gss_api_mech *gm)
{
+ struct auth_domain *dom;
struct pf_desc *pf;
int i, status;
@@ -92,10 +95,13 @@ gss_mech_svc_setup(struct gss_api_mech *gm)
status = -ENOMEM;
if (pf->auth_domain_name == NULL)
goto out;
- status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor,
- pf->auth_domain_name);
- if (status)
+ dom = svcauth_gss_register_pseudoflavor(
+ pf->pseudoflavor, pf->auth_domain_name);
+ if (IS_ERR(dom)) {
+ status = PTR_ERR(dom);
goto out;
+ }
+ pf->domain = dom;
}
return 0;
out:
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 68830e88b6e9..a85d78d2bdb7 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -779,7 +779,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom)
EXPORT_SYMBOL_GPL(svcauth_gss_flavor);
-int
+struct auth_domain *
svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
{
struct gss_domain *new;
@@ -796,21 +796,23 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
new->h.flavour = &svcauthops_gss;
new->pseudoflavor = pseudoflavor;
- stat = 0;
test = auth_domain_lookup(name, &new->h);
- if (test != &new->h) { /* Duplicate registration */
+ if (test != &new->h) {
+ pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n",
+ name);
+ stat = -EADDRINUSE;
auth_domain_put(test);
- kfree(new->h.name);
- goto out_free_dom;
+ goto out_free_name;
}
- return 0;
+ return test;
+out_free_name:
+ kfree(new->h.name);
out_free_dom:
kfree(new);
out:
- return stat;
+ return ERR_PTR(stat);
}
-
EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
static inline int
@@ -1077,9 +1079,9 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
struct gssp_in_token *in_token)
{
struct kvec *argv = &rqstp->rq_arg.head[0];
- unsigned int page_base, length;
- int pages, i, res;
- size_t inlen;
+ unsigned int length, pgto_offs, pgfrom_offs;
+ int pages, i, res, pgto, pgfrom;
+ size_t inlen, to_offs, from_offs;
res = gss_read_common_verf(gc, argv, authp, in_handle);
if (res)
@@ -1107,17 +1109,24 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
inlen -= length;
- i = 1;
- page_base = rqstp->rq_arg.page_base;
+ to_offs = length;
+ from_offs = rqstp->rq_arg.page_base;
while (inlen) {
- length = min_t(unsigned int, inlen, PAGE_SIZE);
- memcpy(page_address(in_token->pages[i]),
- page_address(rqstp->rq_arg.pages[i]) + page_base,
+ pgto = to_offs >> PAGE_SHIFT;
+ pgfrom = from_offs >> PAGE_SHIFT;
+ pgto_offs = to_offs & ~PAGE_MASK;
+ pgfrom_offs = from_offs & ~PAGE_MASK;
+
+ length = min_t(unsigned int, inlen,
+ min_t(unsigned int, PAGE_SIZE - pgto_offs,
+ PAGE_SIZE - pgfrom_offs));
+ memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
+ page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
length);
+ to_offs += length;
+ from_offs += length;
inlen -= length;
- page_base = 0;
- i++;
}
return 0;
}
@@ -1757,11 +1766,14 @@ static int
svcauth_gss_release(struct svc_rqst *rqstp)
{
struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
- struct rpc_gss_wire_cred *gc = &gsd->clcred;
+ struct rpc_gss_wire_cred *gc;
struct xdr_buf *resbuf = &rqstp->rq_res;
int stat = -EINVAL;
struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
+ if (!gsd)
+ goto out;
+ gc = &gsd->clcred;
if (gc->gc_proc != RPC_GSS_PROC_DATA)
goto out;
/* Release can be called twice, but we only wrap once. */
@@ -1802,10 +1814,10 @@ out_err:
if (rqstp->rq_cred.cr_group_info)
put_group_info(rqstp->rq_cred.cr_group_info);
rqstp->rq_cred.cr_group_info = NULL;
- if (gsd->rsci)
+ if (gsd && gsd->rsci) {
cache_put(&gsd->rsci->h, sn->rsc_cache);
- gsd->rsci = NULL;
-
+ gsd->rsci = NULL;
+ }
return stat;
}
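
Editor's note: the gss_read_proxy_verf() hunk replaces a page-at-a-time copy, which assumed source and destination pages stayed aligned, with one that tracks independent byte offsets and copies at most up to the nearer page boundary on either side. The toy program below demonstrates that offset bookkeeping with 8-byte "pages"; it is a sketch of the pattern, not the sunrpc code.

#include <stdio.h>
#include <string.h>

#define PG 8	/* toy page size so the boundary handling is visible */

/* Copy 'len' bytes between two arrays of PG-sized pages, where source and
 * destination start at independent byte offsets. */
static void copy_across_pages(char dst[][PG], size_t to_off,
			      char src[][PG], size_t from_off, size_t len)
{
	while (len) {
		size_t pgto = to_off / PG, pgfrom = from_off / PG;
		size_t to_rem = PG - to_off % PG;
		size_t from_rem = PG - from_off % PG;
		size_t n = len;

		if (n > to_rem)
			n = to_rem;
		if (n > from_rem)
			n = from_rem;	/* never cross either page boundary */
		memcpy(&dst[pgto][to_off % PG], &src[pgfrom][from_off % PG], n);
		to_off += n;
		from_off += n;
		len -= n;
	}
}

int main(void)
{
	char src[3][PG], dst[3][PG];

	memset(dst, 0, sizeof(dst));
	for (size_t i = 0; i < sizeof(src); i++)
		((char *)src)[i] = (char)('A' + i % 26);

	copy_across_pages(dst, 3, src, 5, 12);
	fwrite(&dst[0][3], 1, 12, stdout);	/* prints FGHIJKLMNOPQ */
	putchar('\n');
	return 0;
}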
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 4fda18d47e2c..285eab5b43c8 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1331,6 +1331,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
q.len = strlen(gssd_dummy_clnt_dir[0].name);
clnt_dentry = d_hash_and_lookup(gssd_dentry, &q);
if (!clnt_dentry) {
+ __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
pipe_dentry = ERR_PTR(-ENOENT);
goto out;
}
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 08b5fa4a2852..ba8f36731228 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -981,8 +981,8 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, len);
if (unlikely(p == NULL))
goto out_fail;
- dprintk("RPC: %5u RPCB_%s reply: %s\n", req->rq_task->tk_pid,
- req->rq_task->tk_msg.rpc_proc->p_name, (char *)p);
+ dprintk("RPC: %5u RPCB_%s reply: %*pE\n", req->rq_task->tk_pid,
+ req->rq_task->tk_msg.rpc_proc->p_name, len, (char *)p);
if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len,
sap, sizeof(address)) == 0)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index d65f8d35de87..11d393c04772 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1330,7 +1330,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
sendit:
if (svc_authorise(rqstp))
- goto close;
+ goto close_xprt;
return 1; /* Caller can now send it */
dropit:
@@ -1339,6 +1339,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
return 0;
close:
+ svc_authorise(rqstp);
+close_xprt:
if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
svc_close_xprt(rqstp->rq_xprt);
dprintk("svc: svc_process close\n");
@@ -1347,7 +1349,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
err_short_len:
svc_printk(rqstp, "short len %zd, dropping request\n",
argv->iov_len);
- goto close;
+ goto close_xprt;
err_bad_rpc:
serv->sv_stats->rpcbadfmt++;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 6cf0fd37cbf0..4b56b949a463 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -103,8 +103,17 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
-/*
- * Format the transport list for printing
+/**
+ * svc_print_xprts - Format the transport list for printing
+ * @buf: target buffer for formatted address
+ * @maxlen: length of target buffer
+ *
+ * Fills in @buf with a string containing a list of transport names, each name
+ * terminated with '\n'. If the buffer is too small, some entries may be
+ * missing, but it is guaranteed that all lines in the output buffer are
+ * complete.
+ *
+ * Returns positive length of the filled-in string.
*/
int svc_print_xprts(char *buf, int maxlen)
{
@@ -117,9 +126,9 @@ int svc_print_xprts(char *buf, int maxlen)
list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
int slen;
- sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
- slen = strlen(tmpstr);
- if (len + slen > maxlen)
+ slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n",
+ xcl->xcl_name, xcl->xcl_max_payload);
+ if (slen >= sizeof(tmpstr) || len + slen >= maxlen)
break;
len += slen;
strcat(buf, tmpstr);
@@ -878,9 +887,6 @@ int svc_send(struct svc_rqst *rqstp)
if (!xprt)
goto out;
- /* release the receive skb before sending the reply */
- xprt->xpt_ops->xpo_release_rqst(rqstp);
-
/* calculate over-all length */
xb = &rqstp->rq_res;
xb->len = xb->head[0].iov_len +
@@ -1046,7 +1052,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
struct svc_xprt *xprt;
int ret = 0;
- spin_lock(&serv->sv_lock);
+ spin_lock_bh(&serv->sv_lock);
list_for_each_entry(xprt, xprt_list, xpt_list) {
if (xprt->xpt_net != net)
continue;
@@ -1054,7 +1060,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
set_bit(XPT_CLOSE, &xprt->xpt_flags);
svc_xprt_enqueue(xprt);
}
- spin_unlock(&serv->sv_lock);
+ spin_unlock_bh(&serv->sv_lock);
return ret;
}
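
Editor's note: svc_print_xprts() now formats each entry with a bounded snprintf() and stops before either the temporary buffer or the output buffer would overflow, so the result only ever contains complete lines. A self-contained example of that "whole entries only" loop, with made-up transport names:

#include <stdio.h>
#include <string.h>

/* Append "name payload\n" entries into buf, never emitting a partial line.
 * Returns the number of bytes written. */
static int print_entries(char *buf, size_t maxlen, const char * const names[],
			 const int payloads[], size_t n)
{
	size_t len = 0;

	buf[0] = '\0';
	for (size_t i = 0; i < n; i++) {
		char tmp[64];
		int slen = snprintf(tmp, sizeof(tmp), "%s %d\n",
				    names[i], payloads[i]);

		/* Stop if the entry itself or the output would overflow. */
		if (slen < 0 || (size_t)slen >= sizeof(tmp) ||
		    len + (size_t)slen >= maxlen)
			break;
		memcpy(buf + len, tmp, (size_t)slen + 1);	/* include NUL */
		len += (size_t)slen;
	}
	return (int)len;
}

int main(void)
{
	const char *names[] = { "udp", "tcp", "rdma" };
	const int payloads[] = { 32768, 1048576, 262144 };
	char buf[32];
	int len = print_entries(buf, sizeof(buf), names, payloads, 3);

	printf("%d bytes:\n%s", len, buf);	/* third entry does not fit */
	return 0;
}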
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 8566531c2f10..d0b5a1c47a32 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -636,6 +636,8 @@ svc_udp_sendto(struct svc_rqst *rqstp)
{
int error;
+ svc_release_udp_skb(rqstp);
+
error = svc_sendto(rqstp, &rqstp->rq_res);
if (error == -ECONNREFUSED)
/* ICMP error on earlier request. */
@@ -1173,6 +1175,8 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
int sent;
__be32 reclen;
+ svc_release_skb(rqstp);
+
/* Set up the first element of the reply kvec.
* Any other kvecs that may be in use have been taken
* care of by the server implementation itself.
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index ac16f509c95c..540e340e2565 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1036,6 +1036,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
base = 0;
} else {
base -= buf->head[0].iov_len;
+ subbuf->head[0].iov_base = buf->head[0].iov_base;
subbuf->head[0].iov_len = 0;
}
@@ -1048,6 +1049,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
base = 0;
} else {
base -= buf->page_len;
+ subbuf->pages = buf->pages;
+ subbuf->page_base = 0;
subbuf->page_len = 0;
}
@@ -1059,6 +1062,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
base = 0;
} else {
base -= buf->tail[0].iov_len;
+ subbuf->tail[0].iov_base = buf->tail[0].iov_base;
subbuf->tail[0].iov_len = 0;
}
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 5e7c13aa66d0..9c4235ce5789 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -143,31 +143,64 @@ out:
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
+static void
+xprt_class_release(const struct xprt_class *t)
+{
+ module_put(t->owner);
+}
+
+static const struct xprt_class *
+xprt_class_find_by_netid_locked(const char *netid)
+{
+ const struct xprt_class *t;
+ unsigned int i;
+
+ list_for_each_entry(t, &xprt_list, list) {
+ for (i = 0; t->netid[i][0] != '\0'; i++) {
+ if (strcmp(t->netid[i], netid) != 0)
+ continue;
+ if (!try_module_get(t->owner))
+ continue;
+ return t;
+ }
+ }
+ return NULL;
+}
+
+static const struct xprt_class *
+xprt_class_find_by_netid(const char *netid)
+{
+ const struct xprt_class *t;
+
+ spin_lock(&xprt_list_lock);
+ t = xprt_class_find_by_netid_locked(netid);
+ if (!t) {
+ spin_unlock(&xprt_list_lock);
+ request_module("rpc%s", netid);
+ spin_lock(&xprt_list_lock);
+ t = xprt_class_find_by_netid_locked(netid);
+ }
+ spin_unlock(&xprt_list_lock);
+ return t;
+}
+
/**
* xprt_load_transport - load a transport implementation
- * @transport_name: transport to load
+ * @netid: transport to load
*
* Returns:
* 0: transport successfully loaded
* -ENOENT: transport module not available
*/
-int xprt_load_transport(const char *transport_name)
+int xprt_load_transport(const char *netid)
{
- struct xprt_class *t;
- int result;
+ const struct xprt_class *t;
- result = 0;
- spin_lock(&xprt_list_lock);
- list_for_each_entry(t, &xprt_list, list) {
- if (strcmp(t->name, transport_name) == 0) {
- spin_unlock(&xprt_list_lock);
- goto out;
- }
- }
- spin_unlock(&xprt_list_lock);
- result = request_module("xprt%s", transport_name);
-out:
- return result;
+ t = xprt_class_find_by_netid(netid);
+ if (!t)
+ return -ENOENT;
+ xprt_class_release(t);
+ return 0;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
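
Editor's note: xprt_load_transport() now looks transports up by netid against each class's netid[] table and pins the owning module with try_module_get() while doing so, falling back to request_module() only when nothing matches. The sketch below models the lookup-and-pin part with a plain refcount; the registry contents and helper names are invented for illustration.

#include <stdio.h>
#include <string.h>

/* Toy transport registry keyed by netid strings (hypothetical types). */
struct xprt_class {
	const char *name;
	const char *netid[4];	/* "" terminates the list */
	int refcount;		/* stands in for try_module_get()/module_put() */
};

static struct xprt_class classes[] = {
	{ "tcp-transport",  { "tcp", "tcp6", "" } },
	{ "udp-transport",  { "udp", "udp6", "" } },
	{ "rdma-transport", { "rdma", "rdma6", "" } },
};

static struct xprt_class *find_by_netid(const char *netid)
{
	for (size_t c = 0; c < sizeof(classes) / sizeof(classes[0]); c++) {
		for (size_t i = 0; classes[c].netid[i][0] != '\0'; i++) {
			if (strcmp(classes[c].netid[i], netid) != 0)
				continue;
			classes[c].refcount++;	/* pin while in use */
			return &classes[c];
		}
	}
	return NULL;
}

static int load_transport(const char *netid)
{
	struct xprt_class *t = find_by_netid(netid);

	if (!t) {
		/* the kernel retries after request_module("rpc%s", netid) */
		printf("no transport for netid %s\n", netid);
		return -1;
	}
	printf("found %s for netid %s\n", t->name, netid);
	t->refcount--;		/* done with the lookup: drop the pin */
	return 0;
}

int main(void)
{
	load_transport("rdma6");	/* matches the rdma class */
	load_transport("sctp");		/* not registered */
	return 0;
}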
diff --git a/net/sunrpc/xprtrdma/module.c b/net/sunrpc/xprtrdma/module.c
index 620327c01302..45c5b41ac8dc 100644
--- a/net/sunrpc/xprtrdma/module.c
+++ b/net/sunrpc/xprtrdma/module.c
@@ -24,6 +24,7 @@ MODULE_DESCRIPTION("RPC/RDMA Transport");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("svcrdma");
MODULE_ALIAS("xprtrdma");
+MODULE_ALIAS("rpcrdma6");
static void __exit rpc_rdma_cleanup(void)
{
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index b9827665ff35..cf2272a90f13 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -256,6 +256,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt)
{
dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
+ xprt_rdma_free_addresses(xprt);
xprt_free(xprt);
module_put(THIS_MODULE);
}
@@ -307,9 +308,9 @@ xprt_setup_rdma_bc(struct xprt_create *args)
xprt->timeout = &xprt_rdma_bc_timeout;
xprt_set_bound(xprt);
xprt_set_connected(xprt);
- xprt->bind_timeout = RPCRDMA_BIND_TO;
- xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
- xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;
+ xprt->bind_timeout = 0;
+ xprt->reestablish_timeout = 0;
+ xprt->idle_timeout = 0;
xprt->prot = XPRT_TRANSPORT_BC_RDMA;
xprt->tsh_size = 0;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index b24d5b8f2fee..252495ff9010 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -226,6 +226,26 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}
+/**
+ * svc_rdma_release_rqst - Release transport-specific per-rqst resources
+ * @rqstp: svc_rqst being released
+ *
+ * Ensure that the recv_ctxt is released whether or not a Reply
+ * was sent. For example, the client could close the connection,
+ * or svc_process could drop an RPC, before the Reply is sent.
+ */
+void svc_rdma_release_rqst(struct svc_rqst *rqstp)
+{
+ struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt;
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+ struct svcxprt_rdma *rdma =
+ container_of(xprt, struct svcxprt_rdma, sc_xprt);
+
+ rqstp->rq_xprt_ctxt = NULL;
+ if (ctxt)
+ svc_rdma_recv_ctxt_put(rdma, ctxt);
+}
+
static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
struct svc_rdma_recv_ctxt *ctxt)
{
@@ -248,6 +268,8 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
struct svc_rdma_recv_ctxt *ctxt;
+ if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
+ return 0;
ctxt = svc_rdma_recv_ctxt_get(rdma);
if (!ctxt)
return -ENOMEM;
@@ -704,6 +726,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
__be32 *p;
int ret;
+ rqstp->rq_xprt_ctxt = NULL;
+
spin_lock(&rdma_xprt->sc_rq_dto_lock);
ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
if (ctxt) {
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index dc1951759a8e..22f135263815 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -331,8 +331,6 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
if (atomic_sub_return(cc->cc_sqecount,
&rdma->sc_sq_avail) > 0) {
ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
- trace_svcrdma_post_rw(&cc->cc_cqe,
- cc->cc_sqecount, ret);
if (ret)
break;
return 0;
@@ -345,6 +343,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
trace_svcrdma_sq_retry(rdma);
} while (1);
+ trace_svcrdma_sq_post_err(rdma, ret);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
/* If even one was posted, there will be a completion. */
@@ -680,7 +679,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
struct svc_rdma_read_info *info,
__be32 *p)
{
- unsigned int i;
int ret;
ret = -EINVAL;
@@ -703,12 +701,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
info->ri_chunklen += rs_length;
}
- /* Pages under I/O have been copied to head->rc_pages.
- * Prevent their premature release by svc_xprt_release() .
- */
- for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
- rqstp->rq_pages[i] = NULL;
-
return ret;
}
@@ -803,6 +795,26 @@ out:
return ret;
}
+/* Pages under I/O have been copied to head->rc_pages. Ensure they
+ * are not released by svc_xprt_release() until the I/O is complete.
+ *
+ * This has to be done after all Read WRs are constructed to properly
+ * handle a page that is part of I/O on behalf of two different RDMA
+ * segments.
+ *
+ * Do this only if I/O has been posted. Otherwise, we do indeed want
+ * svc_xprt_release() to clean things up properly.
+ */
+static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
+ const unsigned int start,
+ const unsigned int num_pages)
+{
+ unsigned int i;
+
+ for (i = start; i < num_pages + start; i++)
+ rqstp->rq_pages[i] = NULL;
+}
+
/**
* svc_rdma_recv_read_chunk - Pull a Read chunk from the client
* @rdma: controlling RDMA transport
@@ -856,6 +868,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
if (ret < 0)
goto out_err;
+ svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
return 0;
out_err:
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index e8ad7ddf347a..4062cd624b26 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -310,15 +310,17 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
}
svc_xprt_get(&rdma->sc_xprt);
+ trace_svcrdma_post_send(wr);
ret = ib_post_send(rdma->sc_qp, wr, NULL);
- trace_svcrdma_post_send(wr, ret);
- if (ret) {
- set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
- svc_xprt_put(&rdma->sc_xprt);
- wake_up(&rdma->sc_send_wait);
- }
- break;
+ if (ret)
+ break;
+ return 0;
}
+
+ trace_svcrdma_sq_post_err(rdma, ret);
+ set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
+ svc_xprt_put(&rdma->sc_xprt);
+ wake_up(&rdma->sc_send_wait);
return ret;
}
@@ -637,10 +639,11 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
while (remaining) {
len = min_t(u32, PAGE_SIZE - pageoff, remaining);
- memcpy(dst, page_address(*ppages), len);
+ memcpy(dst, page_address(*ppages) + pageoff, len);
remaining -= len;
dst += len;
pageoff = 0;
+ ppages++;
}
}
@@ -906,12 +909,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
wr_lst, rp_ch);
if (ret < 0)
goto err1;
- ret = 0;
-
-out:
- rqstp->rq_xprt_ctxt = NULL;
- svc_rdma_recv_ctxt_put(rdma, rctxt);
- return ret;
+ return 0;
err2:
if (ret != -E2BIG && ret != -EINVAL)
@@ -920,14 +918,12 @@ out:
ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
if (ret < 0)
goto err1;
- ret = 0;
- goto out;
+ return 0;
err1:
svc_rdma_send_ctxt_put(rdma, sctxt);
err0:
trace_svcrdma_send_failed(rqstp, ret);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
- ret = -ENOTCONN;
- goto out;
+ return -ENOTCONN;
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 7308992b7a18..f1824303e076 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -71,7 +71,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
struct sockaddr *sa, int salen,
int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
-static void svc_rdma_release_rqst(struct svc_rqst *);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
@@ -616,10 +615,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
return NULL;
}
-static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
-{
-}
-
/*
* When connected, an svc_xprt has at least two references:
*
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index f56f36b4d742..fdd14908eacb 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -854,6 +854,7 @@ static struct xprt_class xprt_rdma = {
.owner = THIS_MODULE,
.ident = XPRT_TRANSPORT_RDMA,
.setup = xprt_setup_rdma,
+ .netid = { "rdma", "rdma6", "" },
};
void xprt_rdma_cleanup(void)
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 9dc059dea689..798fbd89ed42 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -3241,6 +3241,7 @@ static struct xprt_class xs_local_transport = {
.owner = THIS_MODULE,
.ident = XPRT_TRANSPORT_LOCAL,
.setup = xs_setup_local,
+ .netid = { "" },
};
static struct xprt_class xs_udp_transport = {
@@ -3249,6 +3250,7 @@ static struct xprt_class xs_udp_transport = {
.owner = THIS_MODULE,
.ident = XPRT_TRANSPORT_UDP,
.setup = xs_setup_udp,
+ .netid = { "udp", "udp6", "" },
};
static struct xprt_class xs_tcp_transport = {
@@ -3257,6 +3259,7 @@ static struct xprt_class xs_tcp_transport = {
.owner = THIS_MODULE,
.ident = XPRT_TRANSPORT_TCP,
.setup = xs_setup_tcp,
+ .netid = { "tcp", "tcp6", "" },
};
static struct xprt_class xs_bc_tcp_transport = {
@@ -3265,6 +3268,7 @@ static struct xprt_class xs_bc_tcp_transport = {
.owner = THIS_MODULE,
.ident = XPRT_TRANSPORT_BC_TCP,
.setup = xs_setup_bc_tcp,
+ .netid = { "" },
};
/**
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index d8026543bf4c..68107bf92c7e 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -181,7 +181,7 @@ static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
}
/* We have to transmit across all bearers */
- skb_queue_head_init(&_xmitq);
+ __skb_queue_head_init(&_xmitq);
for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
if (!bb->dests[bearer_id])
continue;
@@ -237,7 +237,7 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
struct sk_buff_head xmitq;
int rc = 0;
- skb_queue_head_init(&xmitq);
+ __skb_queue_head_init(&xmitq);
tipc_bcast_lock(net);
if (tipc_link_bc_peers(l))
rc = tipc_link_xmit(l, pkts, &xmitq);
@@ -267,7 +267,7 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
u32 dnode, selector;
selector = msg_link_selector(buf_msg(skb_peek(pkts)));
- skb_queue_head_init(&_pkts);
+ __skb_queue_head_init(&_pkts);
list_for_each_entry_safe(dst, tmp, &dests->list, list) {
dnode = dst->node;
@@ -299,7 +299,7 @@ int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
int rc = 0;
skb_queue_head_init(&inputq);
- skb_queue_head_init(&localq);
+ __skb_queue_head_init(&localq);
/* Clone packets before they are consumed by next call */
if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 49bb9fc0aa06..ce0f067d0faf 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -93,6 +93,11 @@ out_sk_rht:
static void __net_exit tipc_exit_net(struct net *net)
{
tipc_net_stop(net);
+
+	/* Make sure the tipc_net_finalize_work has stopped
+	 * before releasing the resources.
+	 */
+ flush_scheduled_work();
tipc_bcast_stop(net);
tipc_nametbl_stop(net);
tipc_sk_rht_destroy(net);
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 3ee93b5c19b6..b656385efad6 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -199,7 +199,7 @@ void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf)
struct tipc_member *m, *tmp;
struct sk_buff_head xmitq;
- skb_queue_head_init(&xmitq);
+ __skb_queue_head_init(&xmitq);
rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, &xmitq);
tipc_group_update_member(m, 0);
@@ -273,8 +273,8 @@ static struct tipc_member *tipc_group_find_node(struct tipc_group *grp,
return NULL;
}
-static void tipc_group_add_to_tree(struct tipc_group *grp,
- struct tipc_member *m)
+static int tipc_group_add_to_tree(struct tipc_group *grp,
+ struct tipc_member *m)
{
u64 nkey, key = (u64)m->node << 32 | m->port;
struct rb_node **n, *parent = NULL;
@@ -291,10 +291,11 @@ static void tipc_group_add_to_tree(struct tipc_group *grp,
else if (key > nkey)
n = &(*n)->rb_right;
else
- return;
+ return -EEXIST;
}
rb_link_node(&m->tree_node, parent, n);
rb_insert_color(&m->tree_node, &grp->members);
+ return 0;
}
static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
@@ -302,6 +303,7 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
u32 instance, int state)
{
struct tipc_member *m;
+ int ret;
m = kzalloc(sizeof(*m), GFP_ATOMIC);
if (!m)
@@ -314,8 +316,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
m->port = port;
m->instance = instance;
m->bc_acked = grp->bc_snd_nxt - 1;
+ ret = tipc_group_add_to_tree(grp, m);
+ if (ret < 0) {
+ kfree(m);
+ return NULL;
+ }
grp->member_cnt++;
- tipc_group_add_to_tree(grp, m);
tipc_nlist_add(&grp->dests, m->node);
m->state = state;
return m;
@@ -435,7 +441,7 @@ bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
return true;
if (state == MBR_PENDING && adv == ADV_IDLE)
return true;
- skb_queue_head_init(&xmitq);
+ __skb_queue_head_init(&xmitq);
tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, &xmitq);
tipc_node_distr_xmit(grp->net, &xmitq);
return true;
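
Editor's note: tipc_group_add_to_tree() now reports -EEXIST instead of silently returning when the key is already in the rbtree, and tipc_group_create_member() frees the new member on that error rather than leaking it and over-counting. The standalone example below uses an ordinary binary search tree to show the same insert-or-reject contract; it is not the TIPC rbtree code.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct member {
	uint64_t key;		/* node << 32 | port in the kernel version */
	struct member *left, *right;
};

/* Insert into a plain BST; refuse duplicates instead of silently dropping
 * the caller's node. */
static int tree_add(struct member **root, struct member *m)
{
	struct member **n = root;

	while (*n) {
		if (m->key < (*n)->key)
			n = &(*n)->left;
		else if (m->key > (*n)->key)
			n = &(*n)->right;
		else
			return -EEXIST;
	}
	*n = m;
	return 0;
}

static struct member *create_member(struct member **root, uint64_t key)
{
	struct member *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;
	m->key = key;
	if (tree_add(root, m) < 0) {
		free(m);	/* duplicate: do not leak, do not double-count */
		return NULL;
	}
	return m;
}

int main(void)
{
	struct member *root = NULL;

	printf("first insert: %p\n", (void *)create_member(&root, 42));
	printf("duplicate:    %p\n", (void *)create_member(&root, 42));
	return 0;
}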
diff --git a/net/tipc/link.c b/net/tipc/link.c
index cc9a0485536b..bd28ac7f2195 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -914,9 +914,7 @@ void tipc_link_reset(struct tipc_link *l)
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
struct sk_buff_head *xmitq)
{
- struct tipc_msg *hdr = buf_msg(skb_peek(list));
unsigned int maxwin = l->window;
- int imp = msg_importance(hdr);
unsigned int mtu = l->mtu;
u16 ack = l->rcv_nxt - 1;
u16 seqno = l->snd_nxt;
@@ -925,13 +923,20 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
struct sk_buff_head *backlogq = &l->backlogq;
struct sk_buff *skb, *_skb, **tskb;
int pkt_cnt = skb_queue_len(list);
+ struct tipc_msg *hdr;
int rc = 0;
+ int imp;
+
+ if (pkt_cnt <= 0)
+ return 0;
+ hdr = buf_msg(skb_peek(list));
if (unlikely(msg_size(hdr) > mtu)) {
- skb_queue_purge(list);
+ __skb_queue_purge(list);
return -EMSGSIZE;
}
+ imp = msg_importance(hdr);
/* Allow oversubscription of one data msg per source at congestion */
if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
if (imp == TIPC_SYSTEM_IMPORTANCE) {
@@ -957,7 +962,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
if (likely(skb_queue_len(transmq) < maxwin)) {
_skb = skb_clone(skb, GFP_ATOMIC);
if (!_skb) {
- skb_queue_purge(list);
+ __skb_queue_purge(list);
return -ENOBUFS;
}
__skb_dequeue(list);
@@ -1429,7 +1434,7 @@ void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
struct sk_buff *skb;
u32 dnode = l->addr;
- skb_queue_head_init(&tnlq);
+ __skb_queue_head_init(&tnlq);
skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
INT_H_SIZE, BASIC_H_SIZE,
dnode, onode, 0, 0, 0);
@@ -1465,8 +1470,8 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
if (!tnl)
return;
- skb_queue_head_init(&tnlq);
- skb_queue_head_init(&tmpxq);
+ __skb_queue_head_init(&tnlq);
+ __skb_queue_head_init(&tmpxq);
/* At least one packet required for safe algorithm => add dummy */
skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
@@ -1476,7 +1481,7 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
pr_warn("%sunable to create tunnel packet\n", link_co_err);
return;
}
- skb_queue_tail(&tnlq, skb);
+ __skb_queue_tail(&tnlq, skb);
tipc_link_xmit(l, &tnlq, &tmpxq);
__skb_queue_purge(&tmpxq);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index cbccf1791d3c..0ac270444974 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -140,18 +140,14 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (fragid == FIRST_FRAGMENT) {
if (unlikely(head))
goto err;
- if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
+ *buf = NULL;
+ if (skb_has_frag_list(frag) && __skb_linearize(frag))
+ goto err;
+ frag = skb_unshare(frag, GFP_ATOMIC);
+ if (unlikely(!frag))
goto err;
head = *headbuf = frag;
- *buf = NULL;
TIPC_SKB_CB(head)->tail = NULL;
- if (skb_is_nonlinear(head)) {
- skb_walk_frags(head, tail) {
- TIPC_SKB_CB(head)->tail = tail;
- }
- } else {
- skb_frag_list_init(head);
- }
return 0;
}
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 29e684054abe..5086e27d3011 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -255,8 +255,9 @@ err_out:
static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg)
{
- int err;
+ struct nlmsghdr *nlh;
struct sk_buff *arg;
+ int err;
if (msg->req_type && (!msg->req_size ||
!TLV_CHECK_TYPE(msg->req, msg->req_type)))
@@ -285,6 +286,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
return -ENOMEM;
}
+ nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI);
+ if (!nlh) {
+ kfree_skb(arg);
+ kfree_skb(msg->rep);
+ msg->rep = NULL;
+ return -EMSGSIZE;
+ }
+ nlmsg_end(arg, nlh);
+
err = __tipc_nl_compat_dumpit(cmd, msg, arg);
if (err) {
kfree_skb(msg->rep);
@@ -661,7 +671,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
if (err)
return err;
- link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
+ link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST]));
link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
TIPC_MAX_LINK_NAME);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index e67ffd194927..a188c2590137 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1368,13 +1368,14 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
int rc;
if (in_own_node(net, dnode)) {
+ spin_lock_init(&list->lock);
tipc_sk_rcv(net, list);
return 0;
}
n = tipc_node_find(net, dnode);
if (unlikely(!n)) {
- skb_queue_purge(list);
+ __skb_queue_purge(list);
return -EHOSTUNREACH;
}
@@ -1383,7 +1384,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
if (unlikely(bearer_id == INVALID_BEARER_ID)) {
tipc_node_read_unlock(n);
tipc_node_put(n);
- skb_queue_purge(list);
+ __skb_queue_purge(list);
return -EHOSTUNREACH;
}
@@ -1415,7 +1416,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
{
struct sk_buff_head head;
- skb_queue_head_init(&head);
+ __skb_queue_head_init(&head);
__skb_queue_tail(&head, skb);
tipc_node_xmit(net, &head, dnode, selector);
return 0;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 40947ad90222..3c41fb8edc5f 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -800,7 +800,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
msg_set_nameupper(hdr, seq->upper);
/* Build message as chain of buffers */
- skb_queue_head_init(&pkts);
+ __skb_queue_head_init(&pkts);
rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
/* Send message if build was successful */
@@ -841,7 +841,7 @@ static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
/* Build message as chain of buffers */
- skb_queue_head_init(&pkts);
+ __skb_queue_head_init(&pkts);
mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
@@ -1046,7 +1046,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
msg_set_grp_bc_ack_req(hdr, ack);
/* Build message as chain of buffers */
- skb_queue_head_init(&pkts);
+ __skb_queue_head_init(&pkts);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
@@ -1187,6 +1187,9 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
spin_lock_bh(&inputq->lock);
if (skb_peek(arrvq) == skb) {
skb_queue_splice_tail_init(&tmpq, inputq);
+			/* Drop the refcnt on this skb that was taken
+			 * earlier in tipc_skb_peek()
+			 */
kfree_skb(__skb_dequeue(arrvq));
}
spin_unlock_bh(&inputq->lock);
@@ -1372,7 +1375,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
if (unlikely(rc))
return rc;
- skb_queue_head_init(&pkts);
+ __skb_queue_head_init(&pkts);
mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
@@ -1427,7 +1430,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
int send, sent = 0;
int rc = 0;
- skb_queue_head_init(&pkts);
+ __skb_queue_head_init(&pkts);
if (unlikely(dlen > INT_MAX))
return -EMSGSIZE;
@@ -1782,7 +1785,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
/* Send group flow control advertisement when applicable */
if (tsk->group && msg_in_group(hdr) && !grp_evt) {
- skb_queue_head_init(&xmitq);
+ __skb_queue_head_init(&xmitq);
tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
msg_orignode(hdr), msg_origport(hdr),
&xmitq);
@@ -2565,18 +2568,18 @@ static int tipc_shutdown(struct socket *sock, int how)
lock_sock(sk);
__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
- sk->sk_shutdown = SEND_SHUTDOWN;
+ sk->sk_shutdown = SHUTDOWN_MASK;
if (sk->sk_state == TIPC_DISCONNECTING) {
/* Discard any unreceived messages */
__skb_queue_purge(&sk->sk_receive_queue);
- /* Wake up anyone sleeping in poll */
- sk->sk_state_change(sk);
res = 0;
} else {
res = -ENOTCONN;
}
+ /* Wake up anyone sleeping in poll. */
+ sk->sk_state_change(sk);
release_sock(sk);
return res;
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 35558656fe02..1c4733153d74 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -407,12 +407,15 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
return -EWOULDBLOCK;
if (ret == sizeof(s)) {
read_lock_bh(&sk->sk_callback_lock);
- ret = tipc_conn_rcv_sub(srv, con, &s);
+ /* RACE: the connection can be closed in the meantime */
+ if (likely(connected(con)))
+ ret = tipc_conn_rcv_sub(srv, con, &s);
read_unlock_bh(&sk->sk_callback_lock);
+ if (!ret)
+ return 0;
}
- if (ret < 0)
- tipc_conn_close(con);
+ tipc_conn_close(con);
return ret;
}
@@ -668,12 +671,18 @@ static int tipc_topsrv_start(struct net *net)
ret = tipc_topsrv_work_start(srv);
if (ret < 0)
- return ret;
+ goto err_start;
ret = tipc_topsrv_create_listener(srv);
if (ret < 0)
- tipc_topsrv_work_stop(srv);
+ goto err_create;
+
+ return 0;
+err_create:
+ tipc_topsrv_work_stop(srv);
+err_start:
+ kfree(srv);
return ret;
}
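
Editor's note: the tipc_topsrv_start() hunk converts the error paths to the usual goto-label unwind and, crucially, frees srv when starting the work queues or creating the listener fails. A compact illustration of that unwind shape, with stub start/stop functions standing in for the real steps:

#include <stdio.h>
#include <stdlib.h>

struct server { int work_started; int listener; };

static int work_start(struct server *s) { s->work_started = 1; return 0; }
static void work_stop(struct server *s) { s->work_started = 0; }
static int create_listener(struct server *s) { (void)s; return -1; /* force failure */ }

/* Error unwinding with goto labels: each label undoes exactly the steps
 * that succeeded before the failure, ending with the allocation itself. */
static int server_start(struct server **out)
{
	struct server *srv = calloc(1, sizeof(*srv));
	int ret;

	if (!srv)
		return -1;
	ret = work_start(srv);
	if (ret < 0)
		goto err_start;
	ret = create_listener(srv);
	if (ret < 0)
		goto err_create;
	*out = srv;
	return 0;

err_create:
	work_stop(srv);
err_start:
	free(srv);	/* the step the fix adds: don't leak srv on failure */
	return ret;
}

int main(void)
{
	struct server *srv = NULL;

	printf("start: %d (srv=%p)\n", server_start(&srv), (void *)srv);
	return 0;
}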
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 382c84d9339d..1d6235479706 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -189,10 +189,13 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
.saddr = src->ipv6,
.flowi6_proto = IPPROTO_UDP
};
- err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst,
- &fl6);
- if (err)
+ ndst = ipv6_stub->ipv6_dst_lookup_flow(net,
+ ub->ubsock->sk,
+ &fl6, NULL);
+ if (IS_ERR(ndst)) {
+ err = PTR_ERR(ndst);
goto tx_error;
+ }
ttl = ip6_dst_hoplimit(ndst);
err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
&src->ipv6, &dst->ipv6, 0, ttl, 0,
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 8f40bbfd60ea..228e3ce48d43 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -351,13 +351,13 @@ static int tls_push_data(struct sock *sk,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
- int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
struct tls_record_info *record = ctx->open_record;
struct page_frag *pfrag;
size_t orig_size = size;
u32 max_open_record_len;
- int copy, rc = 0;
+ bool more = false;
bool done = false;
+ int copy, rc = 0;
long timeo;
if (flags &
@@ -422,9 +422,8 @@ handle_error:
if (!size) {
last_record:
tls_push_record_flags = flags;
- if (more) {
- tls_ctx->pending_open_record_frags =
- record->num_frags;
+ if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
+ more = true;
break;
}
@@ -445,6 +444,8 @@ last_record:
}
} while (!done);
+ tls_ctx->pending_open_record_frags = more;
+
if (orig_size - size > 0)
rc = orig_size - size;
@@ -476,7 +477,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
struct iov_iter msg_iter;
- char *kaddr = kmap(page);
+ char *kaddr;
struct kvec iov;
int rc;
@@ -490,6 +491,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
goto out;
}
+ kaddr = kmap(page);
iov.iov_base = kaddr + offset;
iov.iov_len = size;
iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
@@ -953,6 +955,8 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
if (tls_ctx->tx_conf != TLS_HW) {
dev_put(netdev);
tls_ctx->netdev = NULL;
+ } else {
+ set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
}
out:
up_read(&device_offload_lock);
@@ -982,7 +986,8 @@ static int tls_device_down(struct net_device *netdev)
if (ctx->tx_conf == TLS_HW)
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_TX);
- if (ctx->rx_conf == TLS_HW)
+ if (ctx->rx_conf == TLS_HW &&
+ !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_RX);
WRITE_ONCE(ctx->netdev, NULL);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index bbb2da70e870..7d761244a360 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -630,6 +630,12 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
return NULL;
}
+ if (!skb_queue_empty(&sk->sk_receive_queue)) {
+ __strp_unpause(&ctx->strp);
+ if (ctx->recv_pkt)
+ return ctx->recv_pkt;
+ }
+
if (sk->sk_shutdown & RCV_SHUTDOWN)
return NULL;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 2318e2e2748f..2020306468af 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -192,11 +192,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}
-static inline int unix_recvq_full(struct sock const *sk)
+static inline int unix_recvq_full(const struct sock *sk)
{
return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
+static inline int unix_recvq_full_lockless(const struct sock *sk)
+{
+ return skb_queue_len_lockless(&sk->sk_receive_queue) >
+ READ_ONCE(sk->sk_max_ack_backlog);
+}
+
struct sock *unix_peer_get(struct sock *s)
{
struct sock *peer;
@@ -1788,7 +1794,8 @@ restart_locked:
* - unix_peer(sk) == sk by time of get but disconnected before lock
*/
if (other != sk &&
- unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+ unlikely(unix_peer(other) != sk &&
+ unix_recvq_full_lockless(other))) {
if (timeo) {
timeo = unix_wait_for_peer(other, timeo);
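
Editor's note: the af_unix hunk switches the backlog check in the dgram send path to a lockless helper that reads the queue length and sk_max_ack_backlog with skb_queue_len_lockless()/READ_ONCE(), because the values can change under a reader that does not own every lock involved. The sketch below expresses the same idea with C11 relaxed atomic loads; the structure and field names are hypothetical.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Fields normally protected by 'lock' but also peeked at without it.
 * The lockless reader takes one atomic (READ_ONCE-like) snapshot of each
 * field so the compiler cannot tear or re-read them. */
struct sockbuf {
	pthread_mutex_t lock;
	_Atomic unsigned int qlen;
	_Atomic unsigned int max_backlog;
};

static void enqueue_locked(struct sockbuf *sb)
{
	pthread_mutex_lock(&sb->lock);
	atomic_fetch_add_explicit(&sb->qlen, 1, memory_order_relaxed);
	pthread_mutex_unlock(&sb->lock);
}

static bool recvq_full_lockless(struct sockbuf *sb)
{
	unsigned int qlen = atomic_load_explicit(&sb->qlen, memory_order_relaxed);
	unsigned int max = atomic_load_explicit(&sb->max_backlog, memory_order_relaxed);

	return qlen > max;
}

int main(void)
{
	struct sockbuf sb = { PTHREAD_MUTEX_INITIALIZER, 0, 2 };

	for (int i = 0; i < 3; i++)
		enqueue_locked(&sb);
	printf("full without taking the lock: %d\n", recvq_full_lockless(&sb));
	return 0;
}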
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 62ec9182f234..aceafec612a8 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -628,8 +628,9 @@ struct sock *__vsock_create(struct net *net,
vsk->trusted = psk->trusted;
vsk->owner = get_cred(psk->owner);
vsk->connect_timeout = psk->connect_timeout;
+ security_sk_clone(parent, sk);
} else {
- vsk->trusted = capable(CAP_NET_ADMIN);
+ vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
vsk->owner = get_current_cred();
vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
}
@@ -816,10 +817,12 @@ static int vsock_shutdown(struct socket *sock, int mode)
*/
sk = sock->sk;
+
+ lock_sock(sk);
if (sock->state == SS_UNCONNECTED) {
err = -ENOTCONN;
if (sk->sk_type == SOCK_STREAM)
- return err;
+ goto out;
} else {
sock->state = SS_DISCONNECTING;
err = 0;
@@ -828,10 +831,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
/* Receive and send shutdowns are treated alike. */
mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
if (mode) {
- lock_sock(sk);
sk->sk_shutdown |= mode;
sk->sk_state_change(sk);
- release_sock(sk);
if (sk->sk_type == SOCK_STREAM) {
sock_reset_flag(sk, SOCK_DONE);
@@ -839,6 +840,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
}
}
+out:
+ release_sock(sk);
return err;
}
@@ -1107,7 +1110,6 @@ static void vsock_connect_timeout(struct work_struct *work)
{
struct sock *sk;
struct vsock_sock *vsk;
- int cancel = 0;
vsk = container_of(work, struct vsock_sock, connect_work.work);
sk = sk_vsock(vsk);
@@ -1118,11 +1120,9 @@ static void vsock_connect_timeout(struct work_struct *work)
sk->sk_state = TCP_CLOSE;
sk->sk_err = ETIMEDOUT;
sk->sk_error_report(sk);
- cancel = 1;
+ vsock_transport_cancel_pkt(vsk);
}
release_sock(sk);
- if (cancel)
- vsock_transport_cancel_pkt(vsk);
sock_put(sk);
}
@@ -1283,7 +1283,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
/* Wait for children sockets to appear; these are the new sockets
* created upon connection establishment.
*/
- timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
+ timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK);
prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
while ((connected = vsock_dequeue_accept(listener)) == NULL &&
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index db6ca51228d2..2bdf36845a5f 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -443,14 +443,10 @@ static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
static int hvs_shutdown(struct vsock_sock *vsk, int mode)
{
- struct sock *sk = sk_vsock(vsk);
-
if (!(mode & SEND_SHUTDOWN))
return 0;
- lock_sock(sk);
hvs_shutdown_lock_held(vsk->trans, mode);
- release_sock(sk);
return 0;
}
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 96ab344f17bb..cc70d651d13e 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -39,6 +39,7 @@ struct virtio_vsock {
* must be accessed with tx_lock held.
*/
struct mutex tx_lock;
+ bool tx_run;
struct work_struct send_pkt_work;
spinlock_t send_pkt_list_lock;
@@ -54,6 +55,7 @@ struct virtio_vsock {
* must be accessed with rx_lock held.
*/
struct mutex rx_lock;
+ bool rx_run;
int rx_buf_nr;
int rx_buf_max_nr;
@@ -61,46 +63,28 @@ struct virtio_vsock {
* vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
*/
struct mutex event_lock;
+ bool event_run;
struct virtio_vsock_event event_list[8];
u32 guest_cid;
};
-static struct virtio_vsock *virtio_vsock_get(void)
-{
- return the_virtio_vsock;
-}
-
static u32 virtio_transport_get_local_cid(void)
{
- struct virtio_vsock *vsock = virtio_vsock_get();
-
- if (!vsock)
- return VMADDR_CID_ANY;
-
- return vsock->guest_cid;
-}
-
-static void virtio_transport_loopback_work(struct work_struct *work)
-{
- struct virtio_vsock *vsock =
- container_of(work, struct virtio_vsock, loopback_work);
- LIST_HEAD(pkts);
-
- spin_lock_bh(&vsock->loopback_list_lock);
- list_splice_init(&vsock->loopback_list, &pkts);
- spin_unlock_bh(&vsock->loopback_list_lock);
-
- mutex_lock(&vsock->rx_lock);
- while (!list_empty(&pkts)) {
- struct virtio_vsock_pkt *pkt;
-
- pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
- list_del_init(&pkt->list);
+ struct virtio_vsock *vsock;
+ u32 ret;
- virtio_transport_recv_pkt(pkt);
+ rcu_read_lock();
+ vsock = rcu_dereference(the_virtio_vsock);
+ if (!vsock) {
+ ret = VMADDR_CID_ANY;
+ goto out_rcu;
}
- mutex_unlock(&vsock->rx_lock);
+
+ ret = vsock->guest_cid;
+out_rcu:
+ rcu_read_unlock();
+ return ret;
}
static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
@@ -128,6 +112,9 @@ virtio_transport_send_pkt_work(struct work_struct *work)
mutex_lock(&vsock->tx_lock);
+ if (!vsock->tx_run)
+ goto out;
+
vq = vsock->vqs[VSOCK_VQ_TX];
for (;;) {
@@ -186,6 +173,7 @@ virtio_transport_send_pkt_work(struct work_struct *work)
if (added)
virtqueue_kick(vq);
+out:
mutex_unlock(&vsock->tx_lock);
if (restart_rx)
@@ -198,14 +186,18 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
struct virtio_vsock *vsock;
int len = pkt->len;
- vsock = virtio_vsock_get();
+ rcu_read_lock();
+ vsock = rcu_dereference(the_virtio_vsock);
if (!vsock) {
virtio_transport_free_pkt(pkt);
- return -ENODEV;
+ len = -ENODEV;
+ goto out_rcu;
}
- if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
- return virtio_transport_send_pkt_loopback(vsock, pkt);
+ if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
+ len = virtio_transport_send_pkt_loopback(vsock, pkt);
+ goto out_rcu;
+ }
if (pkt->reply)
atomic_inc(&vsock->queued_replies);
@@ -215,6 +207,9 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
spin_unlock_bh(&vsock->send_pkt_list_lock);
queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+
+out_rcu:
+ rcu_read_unlock();
return len;
}
@@ -223,12 +218,14 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
struct virtio_vsock *vsock;
struct virtio_vsock_pkt *pkt, *n;
- int cnt = 0;
+ int cnt = 0, ret;
LIST_HEAD(freeme);
- vsock = virtio_vsock_get();
+ rcu_read_lock();
+ vsock = rcu_dereference(the_virtio_vsock);
if (!vsock) {
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_rcu;
}
spin_lock_bh(&vsock->send_pkt_list_lock);
@@ -256,7 +253,11 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
- return 0;
+ ret = 0;
+
+out_rcu:
+ rcu_read_unlock();
+ return ret;
}
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
@@ -308,6 +309,10 @@ static void virtio_transport_tx_work(struct work_struct *work)
vq = vsock->vqs[VSOCK_VQ_TX];
mutex_lock(&vsock->tx_lock);
+
+ if (!vsock->tx_run)
+ goto out;
+
do {
struct virtio_vsock_pkt *pkt;
unsigned int len;
@@ -318,6 +323,8 @@ static void virtio_transport_tx_work(struct work_struct *work)
added = true;
}
} while (!virtqueue_enable_cb(vq));
+
+out:
mutex_unlock(&vsock->tx_lock);
if (added)
@@ -336,56 +343,6 @@ static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
return val < virtqueue_get_vring_size(vq);
}
-static void virtio_transport_rx_work(struct work_struct *work)
-{
- struct virtio_vsock *vsock =
- container_of(work, struct virtio_vsock, rx_work);
- struct virtqueue *vq;
-
- vq = vsock->vqs[VSOCK_VQ_RX];
-
- mutex_lock(&vsock->rx_lock);
-
- do {
- virtqueue_disable_cb(vq);
- for (;;) {
- struct virtio_vsock_pkt *pkt;
- unsigned int len;
-
- if (!virtio_transport_more_replies(vsock)) {
- /* Stop rx until the device processes already
- * pending replies. Leave rx virtqueue
- * callbacks disabled.
- */
- goto out;
- }
-
- pkt = virtqueue_get_buf(vq, &len);
- if (!pkt) {
- break;
- }
-
- vsock->rx_buf_nr--;
-
- /* Drop short/long packets */
- if (unlikely(len < sizeof(pkt->hdr) ||
- len > sizeof(pkt->hdr) + pkt->len)) {
- virtio_transport_free_pkt(pkt);
- continue;
- }
-
- pkt->len = len - sizeof(pkt->hdr);
- virtio_transport_deliver_tap_pkt(pkt);
- virtio_transport_recv_pkt(pkt);
- }
- } while (!virtqueue_enable_cb(vq));
-
-out:
- if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
- virtio_vsock_rx_fill(vsock);
- mutex_unlock(&vsock->rx_lock);
-}
-
/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
struct virtio_vsock_event *event)
@@ -455,6 +412,9 @@ static void virtio_transport_event_work(struct work_struct *work)
mutex_lock(&vsock->event_lock);
+ if (!vsock->event_run)
+ goto out;
+
do {
struct virtio_vsock_event *event;
unsigned int len;
@@ -469,7 +429,7 @@ static void virtio_transport_event_work(struct work_struct *work)
} while (!virtqueue_enable_cb(vq));
virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
-
+out:
mutex_unlock(&vsock->event_lock);
}
@@ -546,6 +506,86 @@ static struct virtio_transport virtio_transport = {
.send_pkt = virtio_transport_send_pkt,
};
+static void virtio_transport_loopback_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, loopback_work);
+ LIST_HEAD(pkts);
+
+ spin_lock_bh(&vsock->loopback_list_lock);
+ list_splice_init(&vsock->loopback_list, &pkts);
+ spin_unlock_bh(&vsock->loopback_list_lock);
+
+ mutex_lock(&vsock->rx_lock);
+
+ if (!vsock->rx_run)
+ goto out;
+
+ while (!list_empty(&pkts)) {
+ struct virtio_vsock_pkt *pkt;
+
+ pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
+ list_del_init(&pkt->list);
+
+ virtio_transport_recv_pkt(&virtio_transport, pkt);
+ }
+out:
+ mutex_unlock(&vsock->rx_lock);
+}
+
+static void virtio_transport_rx_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, rx_work);
+ struct virtqueue *vq;
+
+ vq = vsock->vqs[VSOCK_VQ_RX];
+
+ mutex_lock(&vsock->rx_lock);
+
+ if (!vsock->rx_run)
+ goto out;
+
+ do {
+ virtqueue_disable_cb(vq);
+ for (;;) {
+ struct virtio_vsock_pkt *pkt;
+ unsigned int len;
+
+ if (!virtio_transport_more_replies(vsock)) {
+ /* Stop rx until the device processes already
+ * pending replies. Leave rx virtqueue
+ * callbacks disabled.
+ */
+ goto out;
+ }
+
+ pkt = virtqueue_get_buf(vq, &len);
+ if (!pkt) {
+ break;
+ }
+
+ vsock->rx_buf_nr--;
+
+ /* Drop short/long packets */
+ if (unlikely(len < sizeof(pkt->hdr) ||
+ len > sizeof(pkt->hdr) + pkt->len)) {
+ virtio_transport_free_pkt(pkt);
+ continue;
+ }
+
+ pkt->len = len - sizeof(pkt->hdr);
+ virtio_transport_deliver_tap_pkt(pkt);
+ virtio_transport_recv_pkt(&virtio_transport, pkt);
+ }
+ } while (!virtqueue_enable_cb(vq));
+
+out:
+ if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
+ virtio_vsock_rx_fill(vsock);
+ mutex_unlock(&vsock->rx_lock);
+}
+
static int virtio_vsock_probe(struct virtio_device *vdev)
{
vq_callback_t *callbacks[] = {
@@ -566,7 +606,8 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
return ret;
/* Only one virtio-vsock device per guest is supported */
- if (the_virtio_vsock) {
+ if (rcu_dereference_protected(the_virtio_vsock,
+ lockdep_is_held(&the_virtio_vsock_mutex))) {
ret = -EBUSY;
goto out;
}
@@ -591,8 +632,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
vsock->rx_buf_max_nr = 0;
atomic_set(&vsock->queued_replies, 0);
- vdev->priv = vsock;
- the_virtio_vsock = vsock;
mutex_init(&vsock->tx_lock);
mutex_init(&vsock->rx_lock);
mutex_init(&vsock->event_lock);
@@ -606,14 +645,23 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work);
+ mutex_lock(&vsock->tx_lock);
+ vsock->tx_run = true;
+ mutex_unlock(&vsock->tx_lock);
+
mutex_lock(&vsock->rx_lock);
virtio_vsock_rx_fill(vsock);
+ vsock->rx_run = true;
mutex_unlock(&vsock->rx_lock);
mutex_lock(&vsock->event_lock);
virtio_vsock_event_fill(vsock);
+ vsock->event_run = true;
mutex_unlock(&vsock->event_lock);
+ vdev->priv = vsock;
+ rcu_assign_pointer(the_virtio_vsock, vsock);
+
mutex_unlock(&the_virtio_vsock_mutex);
return 0;
@@ -628,6 +676,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
struct virtio_vsock *vsock = vdev->priv;
struct virtio_vsock_pkt *pkt;
+ mutex_lock(&the_virtio_vsock_mutex);
+
+ vdev->priv = NULL;
+ rcu_assign_pointer(the_virtio_vsock, NULL);
+ synchronize_rcu();
+
flush_work(&vsock->loopback_work);
flush_work(&vsock->rx_work);
flush_work(&vsock->tx_work);
@@ -637,6 +691,24 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
/* Reset all connected sockets when the device disappear */
vsock_for_each_connected_socket(virtio_vsock_reset_sock);
+ /* Stop all work handlers to make sure no one is accessing the device,
+ * so we can safely call vdev->config->reset().
+ */
+ mutex_lock(&vsock->rx_lock);
+ vsock->rx_run = false;
+ mutex_unlock(&vsock->rx_lock);
+
+ mutex_lock(&vsock->tx_lock);
+ vsock->tx_run = false;
+ mutex_unlock(&vsock->tx_lock);
+
+ mutex_lock(&vsock->event_lock);
+ vsock->event_run = false;
+ mutex_unlock(&vsock->event_lock);
+
+ /* Flush all device writes and interrupts, device will not use any
+ * more buffers.
+ */
vdev->config->reset(vdev);
mutex_lock(&vsock->rx_lock);
@@ -667,12 +739,11 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
}
spin_unlock_bh(&vsock->loopback_list_lock);
- mutex_lock(&the_virtio_vsock_mutex);
- the_virtio_vsock = NULL;
- mutex_unlock(&the_virtio_vsock_mutex);
-
+ /* Delete virtqueues and flush outstanding callbacks if any */
vdev->config->del_vqs(vdev);
+ mutex_unlock(&the_virtio_vsock_mutex);
+
kfree(vsock);
}
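
Editor's note: the virtio_transport rework publishes the_virtio_vsock via RCU and adds per-queue tx_run/rx_run/event_run flags: every worker re-checks its flag under the corresponding mutex, and remove clears the flags under those same mutexes before resetting the device, so no worker can race with teardown. The pthread sketch below captures just the run-flag handshake (the RCU part is omitted); all names are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Worker/teardown handshake modelled on the run flags: once teardown has
 * cleared the flag and dropped the lock, no worker can touch the device. */
struct vdev {
	pthread_mutex_t tx_lock;
	bool tx_run;
	int tx_touches;		/* stands in for real virtqueue work */
};

static void *tx_worker(void *arg)
{
	struct vdev *d = arg;

	pthread_mutex_lock(&d->tx_lock);
	if (!d->tx_run)
		goto out;	/* device is going away: do nothing */
	d->tx_touches++;	/* safe: teardown waits on this lock */
out:
	pthread_mutex_unlock(&d->tx_lock);
	return NULL;
}

static void device_remove(struct vdev *d)
{
	pthread_mutex_lock(&d->tx_lock);
	d->tx_run = false;	/* later workers bail out immediately */
	pthread_mutex_unlock(&d->tx_lock);
	/* From here on it is safe to reset/free the device. */
}

int main(void)
{
	struct vdev d = { PTHREAD_MUTEX_INITIALIZER, true, 0 };
	pthread_t t;

	pthread_create(&t, NULL, tx_worker, &d);
	pthread_join(t, NULL);
	device_remove(&d);
	tx_worker(&d);		/* runs after removal: touches nothing */
	printf("device touched %d time(s)\n", d.tx_touches);
	return 0;
}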
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 52242a148c70..cbb336f01cf2 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -669,9 +669,9 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
/* Normally packets are associated with a socket. There may be no socket if an
* attempt was made to connect to a socket that does not exist.
*/
-static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
+static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
+ struct virtio_vsock_pkt *pkt)
{
- const struct virtio_transport *t;
struct virtio_vsock_pkt *reply;
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RST,
@@ -691,7 +691,6 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
if (!reply)
return -ENOMEM;
- t = virtio_transport_get_ops();
if (!t) {
virtio_transport_free_pkt(reply);
return -ENOTCONN;
@@ -993,7 +992,8 @@ static bool virtio_transport_space_update(struct sock *sk,
/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
* lock.
*/
-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
+void virtio_transport_recv_pkt(struct virtio_transport *t,
+ struct virtio_vsock_pkt *pkt)
{
struct sockaddr_vm src, dst;
struct vsock_sock *vsk;
@@ -1015,7 +1015,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
le32_to_cpu(pkt->hdr.fwd_cnt));
if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
- (void)virtio_transport_reset_no_sock(pkt);
+ (void)virtio_transport_reset_no_sock(t, pkt);
goto free_pkt;
}
@@ -1026,17 +1026,17 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
if (!sk) {
sk = vsock_find_bound_socket(&dst);
if (!sk) {
- (void)virtio_transport_reset_no_sock(pkt);
+ (void)virtio_transport_reset_no_sock(t, pkt);
goto free_pkt;
}
}
vsk = vsock_sk(sk);
- space_available = virtio_transport_space_update(sk, pkt);
-
lock_sock(sk);
+ space_available = virtio_transport_space_update(sk, pkt);
+
/* Update CID in case it has changed after a transport reset event */
vsk->local_addr.svm_cid = dst.svm_cid;
@@ -1060,6 +1060,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
virtio_transport_free_pkt(pkt);
break;
default:
+ (void)virtio_transport_reset_no_sock(t, pkt);
virtio_transport_free_pkt(pkt);
break;
}
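
As a brief sketch, the RST reply now goes out through the transport the caller passes in rather than a global lookup; this assumes the send_pkt callback that struct virtio_transport provides in mainline:

	static int example_reset_reply(const struct virtio_transport *t,
				       struct virtio_vsock_pkt *reply)
	{
		if (!t) {
			virtio_transport_free_pkt(reply);
			return -ENOTCONN;
		}
		return t->send_pkt(reply);
	}
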
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index c3d5ab01fba7..42ab3e2ac060 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -584,8 +584,7 @@ vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
out:
if (err < 0) {
- pr_err("Could not attach to queue pair with %d\n",
- err);
+ pr_err_once("Could not attach to queue pair with %d\n", err);
err = vmci_transport_error_to_vsock_error(err);
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 0221849b7218..5f0605275fa3 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1950,7 +1950,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
* case we'll continue with more data in the next round,
* but break unconditionally so unsplit data stops here.
*/
- state->split_start++;
+ if (state->split)
+ state->split_start++;
+ else
+ state->split_start = 0;
break;
case 9:
if (rdev->wiphy.extended_capabilities &&
@@ -3621,6 +3624,9 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
+ if (key.idx < 0)
+ return -EINVAL;
+
if (info->attrs[NL80211_ATTR_MAC])
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
@@ -11496,7 +11502,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct nlattr *tb[NUM_NL80211_REKEY_DATA];
- struct cfg80211_gtk_rekey_data rekey_data;
+ struct cfg80211_gtk_rekey_data rekey_data = {};
int err;
if (!info->attrs[NL80211_ATTR_REKEY_DATA])
@@ -12392,13 +12398,13 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info)
if (!wdev_running(wdev))
return -ENETDOWN;
}
-
- if (!vcmd->doit)
- return -EOPNOTSUPP;
} else {
wdev = NULL;
}
+ if (!vcmd->doit)
+ return -EOPNOTSUPP;
+
if (info->attrs[NL80211_ATTR_VENDOR_DATA]) {
data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]);
len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 32f575857e41..c7825b951f72 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2936,6 +2936,9 @@ int regulatory_hint_user(const char *alpha2,
if (WARN_ON(!alpha2))
return -EINVAL;
+ if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2))
+ return -EINVAL;
+
request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
if (!request)
return -ENOMEM;
@@ -3371,7 +3374,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
power_rule = &reg_rule->power_rule;
if (reg_rule->flags & NL80211_RRF_AUTO_BW)
- snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO",
+ snprintf(bw, sizeof(bw), "%d KHz, %u KHz AUTO",
freq_range->max_bandwidth_khz,
reg_get_max_bandwidth(rd, reg_rule));
else
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index e5d61ba837ad..67b2747ad9ef 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1036,6 +1036,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
if (rdev->bss_entries >= bss_entries_limit &&
!cfg80211_bss_expire_oldest(rdev)) {
+ if (!list_empty(&new->hidden_list))
+ list_del(&new->hidden_list);
kfree(new);
goto drop;
}
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f455b9af6815..9d8b106deb0b 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -530,7 +530,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
cfg80211_sme_free(wdev);
}
- if (WARN_ON(wdev->conn))
+ if (wdev->conn)
return -EINPROGRESS;
wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 1a878b84cbd0..6f9cff2ee795 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -422,7 +422,7 @@ EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype,
- u8 data_offset)
+ u8 data_offset, bool is_amsdu)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct {
@@ -510,7 +510,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
skb_copy_bits(skb, hdrlen, &payload, sizeof(payload));
tmp.h_proto = payload.proto;
- if (likely((ether_addr_equal(payload.hdr, rfc1042_header) &&
+ if (likely((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) &&
tmp.h_proto != htons(ETH_P_AARP) &&
tmp.h_proto != htons(ETH_P_IPX)) ||
ether_addr_equal(payload.hdr, bridge_tunnel_header)))
@@ -652,6 +652,9 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
remaining = skb->len - offset;
if (subframe_len > remaining)
goto purge;
+ /* mitigate A-MSDU aggregation injection attacks */
+ if (ether_addr_equal(eth.h_dest, rfc1042_header))
+ goto purge;
offset += sizeof(struct ethhdr);
last = remaining <= subframe_len + padding;
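
A short illustration (not part of the patch) of the mitigation above: any A-MSDU subframe whose destination MAC equals the RFC 1042 SNAP prefix is treated as crafted and the whole aggregate is purged.

	/* Illustrative check, reusing the exported rfc1042_header symbol. */
	static bool amsdu_dest_is_rfc1042(const struct ethhdr *eth)
	{
		return ether_addr_equal(eth->h_dest, rfc1042_header);
	}
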
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 69102fda9ebd..76a80a41615b 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -896,8 +896,9 @@ out:
int call_commit_handler(struct net_device *dev)
{
#ifdef CONFIG_WIRELESS_EXT
- if ((netif_running(dev)) &&
- (dev->wireless_handlers->standard[0] != NULL))
+ if (netif_running(dev) &&
+ dev->wireless_handlers &&
+ dev->wireless_handlers->standard[0])
/* Call the commit handler on the driver */
return dev->wireless_handlers->standard[0](dev, NULL,
NULL, NULL);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index bd1cbbfe5924..f43d037ea852 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -680,7 +680,8 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
int len, i, rc = 0;
if (addr_len != sizeof(struct sockaddr_x25) ||
- addr->sx25_family != AF_X25) {
+ addr->sx25_family != AF_X25 ||
+ strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) {
rc = -EINVAL;
goto out;
}
@@ -774,7 +775,8 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
rc = -EINVAL;
if (addr_len != sizeof(struct sockaddr_x25) ||
- addr->sx25_family != AF_X25)
+ addr->sx25_family != AF_X25 ||
+ strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN)
goto out;
rc = -ENETUNREACH;
@@ -824,7 +826,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
sock->state = SS_CONNECTED;
rc = 0;
out_put_neigh:
- if (rc) {
+ if (rc && x25->neighbour) {
read_lock_bh(&x25_list_lock);
x25_neigh_put(x25->neighbour);
x25->neighbour = NULL;
@@ -1049,6 +1051,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
makex25->lci = lci;
makex25->dest_addr = dest_addr;
makex25->source_addr = source_addr;
+ x25_neigh_hold(nb);
makex25->neighbour = nb;
makex25->facilities = facilities;
makex25->dte_facilities= dte_facilities;
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 39231237e1c3..30f71620d4e3 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -120,8 +120,10 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
goto drop;
}
- if (!pskb_may_pull(skb, 1))
+ if (!pskb_may_pull(skb, 1)) {
+ x25_neigh_put(nb);
return 0;
+ }
switch (skb->data[0]) {
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 743103786652..f3d34582581b 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -362,6 +362,12 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
+ if (x25->neighbour) {
+ read_lock_bh(&x25_list_lock);
+ x25_neigh_put(x25->neighbour);
+ x25->neighbour = NULL;
+ read_unlock_bh(&x25_list_lock);
+ }
}
/*
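
A sketch of the reference pairing these x25 hunks establish; the example_* helpers are illustrative, not kernel APIs:

	static void example_attach_neigh(struct x25_sock *x25, struct x25_neigh *nb)
	{
		x25_neigh_hold(nb);		/* taken when the socket starts using nb */
		x25->neighbour = nb;
	}

	static void example_detach_neigh(struct x25_sock *x25)
	{
		if (x25->neighbour) {		/* dropped exactly once on teardown */
			x25_neigh_put(x25->neighbour);
			x25->neighbour = NULL;
		}
	}
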
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 556a649512b6..b87e63cb55be 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -258,9 +258,9 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
+ u64 npgs, addr = mr->addr, size = mr->len;
unsigned int chunks, chunks_per_page;
- u64 addr = mr->addr, size = mr->len;
- int size_chk, err, i;
+ int err, i;
if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
/* Strictly speaking we could support this, if:
@@ -285,6 +285,10 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
if ((addr + size) < addr)
return -EINVAL;
+ npgs = div_u64(size, PAGE_SIZE);
+ if (npgs > U32_MAX)
+ return -EINVAL;
+
chunks = (unsigned int)div_u64(size, chunk_size);
if (chunks == 0)
return -EINVAL;
@@ -295,8 +299,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
headroom = ALIGN(headroom, 64);
- size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
- if (size_chk < 0)
+ if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
return -EINVAL;
umem->address = (unsigned long)addr;
@@ -304,7 +307,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
umem->props.size = size;
umem->headroom = headroom;
umem->chunk_size_nohr = chunk_size - headroom;
- umem->npgs = size / PAGE_SIZE;
+ umem->npgs = (u32)npgs;
umem->pgs = NULL;
umem->user = NULL;
INIT_LIST_HEAD(&umem->xsk_list);
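
A compact sketch (illustrative only) of the overflow-safe sizing this hunk introduces: the page count is computed in 64 bits and rejected before narrowing, and the headroom check no longer relies on a signed subtraction that could wrap.

	static int example_validate_umem(u64 size, u32 chunk_size, u32 headroom)
	{
		u64 npgs = div_u64(size, PAGE_SIZE);

		if (npgs > U32_MAX)
			return -EINVAL;
		if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
			return -EINVAL;
		return 0;	/* safe to store npgs as a u32 */
	}
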
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 72caa4fb13f4..6bb0649c028c 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -233,10 +233,8 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
len = desc.len;
skb = sock_alloc_send_skb(sk, len, 1, &err);
- if (unlikely(!skb)) {
- err = -EAGAIN;
+ if (unlikely(!skb))
goto out;
- }
skb_put(skb, len);
addr = desc.addr;
@@ -291,17 +289,17 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}
-static unsigned int xsk_poll(struct file *file, struct socket *sock,
+static __poll_t xsk_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait)
{
- unsigned int mask = datagram_poll(file, sock, wait);
+ __poll_t mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
struct xdp_sock *xs = xdp_sk(sk);
if (xs->rx && !xskq_empty_desc(xs->rx))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
if (xs->tx && !xskq_full_desc(xs->tx))
- mask |= POLLOUT | POLLWRNORM;
+ mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
}
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index fe96c0d039f2..cf7cbb5dd918 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -245,12 +245,15 @@ static inline void xskq_produce_flush_desc(struct xsk_queue *q)
static inline bool xskq_full_desc(struct xsk_queue *q)
{
- return xskq_nb_avail(q, q->nentries) == q->nentries;
+ /* No barriers needed since data is not accessed */
+ return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
+ q->nentries;
}
static inline bool xskq_empty_desc(struct xsk_queue *q)
{
- return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
+ /* No barriers needed since data is not accessed */
+ return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}
void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
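
The pattern above in isolation, assuming the producer/consumer fields of struct xdp_ring used elsewhere in this header; READ_ONCE() suffices because the descriptors themselves are not read on this path:

	static inline u32 example_ring_used(const struct xdp_ring *ring)
	{
		return READ_ONCE(ring->producer) - READ_ONCE(ring->consumer);
	}
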
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 8634ce677142..e7a0ce98479f 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -33,7 +33,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
netdev_features_t esp_features = features;
struct xfrm_offload *xo = xfrm_offload(skb);
- if (!xo)
+ if (!xo || (xo->flags & XFRM_XMIT))
return skb;
if (!(features & NETIF_F_HW_ESP))
@@ -53,6 +53,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
return skb;
}
+ xo->flags |= XFRM_XMIT;
+
if (skb_is_gso(skb)) {
struct net_device *dev = skb->dev;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 82b0a99ee1f4..fcba8a139f61 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -407,7 +407,7 @@ resume:
dev_put(skb->dev);
spin_lock(&x->lock);
- if (nexthdr <= 0) {
+ if (nexthdr < 0) {
if (nexthdr == -EBADMSG) {
xfrm_audit_state_icvfail(x, skb,
x->type->proto);
@@ -420,7 +420,7 @@ resume:
/* only the first xfrm gets the encap type */
encap_type = 0;
- if (async && x->repl->recheck(x, skb, seq)) {
+ if (x->repl->recheck(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 317fe9c92932..35a020a70985 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -293,23 +293,26 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
}
mtu = dst_mtu(dst);
- if (!skb->ignore_df && skb->len > mtu) {
+ if (skb->len > mtu) {
skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IPV6)) {
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
} else {
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
- htonl(mtu));
+ if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+ goto xmit;
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
}
dst_release(dst);
return -EMSGSIZE;
}
+xmit:
xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
skb_dst_set(skb, dst);
skb->dev = tdev;
@@ -780,7 +783,28 @@ static void __net_exit xfrmi_exit_net(struct net *net)
rtnl_unlock();
}
+static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
+{
+ struct net *net;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ list_for_each_entry(net, net_exit_list, exit_list) {
+ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+ struct xfrm_if __rcu **xip;
+ struct xfrm_if *xi;
+
+ for (xip = &xfrmn->xfrmi[0];
+ (xi = rtnl_dereference(*xip)) != NULL;
+ xip = &xi->next)
+ unregister_netdevice_queue(xi->dev, &list);
+ }
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
static struct pernet_operations xfrmi_net_ops = {
+ .exit_batch = xfrmi_exit_batch_net,
.init = xfrmi_init_net,
.exit = xfrmi_exit_net,
.id = &xfrmi_net_id,
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 6d20fbcde000..c46162887b94 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -235,18 +235,20 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
xfrm_state_hold(x);
if (skb_is_gso(skb)) {
- skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
+ if (skb->inner_protocol)
+ return xfrm_output_gso(net, sk, skb);
- return xfrm_output2(net, sk, skb);
+ skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
+ goto out;
}
if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
goto out;
+ } else {
+ if (skb_is_gso(skb))
+ return xfrm_output_gso(net, sk, skb);
}
- if (skb_is_gso(skb))
- return xfrm_output_gso(net, sk, skb);
-
if (skb->ip_summed == CHECKSUM_PARTIAL) {
err = skb_checksum_help(skb);
if (err) {
@@ -283,7 +285,8 @@ void xfrm_local_error(struct sk_buff *skb, int mtu)
if (skb->protocol == htons(ETH_P_IP))
proto = AF_INET;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (skb->protocol == htons(ETH_P_IPV6) &&
+ skb->sk->sk_family == AF_INET6)
proto = AF_INET6;
else
return;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b30c074160e3..e9aea82f370d 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -730,12 +730,7 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
struct xfrm_policy *pol)
{
- u32 mark = policy->mark.v & policy->mark.m;
-
- if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
- return true;
-
- if ((mark & pol->mark.m) == pol->mark.v &&
+ if (policy->mark.v == pol->mark.v &&
policy->priority == pol->priority)
return true;
@@ -2106,8 +2101,8 @@ struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
xflo.flags = flags;
/* To accelerate a bit... */
- if ((dst_orig->flags & DST_NOXFRM) ||
- !net->xfrm.policy_count[XFRM_POLICY_OUT])
+ if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
+ !net->xfrm.policy_count[XFRM_POLICY_OUT]))
goto nopol;
xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 47a8ff972a2b..44acc724122b 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -41,7 +41,6 @@ static void xfrm_state_gc_task(struct work_struct *work);
*/
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
-static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
static struct kmem_cache *xfrm_state_cache __ro_after_init;
static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
@@ -137,7 +136,7 @@ static void xfrm_hash_resize(struct work_struct *work)
}
spin_lock_bh(&net->xfrm.xfrm_state_lock);
- write_seqcount_begin(&xfrm_state_hash_generation);
+ write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
@@ -153,7 +152,7 @@ static void xfrm_hash_resize(struct work_struct *work)
rcu_assign_pointer(net->xfrm.state_byspi, nspi);
net->xfrm.state_hmask = nhashmask;
- write_seqcount_end(&xfrm_state_hash_generation);
+ write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
osize = (ohashmask + 1) * sizeof(struct hlist_head);
@@ -923,7 +922,8 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
*/
if (x->km.state == XFRM_STATE_VALID) {
if ((x->sel.family &&
- !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
+ (x->sel.family != family ||
+ !xfrm_selector_match(&x->sel, fl, family))) ||
!security_xfrm_state_pol_flow_match(x, pol, fl))
return;
@@ -936,7 +936,9 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
*acq_in_progress = 1;
} else if (x->km.state == XFRM_STATE_ERROR ||
x->km.state == XFRM_STATE_EXPIRED) {
- if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
+ if ((!x->sel.family ||
+ (x->sel.family == family &&
+ xfrm_selector_match(&x->sel, fl, family))) &&
security_xfrm_state_pol_flow_match(x, pol, fl))
*error = -ESRCH;
}
@@ -962,7 +964,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
to_put = NULL;
- sequence = read_seqcount_begin(&xfrm_state_hash_generation);
+ sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
rcu_read_lock();
h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
@@ -976,7 +978,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
- xfrm_state_look_at(pol, x, fl, encap_family,
+ xfrm_state_look_at(pol, x, fl, family,
&best, &acquire_in_progress, &error);
}
if (best || acquire_in_progress)
@@ -993,7 +995,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
- xfrm_state_look_at(pol, x, fl, encap_family,
+ xfrm_state_look_at(pol, x, fl, family,
&best, &acquire_in_progress, &error);
}
@@ -1073,7 +1075,7 @@ out:
if (to_put)
xfrm_state_put(to_put);
- if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
+ if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
*err = -EAGAIN;
if (x) {
xfrm_state_put(x);
@@ -1341,6 +1343,30 @@ out:
EXPORT_SYMBOL(xfrm_state_add);
#ifdef CONFIG_XFRM_MIGRATE
+static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security)
+{
+ struct xfrm_user_sec_ctx *uctx;
+ int size = sizeof(*uctx) + security->ctx_len;
+ int err;
+
+ uctx = kmalloc(size, GFP_KERNEL);
+ if (!uctx)
+ return -ENOMEM;
+
+ uctx->exttype = XFRMA_SEC_CTX;
+ uctx->len = size;
+ uctx->ctx_doi = security->ctx_doi;
+ uctx->ctx_alg = security->ctx_alg;
+ uctx->ctx_len = security->ctx_len;
+ memcpy(uctx + 1, security->ctx_str, security->ctx_len);
+ err = security_xfrm_state_alloc(x, uctx);
+ kfree(uctx);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
struct xfrm_encap_tmpl *encap)
{
@@ -1397,6 +1423,10 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
goto error;
}
+ if (orig->security)
+ if (clone_security(x, orig->security))
+ goto error;
+
if (orig->coaddr) {
x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
GFP_KERNEL);
@@ -1410,6 +1440,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
}
memcpy(&x->mark, &orig->mark, sizeof(x->mark));
+ memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));
if (xfrm_init_state(x) < 0)
goto error;
@@ -1421,7 +1452,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
x->tfcpad = orig->tfcpad;
x->replay_maxdiff = orig->replay_maxdiff;
x->replay_maxage = orig->replay_maxage;
- x->curlft.add_time = orig->curlft.add_time;
+ memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft));
x->km.state = orig->km.state;
x->km.seq = orig->km.seq;
x->replay = orig->replay;
@@ -1793,6 +1824,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
int err = -ENOENT;
__be32 minspi = htonl(low);
__be32 maxspi = htonl(high);
+ __be32 newspi = 0;
u32 mark = x->mark.v & x->mark.m;
spin_lock_bh(&x->lock);
@@ -1811,21 +1843,22 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
xfrm_state_put(x0);
goto unlock;
}
- x->id.spi = minspi;
+ newspi = minspi;
} else {
u32 spi = 0;
for (h = 0; h < high-low+1; h++) {
spi = low + prandom_u32()%(high-low+1);
x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
if (x0 == NULL) {
- x->id.spi = htonl(spi);
+ newspi = htonl(spi);
break;
}
xfrm_state_put(x0);
}
}
- if (x->id.spi) {
+ if (newspi) {
spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ x->id.spi = newspi;
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
@@ -2372,6 +2405,7 @@ int __net_init xfrm_state_init(struct net *net)
net->xfrm.state_num = 0;
INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
spin_lock_init(&net->xfrm.xfrm_state_lock);
+ seqcount_init(&net->xfrm.xfrm_state_hash_generation);
return 0;
out_byspi:
diff --git a/samples/bpf/lwt_len_hist.sh b/samples/bpf/lwt_len_hist.sh
index 090b96eaf7f7..0eda9754f50b 100644..100755
--- a/samples/bpf/lwt_len_hist.sh
+++ b/samples/bpf/lwt_len_hist.sh
@@ -8,6 +8,8 @@ VETH1=tst_lwt1b
TRACE_ROOT=/sys/kernel/debug/tracing
function cleanup {
+	# To reset the saved histogram, remove the pinned map
+ rm /sys/fs/bpf/tc/globals/lwt_len_hist_map
ip route del 192.168.253.2/32 dev $VETH0 2> /dev/null
ip link del $VETH0 2> /dev/null
ip link del $VETH1 2> /dev/null
diff --git a/samples/bpf/lwt_len_hist_user.c b/samples/bpf/lwt_len_hist_user.c
index 587b68b1f8dd..430a4b7e353e 100644
--- a/samples/bpf/lwt_len_hist_user.c
+++ b/samples/bpf/lwt_len_hist_user.c
@@ -15,8 +15,6 @@
#define MAX_INDEX 64
#define MAX_STARS 38
-char bpf_log_buf[BPF_LOG_BUF_SIZE];
-
static void stars(char *str, long val, long max, int width)
{
int i;
diff --git a/samples/bpf/test_lwt_bpf.sh b/samples/bpf/test_lwt_bpf.sh
index 65a976058dd3..65a976058dd3 100644..100755
--- a/samples/bpf/test_lwt_bpf.sh
+++ b/samples/bpf/test_lwt_bpf.sh
diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
index 107da148820f..9c74b45c5720 100644
--- a/samples/bpf/tracex1_kern.c
+++ b/samples/bpf/tracex1_kern.c
@@ -20,7 +20,7 @@
SEC("kprobe/__netif_receive_skb_core")
int bpf_prog1(struct pt_regs *ctx)
{
- /* attaches to kprobe netif_receive_skb,
+ /* attaches to kprobe __netif_receive_skb_core,
 * looks for packets on the loopback device and prints them
*/
char devname[IFNAMSIZ];
@@ -29,7 +29,7 @@ int bpf_prog1(struct pt_regs *ctx)
int len;
/* non-portable! works for the given kernel only */
- skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
+ bpf_probe_read_kernel(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));
dev = _(skb->dev);
len = _(skb->len);
diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
index 2fca916d9edf..a7f5ee8b6edc 100644
--- a/samples/kfifo/bytestream-example.c
+++ b/samples/kfifo/bytestream-example.c
@@ -124,8 +124,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
ret = kfifo_from_user(&test, buf, count, &copied);
mutex_unlock(&write_lock);
+ if (ret)
+ return ret;
- return ret ? ret : copied;
+ return copied;
}
static ssize_t fifo_read(struct file *file, char __user *buf,
@@ -140,8 +142,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
ret = kfifo_to_user(&test, buf, count, &copied);
mutex_unlock(&read_lock);
+ if (ret)
+ return ret;
- return ret ? ret : copied;
+ return copied;
}
static const struct file_operations fifo_fops = {
diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
index 8dc3c2e7105a..a326a37e9163 100644
--- a/samples/kfifo/inttype-example.c
+++ b/samples/kfifo/inttype-example.c
@@ -117,8 +117,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
ret = kfifo_from_user(&test, buf, count, &copied);
mutex_unlock(&write_lock);
+ if (ret)
+ return ret;
- return ret ? ret : copied;
+ return copied;
}
static ssize_t fifo_read(struct file *file, char __user *buf,
@@ -133,8 +135,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
ret = kfifo_to_user(&test, buf, count, &copied);
mutex_unlock(&read_lock);
+ if (ret)
+ return ret;
- return ret ? ret : copied;
+ return copied;
}
static const struct file_operations fifo_fops = {
diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c
index 2d7529eeb294..deb87a2e4e6b 100644
--- a/samples/kfifo/record-example.c
+++ b/samples/kfifo/record-example.c
@@ -131,8 +131,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
ret = kfifo_from_user(&test, buf, count, &copied);
mutex_unlock(&write_lock);
+ if (ret)
+ return ret;
- return ret ? ret : copied;
+ return copied;
}
static ssize_t fifo_read(struct file *file, char __user *buf,
@@ -147,8 +149,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
ret = kfifo_to_user(&test, buf, count, &copied);
mutex_unlock(&read_lock);
+ if (ret)
+ return ret;
- return ret ? ret : copied;
+ return copied;
}
static const struct file_operations fifo_fops = {
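
All three kfifo samples now share the same result convention; as a sketch, the tail of each handler reduces to:

	/* Illustrative helper: propagate the kfifo error, otherwise bytes copied. */
	static ssize_t example_copy_result(int ret, unsigned int copied)
	{
		if (ret)
			return ret;
		return copied;
	}
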
diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c
index f42ce551bb48..a50d27473e12 100644
--- a/samples/mic/mpssd/mpssd.c
+++ b/samples/mic/mpssd/mpssd.c
@@ -414,9 +414,9 @@ mic_virtio_copy(struct mic_info *mic, int fd,
static inline unsigned _vring_size(unsigned int num, unsigned long align)
{
- return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
+ return _ALIGN_UP(((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
+ align - 1) & ~(align - 1))
- + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
+ + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num, 4);
}
/*
diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c
index 96e7969c473a..d774717cd906 100644
--- a/samples/vfio-mdev/mdpy.c
+++ b/samples/vfio-mdev/mdpy.c
@@ -418,7 +418,7 @@ static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
return -EINVAL;
return remap_vmalloc_range_partial(vma, vma->vm_start,
- mdev_state->memblk,
+ mdev_state->memblk, 0,
vma->vm_end - vma->vm_start);
}
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index ce53639a864a..c830750d725b 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -81,20 +81,21 @@ cc-cross-prefix = \
fi)))
# output directory for tests below
-TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
+TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$
# try-run
# Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
# Exit code chooses option. "$$TMP" serves as a temporary file and is
# automatically cleaned up.
try-run = $(shell set -e; \
- TMP="$(TMPOUT).$$$$.tmp"; \
- TMPO="$(TMPOUT).$$$$.o"; \
+ TMP=$(TMPOUT)/tmp; \
+ TMPO=$(TMPOUT)/tmp.o; \
+ mkdir -p $(TMPOUT); \
+ trap "rm -rf $(TMPOUT)" EXIT; \
if ($(1)) >/dev/null 2>&1; \
then echo "$(2)"; \
else echo "$(3)"; \
- fi; \
- rm -f "$$TMP" "$$TMPO")
+ fi)
# as-option
# Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
diff --git a/scripts/Makefile b/scripts/Makefile
index 61affa300d25..3ce4981bb2b6 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -10,6 +10,9 @@
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+CRYPTO_LIBS = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto)
+CRYPTO_CFLAGS = $(shell pkg-config --cflags libcrypto 2> /dev/null)
+
hostprogs-$(CONFIG_BUILD_BIN2C) += bin2c
hostprogs-$(CONFIG_KALLSYMS) += kallsyms
hostprogs-$(CONFIG_LOGO) += pnmtologo
@@ -23,8 +26,10 @@ hostprogs-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert
HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include
HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
-HOSTLDLIBS_sign-file = -lcrypto
-HOSTLDLIBS_extract-cert = -lcrypto
+HOSTCFLAGS_sign-file.o = $(CRYPTO_CFLAGS)
+HOSTLDLIBS_sign-file = $(CRYPTO_LIBS)
+HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS)
+HOSTLDLIBS_extract-cert = $(CRYPTO_LIBS)
always := $(hostprogs-y) $(hostprogs-m)
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index a923f05edb36..e12daed3f41d 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
#
# Copyright 2004 Matt Mackall <mpm@selenic.com>
#
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 161b0224d6ae..2e31ec137821 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2541,8 +2541,8 @@ sub process {
# Check if the commit log has what seems like a diff which can confuse patch
if ($in_commit_log && !$commit_log_has_diff &&
- (($line =~ m@^\s+diff\b.*a/[\w/]+@ &&
- $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) ||
+ (($line =~ m@^\s+diff\b.*a/([\w/]+)@ &&
+ $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) ||
$line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ ||
$line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) {
ERROR("DIFF_IN_COMMIT_MSG",
@@ -4059,7 +4059,7 @@ sub process {
$fix) {
fix_delete_line($fixlinenr, $rawline);
my $fixed_line = $rawline;
- $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*){(.*)$/;
+ $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*)\{(.*)$/;
my $line1 = $1;
my $line2 = $2;
fix_insert_line($fixlinenr, ltrim($line1));
diff --git a/scripts/config b/scripts/config
index e0e39826dae9..8c8d7c3d7acc 100755
--- a/scripts/config
+++ b/scripts/config
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-2.0
# Manipulate options in a .config file from the command line
@@ -7,6 +7,9 @@ myname=${0##*/}
# If no prefix forced, use the default CONFIG_
CONFIG_="${CONFIG_-CONFIG_}"
+# We use an uncommon delimiter for sed substitutions
+SED_DELIM=$(echo -en "\001")
+
usage() {
cat >&2 <<EOL
Manipulate options in a .config file from the command line.
@@ -83,7 +86,7 @@ txt_subst() {
local infile="$3"
local tmpfile="$infile.swp"
- sed -e "s:$before:$after:" "$infile" >"$tmpfile"
+ sed -e "s$SED_DELIM$before$SED_DELIM$after$SED_DELIM" "$infile" >"$tmpfile"
# replace original file with the edited one
mv "$tmpfile" "$infile"
}
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 5aa75a0a1ced..946735bd5a25 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -77,8 +77,8 @@ parse_symbol() {
return
fi
- # Strip out the base of the path
- code=${code#$basepath/}
+ # Strip out the base of the path on each line
+ code=$(while read -r line; do echo "${line#$basepath/}"; done <<< "$code")
# In the case of inlines, move everything to same line
code=${code//$'\n'/' '}
diff --git a/scripts/decodecode b/scripts/decodecode
index 9cef558528aa..eeaa435d1bd2 100755
--- a/scripts/decodecode
+++ b/scripts/decodecode
@@ -119,7 +119,7 @@ faultlinenum=$(( $(wc -l $T.oo | cut -d" " -f1) - \
faultline=`cat $T.dis | head -1 | cut -d":" -f2-`
faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
-cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
+cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
echo
cat $T.aa
cleanup
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
index e083bcae343f..3643b4f896ed 100755
--- a/scripts/depmod.sh
+++ b/scripts/depmod.sh
@@ -15,6 +15,8 @@ if ! test -r System.map ; then
exit 0
fi
+# legacy behavior: "depmod" in /sbin, no /sbin in PATH
+PATH="$PATH:/sbin"
if [ -z $(command -v $DEPMOD) ]; then
echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
echo "This is probably in the kmod package." >&2
diff --git a/scripts/diffconfig b/scripts/diffconfig
index 89abf777f197..d5da5fa05d1d 100755
--- a/scripts/diffconfig
+++ b/scripts/diffconfig
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# diffconfig - a tool to compare .config files.
diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile
index aa0d0ec6936d..9e95862f2788 100644
--- a/scripts/gcc-plugins/Makefile
+++ b/scripts/gcc-plugins/Makefile
@@ -11,6 +11,7 @@ else
HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti
HOST_EXTRACXXFLAGS += -fno-exceptions -fasynchronous-unwind-tables -ggdb
HOST_EXTRACXXFLAGS += -Wno-narrowing -Wno-unused-variable
+ HOST_EXTRACXXFLAGS += -Wno-format-diag
export HOST_EXTRACXXFLAGS
endif
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 17f06079a712..9ad76b7f3f10 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -35,7 +35,9 @@
#include "ggc.h"
#include "timevar.h"
+#if BUILDING_GCC_VERSION < 10000
#include "params.h"
+#endif
#if BUILDING_GCC_VERSION <= 4009
#include "pointer-set.h"
@@ -847,6 +849,7 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l
return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT);
}
+#if BUILDING_GCC_VERSION < 10000
template <>
template <>
inline bool is_a_helper<const ggoto *>::test(const_gimple gs)
@@ -860,6 +863,7 @@ inline bool is_a_helper<const greturn *>::test(const_gimple gs)
{
return gs->code == GIMPLE_RETURN;
}
+#endif
static inline gasm *as_a_gasm(gimple stmt)
{
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index 4644f1a83b57..aa4f87e3ddb1 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -96,7 +96,7 @@ lx-symbols command."""
return ""
attrs = sect_attrs['attrs']
section_name_to_address = {
- attrs[n]['name'].string(): attrs[n]['address']
+ attrs[n]['battr']['attr']['name'].string(): attrs[n]['address']
for n in range(int(sect_attrs['nsections']))}
args = []
for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index c8ff1c99dd5c..552cf7557c7a 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -504,8 +504,8 @@ static int get_mext_match(const char *match_str, match_f flag)
else if (flag == FIND_NEXT_MATCH_UP)
--match_start;
+ match_start = (match_start + items_num) % items_num;
index = match_start;
- index = (index + items_num) % items_num;
while (true) {
char *str = k_menu_items[index].str;
if (strcasestr(str, match_str) != NULL)
diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
index 5ca2df790d3c..389814b02d06 100644
--- a/scripts/kconfig/preprocess.c
+++ b/scripts/kconfig/preprocess.c
@@ -111,7 +111,7 @@ static char *do_error_if(int argc, char *argv[])
if (!strcmp(argv[0], "y"))
pperror("%s", argv[1]);
- return NULL;
+ return xstrdup("");
}
static char *do_filename(int argc, char *argv[])
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index ef4310f2558b..1ee33d2e15bf 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -627,7 +627,7 @@ void ConfigList::updateMenuList(ConfigItem *parent, struct menu* menu)
last = item;
continue;
}
- hide:
+hide:
if (item && item->menu == child) {
last = parent->firstChild();
if (last == item)
@@ -692,7 +692,7 @@ void ConfigList::updateMenuList(ConfigList *parent, struct menu* menu)
last = item;
continue;
}
- hide:
+hide:
if (item && item->menu == child) {
last = (ConfigItem*)parent->topLevelItem(0);
if (last == item)
@@ -869,40 +869,40 @@ void ConfigList::focusInEvent(QFocusEvent *e)
void ConfigList::contextMenuEvent(QContextMenuEvent *e)
{
- if (e->y() <= header()->geometry().bottom()) {
- if (!headerPopup) {
- QAction *action;
-
- headerPopup = new QMenu(this);
- action = new QAction("Show Name", this);
- action->setCheckable(true);
- connect(action, SIGNAL(toggled(bool)),
- parent(), SLOT(setShowName(bool)));
- connect(parent(), SIGNAL(showNameChanged(bool)),
- action, SLOT(setOn(bool)));
- action->setChecked(showName);
- headerPopup->addAction(action);
- action = new QAction("Show Range", this);
- action->setCheckable(true);
- connect(action, SIGNAL(toggled(bool)),
- parent(), SLOT(setShowRange(bool)));
- connect(parent(), SIGNAL(showRangeChanged(bool)),
- action, SLOT(setOn(bool)));
- action->setChecked(showRange);
- headerPopup->addAction(action);
- action = new QAction("Show Data", this);
- action->setCheckable(true);
- connect(action, SIGNAL(toggled(bool)),
- parent(), SLOT(setShowData(bool)));
- connect(parent(), SIGNAL(showDataChanged(bool)),
- action, SLOT(setOn(bool)));
- action->setChecked(showData);
- headerPopup->addAction(action);
- }
- headerPopup->exec(e->globalPos());
- e->accept();
- } else
- e->ignore();
+ if (!headerPopup) {
+ QAction *action;
+
+ headerPopup = new QMenu(this);
+ action = new QAction("Show Name", this);
+ action->setCheckable(true);
+ connect(action, SIGNAL(toggled(bool)),
+ parent(), SLOT(setShowName(bool)));
+ connect(parent(), SIGNAL(showNameChanged(bool)),
+ action, SLOT(setChecked(bool)));
+ action->setChecked(showName);
+ headerPopup->addAction(action);
+
+ action = new QAction("Show Range", this);
+ action->setCheckable(true);
+ connect(action, SIGNAL(toggled(bool)),
+ parent(), SLOT(setShowRange(bool)));
+ connect(parent(), SIGNAL(showRangeChanged(bool)),
+ action, SLOT(setChecked(bool)));
+ action->setChecked(showRange);
+ headerPopup->addAction(action);
+
+ action = new QAction("Show Data", this);
+ action->setCheckable(true);
+ connect(action, SIGNAL(toggled(bool)),
+ parent(), SLOT(setShowData(bool)));
+ connect(parent(), SIGNAL(showDataChanged(bool)),
+ action, SLOT(setChecked(bool)));
+ action->setChecked(showData);
+ headerPopup->addAction(action);
+ }
+
+ headerPopup->exec(e->globalPos());
+ e->accept();
}
ConfigView*ConfigView::viewList;
@@ -1225,10 +1225,11 @@ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
{
QMenu* popup = Parent::createStandardContextMenu(pos);
QAction* action = new QAction("Show Debug Info", popup);
- action->setCheckable(true);
- connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
- connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
- action->setChecked(showDebug());
+
+ action->setCheckable(true);
+ connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
+ connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setChecked(bool)));
+ action->setChecked(showDebug());
popup->addSeparator();
popup->addAction(action);
return popup;
diff --git a/scripts/mksysmap b/scripts/mksysmap
index a35acc0d0b82..9aa23d15862a 100755
--- a/scripts/mksysmap
+++ b/scripts/mksysmap
@@ -41,4 +41,4 @@
# so we just ignore them to let readprofile continue to work.
# (At least sparc64 has __crc_ in the middle).
-$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( .L\)' > $2
+$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)' > $2
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 91a80036c05d..7c693bd775c1 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -895,7 +895,7 @@ static void check_section(const char *modname, struct elf_info *elf,
#define DATA_SECTIONS ".data", ".data.rel"
#define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \
- ".kprobes.text", ".cpuidle.text"
+ ".kprobes.text", ".cpuidle.text", ".noinstr.text"
#define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
".fixup", ".entry.text", ".exception.text", ".text.*", \
".coldtext"
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 3b0dcf38fd8b..d3e61dcc6129 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -401,7 +401,7 @@ static uint32_t (*w2)(uint16_t);
static int
is_mcounted_section_name(char const *const txtname)
{
- return strcmp(".text", txtname) == 0 ||
+ return strncmp(".text", txtname, 5) == 0 ||
strcmp(".init.text", txtname) == 0 ||
strcmp(".ref.text", txtname) == 0 ||
strcmp(".sched.text", txtname) == 0 ||
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index f599031260d5..657e69125a46 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -142,6 +142,11 @@ my %text_sections = (
".text.unlikely" => 1,
);
+# Acceptable section-prefixes to record.
+my %text_section_prefixes = (
+ ".text." => 1,
+);
+
# Note: we are nice to C-programmers here, thus we skip the '||='-idiom.
$objdump = 'objdump' if (!$objdump);
$objcopy = 'objcopy' if (!$objcopy);
@@ -263,7 +268,11 @@ if ($arch eq "x86_64") {
# force flags for this arch
$ld .= " -m shlelf_linux";
- $objcopy .= " -O elf32-sh-linux";
+ if ($endian eq "big") {
+ $objcopy .= " -O elf32-shbig-linux";
+ } else {
+ $objcopy .= " -O elf32-sh-linux";
+ }
} elsif ($arch eq "powerpc") {
my $ldemulation;
@@ -386,7 +395,7 @@ if ($arch eq "x86_64") {
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
} elsif ($arch eq "riscv") {
$function_regex = "^([0-9a-fA-F]+)\\s+<([^.0-9][0-9a-zA-Z_\\.]+)>:";
- $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL(_PLT)?\\s_?mcount\$";
$type = ".quad";
$alignment = 2;
} elsif ($arch eq "nds32") {
@@ -519,6 +528,14 @@ while (<IN>) {
# Only record text sections that we know are safe
$read_function = defined($text_sections{$1});
+ if (!$read_function) {
+ foreach my $prefix (keys %text_section_prefixes) {
+ if (substr($1, 0, length $prefix) eq $prefix) {
+ $read_function = 1;
+ last;
+ }
+ }
+ }
# print out any recorded offsets
update_funcs();
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 365b3c2b8f43..2cb0b92f40be 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -45,7 +45,7 @@ scm_version()
# Check for git and a git repo.
if test -z "$(git rev-parse --show-cdup 2>/dev/null)" &&
- head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
+ head=$(git rev-parse --verify HEAD 2>/dev/null); then
# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
# it, because this version is defined in the top level Makefile.
@@ -59,11 +59,22 @@ scm_version()
fi
# If we are past a tagged commit (like
# "v2.6.30-rc5-302-g72357d5"), we pretty print it.
- if atag="`git describe 2>/dev/null`"; then
- echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}'
-
- # If we don't have a tag at all we print -g{commitish}.
+ #
+ # Ensure the abbreviated sha1 has exactly 12
+ # hex characters, to make the output
+ # independent of git version, local
+ # core.abbrev settings and/or total number of
+ # objects in the current repository - passing
+ # --abbrev=12 ensures a minimum of 12, and the
+ # awk substr() then picks the 'g' and first 12
+ # hex chars.
+ if atag="$(git describe --abbrev=12 2>/dev/null)"; then
+ echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),substr($(NF),0,13))}'
+
+ # If we don't have a tag at all we print -g{commitish},
+ # again using exactly 12 hex chars.
else
+ head="$(echo $head | cut -c1-12)"
printf '%s%s' -g $head
fi
fi
diff --git a/scripts/split-man.pl b/scripts/split-man.pl
index c3db607ee9ec..96bd99dc977a 100755
--- a/scripts/split-man.pl
+++ b/scripts/split-man.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# SPDX-License-Identifier: GPL-2.0
#
# Author: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 0a57d105cc5b..1ec1e928cc09 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -424,7 +424,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size,
*/
error = aa_may_manage_policy(label, ns, mask);
if (error)
- return error;
+ goto end_section;
data = aa_simple_write_to_buffer(buf, size, size, pos);
error = PTR_ERR(data);
@@ -432,6 +432,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size,
error = aa_replace_profiles(ns, label, mask, data);
aa_put_loaddata(data);
}
+end_section:
end_current_label_crit_section(label);
return error;
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index eeaddfe0c0fb..70b9730c0be6 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -201,8 +201,9 @@ int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr,
GFP_KERNEL, true, false);
if (IS_ERR(rule->label)) {
+ int err = PTR_ERR(rule->label);
aa_audit_rule_free(rule);
- return PTR_ERR(rule->label);
+ return err;
}
*vrule = rule;
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index bdf9e4cefe25..13b33490e079 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -939,7 +939,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
 * always results in a further reduction of permissions.
*/
if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) &&
- !unconfined(label) && !aa_label_is_subset(new, ctx->nnp)) {
+ !unconfined(label) &&
+ !aa_label_is_unconfined_subset(new, ctx->nnp)) {
error = -EPERM;
info = "no new privs";
goto audit;
@@ -1217,7 +1218,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
* reduce restrictions.
*/
if (task_no_new_privs(current) && !unconfined(label) &&
- !aa_label_is_subset(new, ctx->nnp)) {
+ !aa_label_is_unconfined_subset(new, ctx->nnp)) {
/* not an apparmor denial per se, so don't log it */
AA_DEBUG("no_new_privs - change_hat denied");
error = -EPERM;
@@ -1238,7 +1239,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
* reduce restrictions.
*/
if (task_no_new_privs(current) && !unconfined(label) &&
- !aa_label_is_subset(previous, ctx->nnp)) {
+ !aa_label_is_unconfined_subset(previous, ctx->nnp)) {
/* not an apparmor denial per se, so don't log it */
AA_DEBUG("no_new_privs - change_hat denied");
error = -EPERM;
@@ -1338,6 +1339,7 @@ int aa_change_profile(const char *fqname, int flags)
ctx->nnp = aa_get_label(label);
if (!fqname || !*fqname) {
+ aa_put_label(label);
AA_DEBUG("no profile name");
return -EINVAL;
}
@@ -1356,8 +1358,6 @@ int aa_change_profile(const char *fqname, int flags)
op = OP_CHANGE_PROFILE;
}
- label = aa_get_current_label();
-
if (*fqname == '&') {
stack = true;
/* don't have label_parse() do stacking */
@@ -1434,7 +1434,7 @@ check:
* reduce restrictions.
*/
if (task_no_new_privs(current) && !unconfined(label) &&
- !aa_label_is_subset(new, ctx->nnp)) {
+ !aa_label_is_unconfined_subset(new, ctx->nnp)) {
/* not an apparmor denial per se, so don't log it */
AA_DEBUG("no_new_privs - change_hat denied");
error = -EPERM;
diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h
index 7ce5fe73ae7f..cecbd3f5429c 100644
--- a/security/apparmor/include/label.h
+++ b/security/apparmor/include/label.h
@@ -285,6 +285,7 @@ bool aa_label_init(struct aa_label *label, int size, gfp_t gfp);
struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp);
bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub);
+bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub);
struct aa_profile *__aa_label_next_not_in_set(struct label_it *I,
struct aa_label *set,
struct aa_label *sub);
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index 2469549842d2..6727e6fb69df 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -554,6 +554,39 @@ bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub)
return __aa_label_next_not_in_set(&i, set, sub) == NULL;
}
+/**
+ * aa_label_is_unconfined_subset - test if @sub is a subset of @set
+ * @set: label to test against
+ * @sub: label to test if is subset of @set
+ *
+ * This checks for subset while taking unconfined profiles into account. If
+ * @sub contains an unconfined profile that does not have a matching
+ * unconfined in @set, this does not cause the test to fail.
+ * Conversely, an unconfined in @set that is not in @sub is ignored.
+ *
+ * Returns: true if @sub is a special subset of @set
+ * else false
+ */
+bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub)
+{
+ struct label_it i = { };
+ struct aa_profile *p;
+
+ AA_BUG(!set);
+ AA_BUG(!sub);
+
+ if (sub == set)
+ return true;
+
+ do {
+ p = __aa_label_next_not_in_set(&i, set, sub);
+ if (p && !profile_unconfined(p))
+ break;
+ } while (p);
+
+ return p == NULL;
+}
/**
@@ -1535,13 +1568,13 @@ static const char *label_modename(struct aa_ns *ns, struct aa_label *label,
label_for_each(i, label, profile) {
if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) {
- if (profile->mode == APPARMOR_UNCONFINED)
+ count++;
+ if (profile == profile->ns->unconfined)
/* special case unconfined so stacks with
* unconfined don't report as mixed. ie.
* profile_foo//&:ns1:unconfined (mixed)
*/
continue;
- count++;
if (mode == -1)
mode = profile->mode;
else if (mode != profile->mode)
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 730de4638b4e..898752b818dc 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -130,11 +130,11 @@ static int apparmor_ptrace_traceme(struct task_struct *parent)
struct aa_label *tracer, *tracee;
int error;
- tracee = begin_current_label_crit_section();
+ tracee = __begin_current_label_crit_section();
tracer = aa_get_task_label(parent);
error = aa_may_ptrace(tracer, tracee, AA_PTRACE_TRACE);
aa_put_label(tracer);
- end_current_label_crit_section(tracee);
+ __end_current_label_crit_section(tracee);
return error;
}
@@ -797,7 +797,12 @@ static void apparmor_sk_clone_security(const struct sock *sk,
struct aa_sk_ctx *ctx = SK_CTX(sk);
struct aa_sk_ctx *new = SK_CTX(newsk);
+ if (new->label)
+ aa_put_label(new->label);
new->label = aa_get_label(ctx->label);
+
+ if (new->peer)
+ aa_put_label(new->peer);
new->peer = aa_get_label(ctx->peer);
}
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index 55f2ee505a01..c1b96a3841da 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -101,6 +101,9 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
th.td_flags == YYTD_DATA8))
goto out;
+ /* if we have a table it must have some entries */
+ if (th.td_lolen == 0)
+ goto out;
tsize = table_size(th.td_lolen, th.td_flags);
if (bsize < tsize)
goto out;
@@ -202,6 +205,8 @@ static int verify_dfa(struct aa_dfa *dfa)
state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
trans_count = dfa->tables[YYTD_ID_NXT]->td_lolen;
+ if (state_count == 0)
+ goto out;
for (i = 0; i < state_count; i++) {
if (!(BASE_TABLE(dfa)[i] & MATCH_FLAG_DIFF_ENCODE) &&
(DEFAULT_TABLE(dfa)[i] >= state_count))
diff --git a/security/commoncap.c b/security/commoncap.c
index 3023b4ad38a7..876cfe01d939 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -377,10 +377,11 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
{
int size, ret;
kuid_t kroot;
+ u32 nsmagic, magic;
uid_t root, mappedroot;
char *tmpbuf = NULL;
struct vfs_cap_data *cap;
- struct vfs_ns_cap_data *nscap;
+ struct vfs_ns_cap_data *nscap = NULL;
struct dentry *dentry;
struct user_namespace *fs_ns;
@@ -396,52 +397,67 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
&tmpbuf, size, GFP_NOFS);
dput(dentry);
- if (ret < 0)
+ if (ret < 0 || !tmpbuf)
return ret;
fs_ns = inode->i_sb->s_user_ns;
cap = (struct vfs_cap_data *) tmpbuf;
if (is_v2header((size_t) ret, cap)) {
- /* If this is sizeof(vfs_cap_data) then we're ok with the
- * on-disk value, so return that. */
- if (alloc)
- *buffer = tmpbuf;
- else
- kfree(tmpbuf);
- return ret;
- } else if (!is_v3header((size_t) ret, cap)) {
- kfree(tmpbuf);
- return -EINVAL;
+ root = 0;
+ } else if (is_v3header((size_t) ret, cap)) {
+ nscap = (struct vfs_ns_cap_data *) tmpbuf;
+ root = le32_to_cpu(nscap->rootid);
+ } else {
+ size = -EINVAL;
+ goto out_free;
}
- nscap = (struct vfs_ns_cap_data *) tmpbuf;
- root = le32_to_cpu(nscap->rootid);
kroot = make_kuid(fs_ns, root);
/* If the root kuid maps to a valid uid in current ns, then return
* this as a nscap. */
mappedroot = from_kuid(current_user_ns(), kroot);
if (mappedroot != (uid_t)-1 && mappedroot != (uid_t)0) {
+ size = sizeof(struct vfs_ns_cap_data);
if (alloc) {
- *buffer = tmpbuf;
+ if (!nscap) {
+ /* v2 -> v3 conversion */
+ nscap = kzalloc(size, GFP_ATOMIC);
+ if (!nscap) {
+ size = -ENOMEM;
+ goto out_free;
+ }
+ nsmagic = VFS_CAP_REVISION_3;
+ magic = le32_to_cpu(cap->magic_etc);
+ if (magic & VFS_CAP_FLAGS_EFFECTIVE)
+ nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
+ memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
+ nscap->magic_etc = cpu_to_le32(nsmagic);
+ } else {
+ /* use allocated v3 buffer */
+ tmpbuf = NULL;
+ }
nscap->rootid = cpu_to_le32(mappedroot);
- } else
- kfree(tmpbuf);
- return size;
+ *buffer = nscap;
+ }
+ goto out_free;
}
if (!rootid_owns_currentns(kroot)) {
- kfree(tmpbuf);
- return -EOPNOTSUPP;
+ size = -EOVERFLOW;
+ goto out_free;
}
/* This comes from a parent namespace. Return as a v2 capability */
size = sizeof(struct vfs_cap_data);
if (alloc) {
- *buffer = kmalloc(size, GFP_ATOMIC);
- if (*buffer) {
- struct vfs_cap_data *cap = *buffer;
- __le32 nsmagic, magic;
+ if (nscap) {
+ /* v3 -> v2 conversion */
+ cap = kzalloc(size, GFP_ATOMIC);
+ if (!cap) {
+ size = -ENOMEM;
+ goto out_free;
+ }
magic = VFS_CAP_REVISION_2;
nsmagic = le32_to_cpu(nscap->magic_etc);
if (nsmagic & VFS_CAP_FLAGS_EFFECTIVE)
@@ -449,9 +465,12 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
cap->magic_etc = cpu_to_le32(magic);
} else {
- size = -ENOMEM;
+ /* use unconverted v2 */
+ tmpbuf = NULL;
}
+ *buffer = cap;
}
+out_free:
kfree(tmpbuf);
return size;
}
@@ -819,6 +838,7 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
int ret;
kuid_t root_uid;
+ new->cap_ambient = old->cap_ambient;
if (WARN_ON(!cap_ambient_invariant_ok(old)))
return -EPERM;
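
The cap_inode_getsecurity() hunk above reworks the v2/v3 handling of the security.capability xattr: a v2 blob read from disk can be reported to the caller as a v3 (namespaced) blob when the filesystem root uid maps into the caller's namespace, and a v3 blob from a parent namespace is converted back to v2. Below is a minimal userspace sketch of the v2-to-v3 conversion step only, using the uapi layout from <linux/capability.h>; it assumes a little-endian host so the __le32 fields can be assigned directly, whereas the kernel code goes through cpu_to_le32()/le32_to_cpu().

/* Sketch: build a v3 (namespaced) fscap blob from an on-disk v2 blob. */
#include <linux/capability.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static struct vfs_ns_cap_data v2_to_v3(const struct vfs_cap_data *v2,
                                       uint32_t mapped_rootid)
{
    struct vfs_ns_cap_data v3;
    uint32_t magic = VFS_CAP_REVISION_3;

    memset(&v3, 0, sizeof(v3));
    /* carry the "effective" bit over from the old header */
    if (v2->magic_etc & VFS_CAP_FLAGS_EFFECTIVE)
        magic |= VFS_CAP_FLAGS_EFFECTIVE;
    memcpy(&v3.data, &v2->data, sizeof(__le32) * 2 * VFS_CAP_U32);
    v3.magic_etc = magic;
    v3.rootid = mapped_rootid;    /* root uid as seen in the caller's ns */
    return v3;
}

int main(void)
{
    struct vfs_cap_data v2 = {
        .magic_etc = VFS_CAP_REVISION_2 | VFS_CAP_FLAGS_EFFECTIVE,
        .data = { { .permitted = 0x1, .inheritable = 0 } },
    };
    struct vfs_ns_cap_data v3 = v2_to_v3(&v2, 1000);

    printf("v3 magic=0x%x rootid=%u\n",
           (unsigned)v3.magic_etc, (unsigned)v3.rootid);
    return 0;
}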
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 6a314fb0d480..fc142d04d365 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -96,7 +96,7 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo)
algo = hash_algo_name[hash_algo];
}
- if (*tfm == NULL) {
+ if (IS_ERR_OR_NULL(*tfm)) {
mutex_lock(&mutex);
if (*tfm)
goto out;
@@ -215,7 +215,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
data->hdr.length = crypto_shash_digestsize(desc->tfm);
error = -ENODATA;
- list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
+ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
bool is_ima = false;
if (strcmp(xattr->name, XATTR_NAME_IMA) == 0)
@@ -249,7 +249,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
/* Portable EVM signatures must include an IMA hash */
if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present)
- return -EPERM;
+ error = -EPERM;
out:
kfree(xattr_value);
kfree(desc);
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 7f3f54d89a6e..651c0127c00d 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -102,7 +102,7 @@ static int evm_find_protected_xattrs(struct dentry *dentry)
if (!(inode->i_opflags & IOP_XATTR))
return -EOPNOTSUPP;
- list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
+ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
error = __vfs_getxattr(dentry, inode, xattr->name, NULL, 0);
if (error < 0) {
if (error == -ENODATA)
@@ -186,6 +186,12 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
break;
case EVM_IMA_XATTR_DIGSIG:
case EVM_XATTR_PORTABLE_DIGSIG:
+ /* accept xattr with non-empty signature field */
+ if (xattr_len <= sizeof(struct signature_v2_hdr)) {
+ evm_status = INTEGRITY_FAIL;
+ goto out;
+ }
+
hdr = (struct signature_v2_hdr *)xattr_data;
digest.hdr.algo = hdr->hash_algo;
rc = evm_calc_hash(dentry, xattr_name, xattr_value,
@@ -233,7 +239,7 @@ static int evm_protected_xattr(const char *req_xattr_name)
struct xattr_list *xattr;
namelen = strlen(req_xattr_name);
- list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
+ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
if ((strlen(xattr->name) == namelen)
&& (strncmp(req_xattr_name, xattr->name, namelen) == 0)) {
found = 1;
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
index 77de71b7794c..f112ca593adc 100644
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -237,7 +237,14 @@ static ssize_t evm_write_xattrs(struct file *file, const char __user *buf,
goto out;
}
- /* Guard against races in evm_read_xattrs */
+ /*
+ * xattr_list_mutex guards against races in evm_read_xattrs().
+ * Entries are only added to the evm_config_xattrnames list
+ * and never deleted. Therefore, the list is traversed
+ * using list_for_each_entry_lockless() without holding
+ * the mutex in evm_calc_hmac_or_hash(), evm_find_protected_xattrs()
+ * and evm_protected_xattr().
+ */
mutex_lock(&xattr_list_mutex);
list_for_each_entry(tmp, &evm_config_xattrnames, list) {
if (strcmp(xattr->name, tmp->name) == 0) {
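
The comment added above spells out why EVM can walk evm_config_xattrnames with list_for_each_entry_lockless(): the list is add-only, writers serialize on xattr_list_mutex, and readers only need to observe fully initialized entries. The sketch below shows the same publish/observe discipline on an add-only singly linked list using C11 release/acquire atomics; it illustrates the idea and is not the kernel's list implementation.

/* Add-only list: writers publish nodes with a release store,
 * readers traverse without the writer's lock (entries are never removed). */
#include <stdatomic.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct node {
    char name[32];
    _Atomic(struct node *) next;
};

static _Atomic(struct node *) head = NULL;
static pthread_mutex_t add_lock = PTHREAD_MUTEX_INITIALIZER;

/* writer side: serialise additions, publish the fully built node last */
static void add_name(const char *name)
{
    struct node *n = calloc(1, sizeof(*n));

    strncpy(n->name, name, sizeof(n->name) - 1);
    pthread_mutex_lock(&add_lock);
    atomic_store_explicit(&n->next,
                          atomic_load_explicit(&head, memory_order_relaxed),
                          memory_order_relaxed);
    atomic_store_explicit(&head, n, memory_order_release);
    pthread_mutex_unlock(&add_lock);
}

/* reader side: no lock, just ordered loads */
static int name_is_listed(const char *name)
{
    struct node *n = atomic_load_explicit(&head, memory_order_acquire);

    for (; n; n = atomic_load_explicit(&n->next, memory_order_acquire))
        if (strcmp(n->name, name) == 0)
            return 1;
    return 0;
}

int main(void)
{
    add_name("security.ima");
    add_name("security.selinux");
    printf("%d %d\n", name_is_listed("security.ima"),
           name_is_listed("security.apparmor"));
    return 0;
}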
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 67db9d9454ca..d12b07eb3a58 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -40,7 +40,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
#define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
#define IMA_EVENT_NAME_LEN_MAX 255
-#define IMA_HASH_BITS 9
+#define IMA_HASH_BITS 10
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
#define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16
@@ -56,6 +56,7 @@ extern int ima_policy_flag;
extern int ima_hash_algo;
extern int ima_appraise;
extern struct tpm_chip *ima_tpm_chip;
+extern const char boot_aggregate_name[];
/* IMA event related data */
struct ima_event_data {
@@ -139,7 +140,7 @@ int ima_calc_buffer_hash(const void *buf, loff_t len,
int ima_calc_field_array_hash(struct ima_field_data *field_data,
struct ima_template_desc *desc, int num_fields,
struct ima_digest_data *hash);
-int __init ima_calc_boot_aggregate(struct ima_digest_data *hash);
+int ima_calc_boot_aggregate(struct ima_digest_data *hash);
void ima_add_violation(struct file *file, const unsigned char *filename,
struct integrity_iint_cache *iint,
const char *op, const char *cause);
@@ -166,9 +167,10 @@ struct ima_h_table {
};
extern struct ima_h_table ima_htable;
-static inline unsigned long ima_hash_key(u8 *digest)
+static inline unsigned int ima_hash_key(u8 *digest)
{
- return hash_long(*digest, IMA_HASH_BITS);
+ /* there is no point in taking a hash of part of a digest */
+ return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE;
}
#define __ima_hooks(hook) \
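
The ima.h hunk above doubles the measurement hash table to 1024 buckets and derives the bucket index from the first two bytes of the digest instead of running hash_long() over a single byte. A small standalone illustration of the new key computation (the digest bytes here are just example data):

/* Bucket index from the first two digest bytes, modulo the table size. */
#include <stdio.h>

#define IMA_HASH_BITS           10
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)

static unsigned int ima_hash_key(const unsigned char *digest)
{
    return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE;
}

int main(void)
{
    unsigned char digest[20] = { 0xde, 0xad, 0xbe, 0xef };

    /* 0xadde = 44510; 44510 % 1024 = 478 */
    printf("bucket = %u\n", ima_hash_key(digest));
    return 0;
}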
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index f63b4bd45d60..f0b244989734 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -415,7 +415,7 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
loff_t i_size;
int rc;
struct file *f = file;
- bool new_file_instance = false, modified_flags = false;
+ bool new_file_instance = false;
/*
* For consistency, fail file's opened with the O_DIRECT flag on
@@ -433,18 +433,10 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
flags |= O_RDONLY;
f = dentry_open(&file->f_path, flags, file->f_cred);
- if (IS_ERR(f)) {
- /*
- * Cannot open the file again, lets modify f_flags
- * of original and continue
- */
- pr_info_ratelimited("Unable to reopen file for reading.\n");
- f = file;
- f->f_flags |= FMODE_READ;
- modified_flags = true;
- } else {
- new_file_instance = true;
- }
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ new_file_instance = true;
}
i_size = i_size_read(file_inode(f));
@@ -459,8 +451,6 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
out:
if (new_file_instance)
fput(f);
- else if (modified_flags)
- f->f_flags &= ~FMODE_READ;
return rc;
}
@@ -651,7 +641,7 @@ int ima_calc_buffer_hash(const void *buf, loff_t len,
return calc_buffer_shash(buf, len, hash);
}
-static void __init ima_pcrread(int idx, u8 *pcr)
+static void ima_pcrread(int idx, u8 *pcr)
{
if (!ima_tpm_chip)
return;
@@ -663,8 +653,8 @@ static void __init ima_pcrread(int idx, u8 *pcr)
/*
* Calculate the boot aggregate hash
*/
-static int __init ima_calc_boot_aggregate_tfm(char *digest,
- struct crypto_shash *tfm)
+static int ima_calc_boot_aggregate_tfm(char *digest,
+ struct crypto_shash *tfm)
{
u8 pcr_i[TPM_DIGEST_SIZE];
int rc, i;
@@ -682,13 +672,15 @@ static int __init ima_calc_boot_aggregate_tfm(char *digest,
ima_pcrread(i, pcr_i);
/* now accumulate with current aggregate */
rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE);
+ if (rc != 0)
+ return rc;
}
if (!rc)
crypto_shash_final(shash, digest);
return rc;
}
-int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
+int ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
struct crypto_shash *tfm;
int rc;
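
ima_calc_boot_aggregate_tfm() above hashes a sequence of PCR values into one digest and, with this change, returns as soon as a crypto_shash_update() call fails instead of only checking the final return value. Below is a userspace sketch of the same accumulate-with-early-error pattern, using OpenSSL's EVP digest API purely as a stand-in for the kernel's shash interface; the eight all-zero "PCR" buffers are dummies for the example.

/* Accumulate SHA-1 over eight 20-byte "PCR" buffers, failing fast. */
#include <openssl/evp.h>
#include <stdio.h>
#include <string.h>

#define PCR_SIZE 20

static int calc_boot_aggregate(unsigned char pcrs[8][PCR_SIZE],
                               unsigned char *digest, unsigned int *len)
{
    EVP_MD_CTX *ctx = EVP_MD_CTX_new();
    int i, rc = -1;

    if (!ctx)
        return -1;
    if (EVP_DigestInit_ex(ctx, EVP_sha1(), NULL) != 1)
        goto out;
    for (i = 0; i < 8; i++) {
        /* stop at the first update failure, like the patched loop */
        if (EVP_DigestUpdate(ctx, pcrs[i], PCR_SIZE) != 1)
            goto out;
    }
    if (EVP_DigestFinal_ex(ctx, digest, len) != 1)
        goto out;
    rc = 0;
out:
    EVP_MD_CTX_free(ctx);
    return rc;
}

int main(void)
{
    unsigned char pcrs[8][PCR_SIZE];
    unsigned char digest[EVP_MAX_MD_SIZE];
    unsigned int len = 0;

    memset(pcrs, 0, sizeof(pcrs));
    if (calc_boot_aggregate(pcrs, digest, &len) == 0)
        printf("aggregate is %u bytes\n", len);
    return 0;
}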
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index cfb8cc3b975e..604cdac63d84 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -343,8 +343,7 @@ static ssize_t ima_write_policy(struct file *file, const char __user *buf,
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
"policy_update", "signed policy required",
1, 0);
- if (ima_appraise & IMA_APPRAISE_ENFORCE)
- result = -EACCES;
+ result = -EACCES;
} else {
result = ima_parse_add_rule(data);
}
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index faac9ecaa0ae..a2bc4cb4482a 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -25,7 +25,7 @@
#include "ima.h"
/* name for boot aggregate entry */
-static const char *boot_aggregate_name = "boot_aggregate";
+const char boot_aggregate_name[] = "boot_aggregate";
struct tpm_chip *ima_tpm_chip;
/* Add the boot aggregate to the IMA measurement list and extend
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index 16bd18747cfa..6a10d4d8b6e1 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -124,6 +124,7 @@ void ima_add_kexec_buffer(struct kimage *image)
ret = kexec_add_buffer(&kbuf);
if (ret) {
pr_err("Error passing over kexec measurement buffer.\n");
+ vfree(kexec_buffer);
return;
}
@@ -133,6 +134,8 @@ void ima_add_kexec_buffer(struct kimage *image)
return;
}
+ image->ima_buffer = kexec_buffer;
+
pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
kbuf.mem);
}
diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c
index 073ddc9bce5b..3e7a1523663b 100644
--- a/security/integrity/ima/ima_mok.c
+++ b/security/integrity/ima/ima_mok.c
@@ -43,13 +43,12 @@ __init int ima_mok_init(void)
(KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ |
KEY_USR_WRITE | KEY_USR_SEARCH,
- KEY_ALLOC_NOT_IN_QUOTA,
+ KEY_ALLOC_NOT_IN_QUOTA |
+ KEY_ALLOC_SET_KEEP,
restriction, NULL);
if (IS_ERR(ima_blacklist_keyring))
panic("Can't allocate IMA blacklist keyring.");
-
- set_bit(KEY_FLAG_KEEP, &ima_blacklist_keyring->flags);
return 0;
}
device_initcall(ima_mok_init);
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 93babb60b05a..2d5a3daa02f9 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -196,7 +196,7 @@ static struct ima_rule_entry secure_boot_rules[] __ro_after_init = {
static LIST_HEAD(ima_default_rules);
static LIST_HEAD(ima_policy_rules);
static LIST_HEAD(ima_temp_rules);
-static struct list_head *ima_rules;
+static struct list_head *ima_rules = &ima_default_rules;
static int ima_policy __initdata;
@@ -544,7 +544,6 @@ void __init ima_init_policy(void)
temp_ima_appraise |= IMA_APPRAISE_POLICY;
}
- ima_rules = &ima_default_rules;
ima_update_policy_flag();
}
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index 43752002c222..48c5a1be88ac 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -284,6 +284,24 @@ int ima_eventdigest_init(struct ima_event_data *event_data,
goto out;
}
+ if ((const char *)event_data->filename == boot_aggregate_name) {
+ if (ima_tpm_chip) {
+ hash.hdr.algo = HASH_ALGO_SHA1;
+ result = ima_calc_boot_aggregate(&hash.hdr);
+
+ /* algo can change depending on available PCR banks */
+ if (!result && hash.hdr.algo != HASH_ALGO_SHA1)
+ result = -EINVAL;
+
+ if (result < 0)
+ memset(&hash, 0, sizeof(hash));
+ }
+
+ cur_digest = hash.hdr.digest;
+ cur_digestsize = hash_digest_size[HASH_ALGO_SHA1];
+ goto out;
+ }
+
if (!event_data->file) /* missing info to re-calculate the digest */
return -EINVAL;
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index 2806e70d7f8f..630594a5b46e 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -356,7 +356,7 @@ void big_key_describe(const struct key *key, struct seq_file *m)
* read the key data
* - the key's semaphore is read-locked
*/
-long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+long big_key_read(const struct key *key, char *buffer, size_t buflen)
{
size_t datalen = (size_t)key->payload.data[big_key_len];
long ret;
@@ -395,9 +395,8 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
ret = datalen;
- /* copy decrypted data to user */
- if (copy_to_user(buffer, buf->virt, datalen) != 0)
- ret = -EFAULT;
+ /* copy out decrypted data */
+ memcpy(buffer, buf->virt, datalen);
err_fput:
fput(file);
@@ -405,9 +404,7 @@ error:
big_key_free_buffer(buf);
} else {
ret = datalen;
- if (copy_to_user(buffer, key->payload.data[big_key_data],
- datalen) != 0)
- ret = -EFAULT;
+ memcpy(buffer, key->payload.data[big_key_data], datalen);
}
return ret;
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index d92cbf9687c3..571f6d486838 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -895,14 +895,14 @@ out:
}
/*
- * encrypted_read - format and copy the encrypted data to userspace
+ * encrypted_read - format and copy out the encrypted data
*
* The resulting datablob format is:
* <master-key name> <decrypted data length> <encrypted iv> <encrypted data>
*
* On success, return to userspace the encrypted key datablob size.
*/
-static long encrypted_read(const struct key *key, char __user *buffer,
+static long encrypted_read(const struct key *key, char *buffer,
size_t buflen)
{
struct encrypted_key_payload *epayload;
@@ -950,8 +950,7 @@ static long encrypted_read(const struct key *key, char __user *buffer,
key_put(mkey);
memzero_explicit(derived_key, sizeof(derived_key));
- if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0)
- ret = -EFAULT;
+ memcpy(buffer, ascii_buf, asciiblob_len);
kzfree(ascii_buf);
return asciiblob_len;
diff --git a/security/keys/internal.h b/security/keys/internal.h
index a02742621c8d..d1b9c5957000 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -20,6 +20,8 @@
#include <linux/keyctl.h>
#include <linux/refcount.h>
#include <linux/compat.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
struct iovec;
@@ -304,5 +306,4 @@ static inline void key_check(const struct key *key)
#define key_check(key) do {} while(0)
#endif
-
#endif /* _INTERNAL_H */
diff --git a/security/keys/key.c b/security/keys/key.c
index 749a5cf27a19..d3ebc0533e3a 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -305,6 +305,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
key->flags |= 1 << KEY_FLAG_BUILTIN;
if (flags & KEY_ALLOC_UID_KEYRING)
key->flags |= 1 << KEY_FLAG_UID_KEYRING;
+ if (flags & KEY_ALLOC_SET_KEEP)
+ key->flags |= 1 << KEY_FLAG_KEEP;
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC;
@@ -383,7 +385,7 @@ int key_payload_reserve(struct key *key, size_t datalen)
spin_lock(&key->user->lock);
if (delta > 0 &&
- (key->user->qnbytes + delta >= maxbytes ||
+ (key->user->qnbytes + delta > maxbytes ||
key->user->qnbytes + delta < key->user->qnbytes)) {
ret = -EDQUOT;
}
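
The key.c quota hunk fixes an off-by-one: with ">=", a payload that would land exactly on the byte limit was refused, so the effective quota was one byte short. A tiny illustration of the corrected boundary test, keeping the wrap-around guard from the original:

/* Quota check sketch: usage may grow up to and including the limit. */
#include <stdio.h>
#include <stdbool.h>

static bool quota_allows(unsigned int qnbytes, int delta, unsigned int maxbytes)
{
    if (delta <= 0)
        return true;
    if (qnbytes + delta < qnbytes)      /* addition wrapped around */
        return false;
    return qnbytes + delta <= maxbytes; /* '>' rejects, so '<=' allows */
}

int main(void)
{
    /* exactly at the limit: allowed after the fix, refused before it */
    printf("%d\n", quota_allows(20000 - 100, 100, 20000)); /* 1 */
    printf("%d\n", quota_allows(20000 - 99, 100, 20000));  /* 0 */
    return 0;
}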
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index ca31af186abd..9394d72a77e8 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -133,10 +133,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
key_ref_put(keyring_ref);
error3:
- if (payload) {
- memzero_explicit(payload, plen);
- kvfree(payload);
- }
+ kvfree_sensitive(payload, plen);
error2:
kfree(description);
error:
@@ -330,7 +327,7 @@ long keyctl_update_key(key_serial_t id,
payload = NULL;
if (plen) {
ret = -ENOMEM;
- payload = kmalloc(plen, GFP_KERNEL);
+ payload = kvmalloc(plen, GFP_KERNEL);
if (!payload)
goto error;
@@ -351,7 +348,7 @@ long keyctl_update_key(key_serial_t id,
key_ref_put(key_ref);
error2:
- kzfree(payload);
+ kvfree_sensitive(payload, plen);
error:
return ret;
}
@@ -743,6 +740,21 @@ error:
}
/*
+ * Call the read method
+ */
+static long __keyctl_read_key(struct key *key, char *buffer, size_t buflen)
+{
+ long ret;
+
+ down_read(&key->sem);
+ ret = key_validate(key);
+ if (ret == 0)
+ ret = key->type->read(key, buffer, buflen);
+ up_read(&key->sem);
+ return ret;
+}
+
+/*
* Read a key's payload.
*
* The key must either grant the caller Read permission, or it must grant the
@@ -757,26 +769,28 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
struct key *key;
key_ref_t key_ref;
long ret;
+ char *key_data = NULL;
+ size_t key_data_len;
/* find the key first */
key_ref = lookup_user_key(keyid, 0, 0);
if (IS_ERR(key_ref)) {
ret = -ENOKEY;
- goto error;
+ goto out;
}
key = key_ref_to_ptr(key_ref);
ret = key_read_state(key);
if (ret < 0)
- goto error2; /* Negatively instantiated */
+ goto key_put_out; /* Negatively instantiated */
/* see if we can read it directly */
ret = key_permission(key_ref, KEY_NEED_READ);
if (ret == 0)
goto can_read_key;
if (ret != -EACCES)
- goto error2;
+ goto key_put_out;
/* we can't; see if it's searchable from this process's keyrings
* - we automatically take account of the fact that it may be
@@ -784,26 +798,78 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
*/
if (!is_key_possessed(key_ref)) {
ret = -EACCES;
- goto error2;
+ goto key_put_out;
}
/* the key is probably readable - now try to read it */
can_read_key:
- ret = -EOPNOTSUPP;
- if (key->type->read) {
- /* Read the data with the semaphore held (since we might sleep)
- * to protect against the key being updated or revoked.
+ if (!key->type->read) {
+ ret = -EOPNOTSUPP;
+ goto key_put_out;
+ }
+
+ if (!buffer || !buflen) {
+ /* Get the key length from the read method */
+ ret = __keyctl_read_key(key, NULL, 0);
+ goto key_put_out;
+ }
+
+ /*
+ * Read the data with the semaphore held (since we might sleep)
+ * to protect against the key being updated or revoked.
+ *
+ * Allocating a temporary buffer to hold the keys before
+ * transferring them to user buffer to avoid potential
+ * deadlock involving page fault and mmap_sem.
+ *
+ * key_data_len = (buflen <= PAGE_SIZE)
+ * ? buflen : actual length of key data
+ *
+ * This prevents allocating arbitrary large buffer which can
+ * be much larger than the actual key length. In the latter case,
+ * at least 2 passes of this loop is required.
+ */
+ key_data_len = (buflen <= PAGE_SIZE) ? buflen : 0;
+ for (;;) {
+ if (key_data_len) {
+ key_data = kvmalloc(key_data_len, GFP_KERNEL);
+ if (!key_data) {
+ ret = -ENOMEM;
+ goto key_put_out;
+ }
+ }
+
+ ret = __keyctl_read_key(key, key_data, key_data_len);
+
+ /*
+ * Read methods will just return the required length without
+ * any copying if the provided length isn't large enough.
*/
- down_read(&key->sem);
- ret = key_validate(key);
- if (ret == 0)
- ret = key->type->read(key, buffer, buflen);
- up_read(&key->sem);
+ if (ret <= 0 || ret > buflen)
+ break;
+
+ /*
+ * The key may change (unlikely) in between 2 consecutive
+ * __keyctl_read_key() calls. In this case, we reallocate
+ * a larger buffer and redo the key read when
+ * key_data_len < ret <= buflen.
+ */
+ if (ret > key_data_len) {
+ if (unlikely(key_data))
+ kvfree_sensitive(key_data, key_data_len);
+ key_data_len = ret;
+ continue; /* Allocate buffer */
+ }
+
+ if (copy_to_user(buffer, key_data, ret))
+ ret = -EFAULT;
+ break;
}
+ kvfree_sensitive(key_data, key_data_len);
-error2:
+key_put_out:
key_put(key);
-error:
+out:
return ret;
}
@@ -882,8 +948,8 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&newowner->lock);
- if (newowner->qnkeys + 1 >= maxkeys ||
- newowner->qnbytes + key->quotalen >= maxbytes ||
+ if (newowner->qnkeys + 1 > maxkeys ||
+ newowner->qnbytes + key->quotalen > maxbytes ||
newowner->qnbytes + key->quotalen <
newowner->qnbytes)
goto quota_overrun;
@@ -1101,10 +1167,7 @@ long keyctl_instantiate_key_common(key_serial_t id,
keyctl_change_reqkey_auth(NULL);
error2:
- if (payload) {
- memzero_explicit(payload, plen);
- kvfree(payload);
- }
+ kvfree_sensitive(payload, plen);
error:
return ret;
}
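
The keyctl.c rework above makes keyctl_read_key() copy through a kernel bounce buffer instead of handing the user pointer to the key type's ->read(), avoiding a page-fault/mmap_sem deadlock. The buffer is sized at buflen when that is at most PAGE_SIZE; otherwise the first pass probes the real payload length, and if the key grows between passes the buffer is reallocated and the read retried. Here is a standalone sketch of that probe-allocate-retry loop against a mock producer; produce_payload() and its fixed payload are inventions for the example.

/* Probe, allocate, and retry if the payload grew between calls. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mock "read method": copies at most buflen bytes, always returns the
 * full payload length so a short buffer can be detected and resized. */
static long produce_payload(char *buf, size_t buflen)
{
    static const char payload[] = "an example payload of modest size";
    size_t len = sizeof(payload) - 1;

    if (buf && buflen >= len)
        memcpy(buf, payload, len);
    return (long)len;
}

static long read_payload(char *out, size_t outlen)
{
    size_t bufsize = outlen < 4096 ? outlen : 0; /* 0 = probe length first */
    char *buf = NULL;
    long ret;

    for (;;) {
        if (bufsize) {
            buf = malloc(bufsize);
            if (!buf)
                return -1;
        }
        ret = produce_payload(buf, bufsize);
        if (ret <= 0 || (size_t)ret > outlen)
            break;                  /* error, or caller's buffer too small */
        if ((size_t)ret > bufsize) {
            free(buf);              /* payload grew: allocate and retry */
            buf = NULL;
            bufsize = (size_t)ret;
            continue;
        }
        memcpy(out, buf, (size_t)ret);  /* stands in for copy_to_user() */
        break;
    }
    free(buf);
    return ret;
}

int main(void)
{
    char out[256];
    long n = read_payload(out, sizeof(out));

    if (n > 0 && (size_t)n <= sizeof(out))
        printf("%.*s\n", (int)n, out);
    return 0;
}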
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 99a55145ddcd..e8f2366021ea 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -432,7 +432,6 @@ static int keyring_read_iterator(const void *object, void *data)
{
struct keyring_read_iterator_context *ctx = data;
const struct key *key = keyring_ptr_to_key(object);
- int ret;
kenter("{%s,%d},,{%zu/%zu}",
key->type->name, key->serial, ctx->count, ctx->buflen);
@@ -440,10 +439,7 @@ static int keyring_read_iterator(const void *object, void *data)
if (ctx->count >= ctx->buflen)
return 1;
- ret = put_user(key->serial, ctx->buffer);
- if (ret < 0)
- return ret;
- ctx->buffer++;
+ *ctx->buffer++ = key->serial;
ctx->count += sizeof(key->serial);
return 0;
}
diff --git a/security/keys/proc.c b/security/keys/proc.c
index d38be9db2cc0..7ec2779cb089 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -144,6 +144,8 @@ static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
n = key_serial_next(p, v);
if (n)
*_pos = key_node_serial(n);
+ else
+ (*_pos)++;
return n;
}
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 1d34b2a5f485..13ac3b1e57da 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -27,7 +27,7 @@ static int request_key_auth_instantiate(struct key *,
static void request_key_auth_describe(const struct key *, struct seq_file *);
static void request_key_auth_revoke(struct key *);
static void request_key_auth_destroy(struct key *);
-static long request_key_auth_read(const struct key *, char __user *, size_t);
+static long request_key_auth_read(const struct key *, char *, size_t);
/*
* The request-key authorisation key type definition.
@@ -85,7 +85,7 @@ static void request_key_auth_describe(const struct key *key,
* - the key's semaphore is read-locked
*/
static long request_key_auth_read(const struct key *key,
- char __user *buffer, size_t buflen)
+ char *buffer, size_t buflen)
{
struct request_key_auth *rka = get_request_key_auth(key);
size_t datalen;
@@ -102,8 +102,7 @@ static long request_key_auth_read(const struct key *key,
if (buflen > datalen)
buflen = datalen;
- if (copy_to_user(buffer, rka->callout_info, buflen) != 0)
- ret = -EFAULT;
+ memcpy(buffer, rka->callout_info, buflen);
}
return ret;
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index b69d3b1777c2..9179a5bbf998 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -796,7 +796,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
case Opt_migratable:
if (*args[0].from == '0')
pay->migratable = 0;
- else
+ else if (*args[0].from != '1')
return -EINVAL;
break;
case Opt_pcrlock:
@@ -1135,11 +1135,10 @@ out:
* trusted_read - copy the sealed blob data to userspace in hex.
* On success, return to userspace the trusted key datablob size.
*/
-static long trusted_read(const struct key *key, char __user *buffer,
+static long trusted_read(const struct key *key, char *buffer,
size_t buflen)
{
const struct trusted_key_payload *p;
- char *ascii_buf;
char *bufp;
int i;
@@ -1148,18 +1147,9 @@ static long trusted_read(const struct key *key, char __user *buffer,
return -EINVAL;
if (buffer && buflen >= 2 * p->blob_len) {
- ascii_buf = kmalloc_array(2, p->blob_len, GFP_KERNEL);
- if (!ascii_buf)
- return -ENOMEM;
-
- bufp = ascii_buf;
+ bufp = buffer;
for (i = 0; i < p->blob_len; i++)
bufp = hex_byte_pack(bufp, p->blob[i]);
- if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
- kzfree(ascii_buf);
- return -EFAULT;
- }
- kzfree(ascii_buf);
}
return 2 * p->blob_len;
}
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 9f558bedba23..0e723b676aef 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -172,7 +172,7 @@ EXPORT_SYMBOL_GPL(user_describe);
* read the key data
* - the key's semaphore is read-locked
*/
-long user_read(const struct key *key, char __user *buffer, size_t buflen)
+long user_read(const struct key *key, char *buffer, size_t buflen)
{
const struct user_key_payload *upayload;
long ret;
@@ -185,8 +185,7 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen)
if (buflen > upayload->datalen)
buflen = upayload->datalen;
- if (copy_to_user(buffer, upayload->data, buflen) != 0)
- ret = -EFAULT;
+ memcpy(buffer, upayload->data, buflen);
}
return ret;
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 33028c098ef3..d6fea68d22ad 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -277,7 +277,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
struct inode *inode;
audit_log_format(ab, " name=");
+ spin_lock(&a->u.dentry->d_lock);
audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
+ spin_unlock(&a->u.dentry->d_lock);
inode = d_backing_inode(a->u.dentry);
if (inode) {
@@ -295,8 +297,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
dentry = d_find_alias(inode);
if (dentry) {
audit_log_format(ab, " name=");
- audit_log_untrustedstring(ab,
- dentry->d_name.name);
+ spin_lock(&dentry->d_lock);
+ audit_log_untrustedstring(ab, dentry->d_name.name);
+ spin_unlock(&dentry->d_lock);
dput(dentry);
}
audit_log_format(ab, " dev=");
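
The lsm_audit.c hunk takes the dentry's d_lock around the d_name.name accesses so the name cannot change (or its storage be reused by a rename) while it is written into the audit record. The same discipline in generic form, with a pthread mutex standing in for d_lock; unlike the hunk, the sketch snapshots the name and logs after dropping the lock to keep the critical section short.

/* Copy a lock-protected name while holding the lock that guards renames. */
#include <pthread.h>
#include <string.h>
#include <stdio.h>

struct entry {
    pthread_mutex_t lock;   /* protects name, like d_lock */
    char name[64];
};

static void log_entry_name(struct entry *e)
{
    char snapshot[64];

    pthread_mutex_lock(&e->lock);
    /* the name cannot change or be freed while we hold the lock */
    strncpy(snapshot, e->name, sizeof(snapshot) - 1);
    snapshot[sizeof(snapshot) - 1] = '\0';
    pthread_mutex_unlock(&e->lock);

    printf("name=%s\n", snapshot);
}

int main(void)
{
    struct entry e = { .lock = PTHREAD_MUTEX_INITIALIZER, .name = "passwd" };

    log_entry_name(&e);
    return 0;
}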
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index c574285966f9..08833bbb97aa 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1618,7 +1618,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
* inode_doinit with a dentry, before these inodes could
* be used again by userspace.
*/
- goto out;
+ goto out_invalid;
}
len = INITCONTEXTLEN;
@@ -1734,7 +1734,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
* could be used again by userspace.
*/
if (!dentry)
- goto out;
+ goto out_invalid;
rc = selinux_genfs_get_sid(dentry, sclass,
sbsec->flags, &sid);
dput(dentry);
@@ -1747,11 +1747,10 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
out:
spin_lock(&isec->lock);
if (isec->initialized == LABEL_PENDING) {
- if (!sid || rc) {
+ if (rc) {
isec->initialized = LABEL_INVALID;
goto out_unlock;
}
-
isec->initialized = LABEL_INITIALIZED;
isec->sid = sid;
}
@@ -1759,6 +1758,15 @@ out:
out_unlock:
spin_unlock(&isec->lock);
return rc;
+
+out_invalid:
+ spin_lock(&isec->lock);
+ if (isec->initialized == LABEL_PENDING) {
+ isec->initialized = LABEL_INVALID;
+ isec->sid = sid;
+ }
+ spin_unlock(&isec->lock);
+ return 0;
}
/* Convert a Linux signal to an access vector. */
@@ -3304,6 +3312,9 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
}
+ if (!selinux_state.initialized)
+ return (inode_owner_or_capable(inode) ? 0 : -EPERM);
+
sbsec = inode->i_sb->s_security;
if (!(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
@@ -3387,6 +3398,15 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
return;
}
+ if (!selinux_state.initialized) {
+ /* If we haven't even been initialized, then we can't validate
+ * against a policy, so leave the label as invalid. It may
+ * resolve to a valid label on the next revalidation try if
+ * we've since initialized.
+ */
+ return;
+ }
+
rc = security_context_to_sid_force(&selinux_state, value, size,
&newsid);
if (rc) {
@@ -5595,40 +5615,60 @@ static int selinux_tun_dev_open(void *security)
static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
{
- int err = 0;
- u32 perm;
+ int rc = 0;
+ unsigned int msg_len;
+ unsigned int data_len = skb->len;
+ unsigned char *data = skb->data;
struct nlmsghdr *nlh;
struct sk_security_struct *sksec = sk->sk_security;
+ u16 sclass = sksec->sclass;
+ u32 perm;
- if (skb->len < NLMSG_HDRLEN) {
- err = -EINVAL;
- goto out;
- }
- nlh = nlmsg_hdr(skb);
+ while (data_len >= nlmsg_total_size(0)) {
+ nlh = (struct nlmsghdr *)data;
- err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
- if (err) {
- if (err == -EINVAL) {
+ /* NOTE: the nlmsg_len field isn't reliably set by some netlink
+ * users which means we can't reject skb's with bogus
+ * length fields; our solution is to follow what
+ * netlink_rcv_skb() does and simply skip processing at
+ * messages with length fields that are clearly junk
+ */
+ if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > data_len)
+ return 0;
+
+ rc = selinux_nlmsg_lookup(sclass, nlh->nlmsg_type, &perm);
+ if (rc == 0) {
+ rc = sock_has_perm(sk, perm);
+ if (rc)
+ return rc;
+ } else if (rc == -EINVAL) {
+ /* -EINVAL is a missing msg/perm mapping */
pr_warn_ratelimited("SELinux: unrecognized netlink"
- " message: protocol=%hu nlmsg_type=%hu sclass=%s"
- " pig=%d comm=%s\n",
- sk->sk_protocol, nlh->nlmsg_type,
- secclass_map[sksec->sclass - 1].name,
- task_pid_nr(current), current->comm);
- if (!enforcing_enabled(&selinux_state) ||
- security_get_allow_unknown(&selinux_state))
- err = 0;
+ " message: protocol=%hu nlmsg_type=%hu sclass=%s"
+ " pid=%d comm=%s\n",
+ sk->sk_protocol, nlh->nlmsg_type,
+ secclass_map[sclass - 1].name,
+ task_pid_nr(current), current->comm);
+ if (enforcing_enabled(&selinux_state) &&
+ !security_get_allow_unknown(&selinux_state))
+ return rc;
+ rc = 0;
+ } else if (rc == -ENOENT) {
+ /* -ENOENT is a missing socket/class mapping, ignore */
+ rc = 0;
+ } else {
+ return rc;
}
- /* Ignore */
- if (err == -ENOENT)
- err = 0;
- goto out;
+ /* move to the next message after applying netlink padding */
+ msg_len = NLMSG_ALIGN(nlh->nlmsg_len);
+ if (msg_len >= data_len)
+ return 0;
+ data_len -= msg_len;
+ data += msg_len;
}
- err = sock_has_perm(sk, perm);
-out:
- return err;
+ return rc;
}
#ifdef CONFIG_NETFILTER
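
The selinux_nlmsg_perm() rewrite above checks every netlink message in the skb rather than only the first one, skips messages whose nlmsg_len is clearly bogus, and steps to the next message with NLMSG_ALIGN(nlmsg_len). Below is a userspace sketch of the same walk over a raw buffer using the uapi netlink macros; the per-message permission lookup is replaced by a printout.

/* Walk every netlink message in a buffer the way the hunk above does. */
#include <linux/netlink.h>
#include <stdio.h>
#include <string.h>

static void walk_nlmsgs(unsigned char *data, unsigned int data_len)
{
    struct nlmsghdr *nlh;
    unsigned int msg_len;

    while (data_len >= NLMSG_HDRLEN) {
        nlh = (struct nlmsghdr *)data;

        /* skip buffers with junk length fields, as the hunk does */
        if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > data_len)
            return;

        printf("type=%u len=%u\n",
               (unsigned)nlh->nlmsg_type, (unsigned)nlh->nlmsg_len);

        /* advance past netlink padding to the next message */
        msg_len = NLMSG_ALIGN(nlh->nlmsg_len);
        if (msg_len >= data_len)
            return;
        data_len -= msg_len;
        data += msg_len;
    }
}

int main(void)
{
    _Alignas(struct nlmsghdr) unsigned char buf[2 * NLMSG_SPACE(4)];
    struct nlmsghdr *nlh;

    memset(buf, 0, sizeof(buf));
    nlh = (struct nlmsghdr *)buf;
    nlh->nlmsg_len = NLMSG_LENGTH(4);
    nlh->nlmsg_type = 16;
    nlh = (struct nlmsghdr *)(buf + NLMSG_SPACE(4));
    nlh->nlmsg_len = NLMSG_LENGTH(4);
    nlh->nlmsg_type = 17;

    walk_nlmsgs(buf, sizeof(buf));
    return 0;
}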
diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c
index 0a4b89d48297..cb05ae28ce00 100644
--- a/security/selinux/ibpkey.c
+++ b/security/selinux/ibpkey.c
@@ -161,8 +161,10 @@ static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 pkey_num, u32 *sid)
* is valid, it just won't be added to the cache.
*/
new = kzalloc(sizeof(*new), GFP_ATOMIC);
- if (!new)
+ if (!new) {
+ ret = -ENOMEM;
goto out;
+ }
new->psec.subnet_prefix = subnet_prefix;
new->psec.pkey = pkey_num;
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index f3a5a138a096..60b3f16bb5c7 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1509,6 +1509,7 @@ static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx)
*idx = cpu + 1;
return &per_cpu(avc_cache_stats, cpu);
}
+ (*idx)++;
return NULL;
}
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index f3def298a90e..a9f2bc8443bd 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2857,8 +2857,12 @@ err:
if (*names) {
for (i = 0; i < *len; i++)
kfree((*names)[i]);
+ kfree(*names);
}
kfree(*values);
+ *len = 0;
+ *names = NULL;
+ *values = NULL;
goto out;
}
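
The services.c error path above now frees the partially built *names array itself (not only its elements) and resets *len, *names and *values, so a caller that only checks the return code is left with harmless NULL/0 outputs instead of dangling pointers. A compact illustration of that convention:

/* On failure, free partial results and null the out-parameters. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

static int get_pairs(char ***names, int **values, int *len)
{
    int i, want = 3;

    *names = calloc(want, sizeof(**names));
    *values = calloc(want, sizeof(**values));
    *len = want;
    if (!*names || !*values)
        goto err;
    for (i = 0; i < want; i++) {
        (*names)[i] = strdup("bool_name");
        if (!(*names)[i])
            goto err;
        (*values)[i] = 1;
    }
    return 0;
err:
    if (*names) {
        for (i = 0; i < *len; i++)
            free((*names)[i]);
        free(*names);
    }
    free(*values);
    *len = 0;           /* leave the caller with nothing dangling */
    *names = NULL;
    *values = NULL;
    return -1;
}

int main(void)
{
    char **names;
    int *values, len;

    if (get_pairs(&names, &values, &len) == 0)
        printf("%d pairs\n", len);
    return 0;
}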
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index f6482e53d55a..4f8c1a272df0 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -906,11 +906,21 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
else
rule += strlen(skp->smk_known) + 1;
+ if (rule > data + count) {
+ rc = -EOVERFLOW;
+ goto out;
+ }
+
ret = sscanf(rule, "%d", &maplevel);
- if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL)
+ if (ret != 1 || maplevel < 0 || maplevel > SMACK_CIPSO_MAXLEVEL)
goto out;
rule += SMK_DIGITLEN;
+ if (rule > data + count) {
+ rc = -EOVERFLOW;
+ goto out;
+ }
+
ret = sscanf(rule, "%d", &catlen);
if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM)
goto out;
@@ -923,6 +933,10 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
for (i = 0; i < catlen; i++) {
rule += SMK_DIGITLEN;
+ if (rule > data + count) {
+ rc = -EOVERFLOW;
+ goto out;
+ }
ret = sscanf(rule, "%u", &cat);
if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM)
goto out;
@@ -1177,7 +1191,7 @@ static ssize_t smk_write_net4addr(struct file *file, const char __user *buf,
return -EPERM;
if (*ppos != 0)
return -EINVAL;
- if (count < SMK_NETLBLADDRMIN)
+ if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1)
return -EINVAL;
data = memdup_user_nul(buf, count);
@@ -1437,7 +1451,7 @@ static ssize_t smk_write_net6addr(struct file *file, const char __user *buf,
return -EPERM;
if (*ppos != 0)
return -EINVAL;
- if (count < SMK_NETLBLADDRMIN)
+ if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1)
return -EINVAL;
data = memdup_user_nul(buf, count);
@@ -1844,6 +1858,10 @@ static ssize_t smk_write_ambient(struct file *file, const char __user *buf,
if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
+ /* Enough data must be present */
+ if (count == 0 || count > PAGE_SIZE)
+ return -EINVAL;
+
data = memdup_user_nul(buf, count);
if (IS_ERR(data))
return PTR_ERR(data);
@@ -2015,6 +2033,9 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
+ if (count > PAGE_SIZE)
+ return -EINVAL;
+
data = memdup_user_nul(buf, count);
if (IS_ERR(data))
return PTR_ERR(data);
@@ -2102,6 +2123,9 @@ static ssize_t smk_write_unconfined(struct file *file, const char __user *buf,
if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
+ if (count > PAGE_SIZE)
+ return -EINVAL;
+
data = memdup_user_nul(buf, count);
if (IS_ERR(data))
return PTR_ERR(data);
@@ -2655,6 +2679,10 @@ static ssize_t smk_write_syslog(struct file *file, const char __user *buf,
if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
+ /* Enough data must be present */
+ if (count == 0 || count > PAGE_SIZE)
+ return -EINVAL;
+
data = memdup_user_nul(buf, count);
if (IS_ERR(data))
return PTR_ERR(data);
@@ -2736,7 +2764,6 @@ static int smk_open_relabel_self(struct inode *inode, struct file *file)
static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_smack *tsp = current_security();
char *data;
int rc;
LIST_HEAD(list_tmp);
@@ -2748,10 +2775,13 @@ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
return -EPERM;
/*
+ * No partial write.
* Enough data must be present.
*/
if (*ppos != 0)
return -EINVAL;
+ if (count == 0 || count > PAGE_SIZE)
+ return -EINVAL;
data = memdup_user_nul(buf, count);
if (IS_ERR(data))
@@ -2761,11 +2791,21 @@ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
kfree(data);
if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) {
+ struct cred *new;
+ struct task_smack *tsp;
+
+ new = prepare_creds();
+ if (!new) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ tsp = new->security;
smk_destroy_label_list(&tsp->smk_relabel);
list_splice(&list_tmp, &tsp->smk_relabel);
+ commit_creds(new);
return count;
}
-
+out:
smk_destroy_label_list(&list_tmp);
return rc;
}
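
smk_set_cipso() walks one user-supplied buffer with a moving "rule" cursor, and the new checks above make sure that cursor never runs past data + count before each sscanf(). A sketch of the bounds-checked cursor idea on a plain buffer; the fixed field width is an assumption standing in for SMK_DIGITLEN.

/* Parse fixed-width numeric fields, checking the cursor before each read. */
#include <stdio.h>
#include <string.h>

#define DIGITLEN 4  /* assumed field width, like SMK_DIGITLEN */

static int parse_levels(const char *data, size_t count)
{
    const char *rule = data;
    int maplevel, catlen, i;
    unsigned int cat;

    if (rule + DIGITLEN > data + count)
        return -1;                  /* would read past the buffer */
    if (sscanf(rule, "%d", &maplevel) != 1 || maplevel < 0)
        return -1;

    rule += DIGITLEN;
    if (rule + DIGITLEN > data + count)
        return -1;
    if (sscanf(rule, "%d", &catlen) != 1 || catlen < 0)
        return -1;

    for (i = 0; i < catlen; i++) {
        rule += DIGITLEN;
        if (rule + DIGITLEN > data + count)
            return -1;              /* category list truncated */
        if (sscanf(rule, "%u", &cat) != 1)
            return -1;
        printf("level %d category %u\n", maplevel, cat);
    }
    return 0;
}

int main(void)
{
    const char buf[] = "3   2   10  20  ";

    return parse_levels(buf, sizeof(buf) - 1) ? 1 : 0;
}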
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 509038d6bccd..68f016e4929a 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -723,6 +723,9 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
if (!retval) {
+ /* clear flags and stop any drain wait */
+ stream->partial_drain = false;
+ stream->metadata_set = false;
snd_compr_drain_notify(stream);
stream->runtime->total_bytes_available = 0;
stream->runtime->total_bytes_transferred = 0;
@@ -880,6 +883,7 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
if (stream->next_track == false)
return -EPERM;
+ stream->partial_drain = true;
retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
if (retval) {
pr_debug("Partial drain returned failure\n");
diff --git a/sound/core/control.c b/sound/core/control.c
index d1312f14d78f..9cec0e8c9d53 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1388,7 +1388,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
unlock:
up_write(&card->controls_rwsem);
- return 0;
+ return err;
}
static int snd_ctl_elem_add_user(struct snd_ctl_file *file,
diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c
index 26e71cf05f1e..600ab2eb1b50 100644
--- a/sound/core/hwdep.c
+++ b/sound/core/hwdep.c
@@ -231,12 +231,12 @@ static int snd_hwdep_dsp_load(struct snd_hwdep *hw,
if (info.index >= 32)
return -EINVAL;
/* check whether the dsp was already loaded */
- if (hw->dsp_loaded & (1 << info.index))
+ if (hw->dsp_loaded & (1u << info.index))
return -EBUSY;
err = hw->ops.dsp_load(hw, &info);
if (err < 0)
return err;
- hw->dsp_loaded |= (1 << info.index);
+ hw->dsp_loaded |= (1u << info.index);
return 0;
}
diff --git a/sound/core/info.c b/sound/core/info.c
index 679136fba730..3fa8336794f8 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -634,7 +634,9 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len)
{
int c = -1;
- if (snd_BUG_ON(!buffer || !buffer->buffer))
+ if (snd_BUG_ON(!buffer))
+ return 1;
+ if (!buffer->buffer)
return 1;
if (len <= 0 || buffer->stop || buffer->error)
return 1;
diff --git a/sound/core/init.c b/sound/core/init.c
index 16b7cc7aa66b..3eafa15006f8 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -405,10 +405,8 @@ int snd_card_disconnect(struct snd_card *card)
return 0;
}
card->shutdown = 1;
- spin_unlock(&card->files_lock);
/* replace file->f_op with special dummy operations */
- spin_lock(&card->files_lock);
list_for_each_entry(mfile, &card->files_list, list) {
/* it's critical part, use endless loop */
/* we have no room to fail */
diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c
index 3788906421a7..fe27034f2846 100644
--- a/sound/core/oss/mulaw.c
+++ b/sound/core/oss/mulaw.c
@@ -329,8 +329,8 @@ int snd_pcm_plugin_build_mulaw(struct snd_pcm_substream *plug,
snd_BUG();
return -EINVAL;
}
- if (snd_BUG_ON(!snd_pcm_format_linear(format->format)))
- return -ENXIO;
+ if (!snd_pcm_format_linear(format->format))
+ return -EINVAL;
err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion",
src_format, dst_format,
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 41abb8bd466a..2a286167460f 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -708,6 +708,8 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
oss_buffer_size = snd_pcm_plug_client_size(substream,
snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size;
+ if (!oss_buffer_size)
+ return -EINVAL;
oss_buffer_size = rounddown_pow_of_two(oss_buffer_size);
if (atomic_read(&substream->mmap_count)) {
if (oss_buffer_size > runtime->oss.mmap_bytes)
@@ -743,17 +745,21 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
min_period_size = snd_pcm_plug_client_size(substream,
snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
- min_period_size *= oss_frame_size;
- min_period_size = roundup_pow_of_two(min_period_size);
- if (oss_period_size < min_period_size)
- oss_period_size = min_period_size;
+ if (min_period_size) {
+ min_period_size *= oss_frame_size;
+ min_period_size = roundup_pow_of_two(min_period_size);
+ if (oss_period_size < min_period_size)
+ oss_period_size = min_period_size;
+ }
max_period_size = snd_pcm_plug_client_size(substream,
snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
- max_period_size *= oss_frame_size;
- max_period_size = rounddown_pow_of_two(max_period_size);
- if (oss_period_size > max_period_size)
- oss_period_size = max_period_size;
+ if (max_period_size) {
+ max_period_size *= oss_frame_size;
+ max_period_size = rounddown_pow_of_two(max_period_size);
+ if (oss_period_size > max_period_size)
+ oss_period_size = max_period_size;
+ }
oss_periods = oss_buffer_size / oss_period_size;
@@ -1949,11 +1955,15 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int
static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsigned int val)
{
struct snd_pcm_runtime *runtime;
+ int fragshift;
runtime = substream->runtime;
if (runtime->oss.subdivision || runtime->oss.fragshift)
return -EINVAL;
- runtime->oss.fragshift = val & 0xffff;
+ fragshift = val & 0xffff;
+ if (fragshift >= 31)
+ return -EINVAL;
+ runtime->oss.fragshift = fragshift;
runtime->oss.maxfrags = (val >> 16) & 0xffff;
if (runtime->oss.fragshift < 4) /* < 16 */
runtime->oss.fragshift = 4;
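
snd_pcm_oss_set_fragment1() now rejects fragshift values of 31 or more: the fragment size is later derived by shifting 1 left by fragshift, and shifting a 32-bit signed 1 by 31 or more is undefined behaviour in C. A short illustration of the guard:

/* Reject shift counts that would overflow a 32-bit signed shift. */
#include <stdio.h>

static long frag_bytes(unsigned int val)
{
    int fragshift = val & 0xffff;

    if (fragshift >= 31)
        return -1;      /* 1 << 31 is already undefined for int */
    return 1 << fragshift;
}

int main(void)
{
    printf("%ld %ld\n", frag_bytes(0x0004000a), frag_bytes(0x0004001f));
    return 0;
}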
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index 732bbede7ebf..da400da1fafe 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -196,7 +196,9 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin)
return 0;
}
-snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t drv_frames)
+static snd_pcm_sframes_t plug_client_size(struct snd_pcm_substream *plug,
+ snd_pcm_uframes_t drv_frames,
+ bool check_size)
{
struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next;
int stream;
@@ -209,21 +211,23 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
plugin = snd_pcm_plug_last(plug);
while (plugin && drv_frames > 0) {
- if (drv_frames > plugin->buf_frames)
- drv_frames = plugin->buf_frames;
plugin_prev = plugin->prev;
if (plugin->src_frames)
drv_frames = plugin->src_frames(plugin, drv_frames);
+ if (check_size && plugin->buf_frames &&
+ drv_frames > plugin->buf_frames)
+ drv_frames = plugin->buf_frames;
plugin = plugin_prev;
}
} else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
plugin = snd_pcm_plug_first(plug);
while (plugin && drv_frames > 0) {
plugin_next = plugin->next;
+ if (check_size && plugin->buf_frames &&
+ drv_frames > plugin->buf_frames)
+ drv_frames = plugin->buf_frames;
if (plugin->dst_frames)
drv_frames = plugin->dst_frames(plugin, drv_frames);
- if (drv_frames > plugin->buf_frames)
- drv_frames = plugin->buf_frames;
plugin = plugin_next;
}
} else
@@ -231,7 +235,9 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
return drv_frames;
}
-snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t clt_frames)
+static snd_pcm_sframes_t plug_slave_size(struct snd_pcm_substream *plug,
+ snd_pcm_uframes_t clt_frames,
+ bool check_size)
{
struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next;
snd_pcm_sframes_t frames;
@@ -247,26 +253,28 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc
plugin = snd_pcm_plug_first(plug);
while (plugin && frames > 0) {
plugin_next = plugin->next;
+ if (check_size && plugin->buf_frames &&
+ frames > plugin->buf_frames)
+ frames = plugin->buf_frames;
if (plugin->dst_frames) {
frames = plugin->dst_frames(plugin, frames);
if (frames < 0)
return frames;
}
- if (frames > plugin->buf_frames)
- frames = plugin->buf_frames;
plugin = plugin_next;
}
} else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
plugin = snd_pcm_plug_last(plug);
while (plugin) {
- if (frames > plugin->buf_frames)
- frames = plugin->buf_frames;
plugin_prev = plugin->prev;
if (plugin->src_frames) {
frames = plugin->src_frames(plugin, frames);
if (frames < 0)
return frames;
}
+ if (check_size && plugin->buf_frames &&
+ frames > plugin->buf_frames)
+ frames = plugin->buf_frames;
plugin = plugin_prev;
}
} else
@@ -274,6 +282,18 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc
return frames;
}
+snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug,
+ snd_pcm_uframes_t drv_frames)
+{
+ return plug_client_size(plug, drv_frames, false);
+}
+
+snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug,
+ snd_pcm_uframes_t clt_frames)
+{
+ return plug_slave_size(plug, clt_frames, false);
+}
+
static int snd_pcm_plug_formats(const struct snd_mask *mask,
snd_pcm_format_t format)
{
@@ -630,7 +650,7 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st
src_channels = dst_channels;
plugin = next;
}
- return snd_pcm_plug_client_size(plug, frames);
+ return plug_client_size(plug, frames, true);
}
snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, struct snd_pcm_plugin_channel *dst_channels_final, snd_pcm_uframes_t size)
@@ -640,7 +660,7 @@ snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, str
snd_pcm_sframes_t frames = size;
int err;
- frames = snd_pcm_plug_slave_size(plug, frames);
+ frames = plug_slave_size(plug, frames, true);
if (frames < 0)
return frames;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index ad52126d3d22..56295936387c 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -438,6 +438,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
no_delta_check:
if (runtime->status->hw_ptr == new_hw_ptr) {
+ runtime->hw_ptr_jiffies = curr_jiffies;
update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
return 0;
}
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index f03ceaff75b5..db62dbe7eaa8 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -753,8 +753,13 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
runtime->boundary *= 2;
/* clear the buffer for avoiding possible kernel info leaks */
- if (runtime->dma_area && !substream->ops->copy_user)
- memset(runtime->dma_area, 0, runtime->dma_bytes);
+ if (runtime->dma_area && !substream->ops->copy_user) {
+ size_t size = runtime->dma_bytes;
+
+ if (runtime->info & SNDRV_PCM_INFO_MMAP)
+ size = PAGE_ALIGN(size);
+ memset(runtime->dma_area, 0, size);
+ }
snd_pcm_timer_resolution_change(substream);
snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
@@ -1982,6 +1987,11 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
}
pcm_file = f.file->private_data;
substream1 = pcm_file->substream;
+ if (substream == substream1) {
+ res = -EINVAL;
+ goto _badf;
+ }
+
group = kmalloc(sizeof(*group), GFP_KERNEL);
if (!group) {
res = -ENOMEM;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index a52d6d16efc4..f4f855d7a791 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -87,11 +87,21 @@ static inline unsigned short snd_rawmidi_file_flags(struct file *file)
}
}
-static inline int snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
+static inline bool __snd_rawmidi_ready(struct snd_rawmidi_runtime *runtime)
+{
+ return runtime->avail >= runtime->avail_min;
+}
+
+static bool snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
{
struct snd_rawmidi_runtime *runtime = substream->runtime;
+ unsigned long flags;
+ bool ready;
- return runtime->avail >= runtime->avail_min;
+ spin_lock_irqsave(&runtime->lock, flags);
+ ready = __snd_rawmidi_ready(runtime);
+ spin_unlock_irqrestore(&runtime->lock, flags);
+ return ready;
}
static inline int snd_rawmidi_ready_append(struct snd_rawmidi_substream *substream,
@@ -112,6 +122,17 @@ static void snd_rawmidi_input_event_work(struct work_struct *work)
runtime->event(runtime->substream);
}
+/* buffer refcount management: call with runtime->lock held */
+static inline void snd_rawmidi_buffer_ref(struct snd_rawmidi_runtime *runtime)
+{
+ runtime->buffer_ref++;
+}
+
+static inline void snd_rawmidi_buffer_unref(struct snd_rawmidi_runtime *runtime)
+{
+ runtime->buffer_ref--;
+}
+
static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
{
struct snd_rawmidi_runtime *runtime;
@@ -661,6 +682,11 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
if (!newbuf)
return -ENOMEM;
spin_lock_irq(&runtime->lock);
+ if (runtime->buffer_ref) {
+ spin_unlock_irq(&runtime->lock);
+ kvfree(newbuf);
+ return -EBUSY;
+ }
oldbuf = runtime->buffer;
runtime->buffer = newbuf;
runtime->buffer_size = params->buffer_size;
@@ -944,7 +970,7 @@ int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
if (result > 0) {
if (runtime->event)
schedule_work(&runtime->event_work);
- else if (snd_rawmidi_ready(substream))
+ else if (__snd_rawmidi_ready(runtime))
wake_up(&runtime->sleep);
}
spin_unlock_irqrestore(&runtime->lock, flags);
@@ -960,8 +986,10 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
long result = 0, count1;
struct snd_rawmidi_runtime *runtime = substream->runtime;
unsigned long appl_ptr;
+ int err = 0;
spin_lock_irqsave(&runtime->lock, flags);
+ snd_rawmidi_buffer_ref(runtime);
while (count > 0 && runtime->avail) {
count1 = runtime->buffer_size - runtime->appl_ptr;
if (count1 > count)
@@ -980,16 +1008,19 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
if (userbuf) {
spin_unlock_irqrestore(&runtime->lock, flags);
if (copy_to_user(userbuf + result,
- runtime->buffer + appl_ptr, count1)) {
- return result > 0 ? result : -EFAULT;
- }
+ runtime->buffer + appl_ptr, count1))
+ err = -EFAULT;
spin_lock_irqsave(&runtime->lock, flags);
+ if (err)
+ goto out;
}
result += count1;
count -= count1;
}
+ out:
+ snd_rawmidi_buffer_unref(runtime);
spin_unlock_irqrestore(&runtime->lock, flags);
- return result;
+ return result > 0 ? result : err;
}
long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream,
@@ -1018,7 +1049,7 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
result = 0;
while (count > 0) {
spin_lock_irq(&runtime->lock);
- while (!snd_rawmidi_ready(substream)) {
+ while (!__snd_rawmidi_ready(runtime)) {
wait_queue_entry_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
@@ -1035,9 +1066,11 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
return -ENODEV;
if (signal_pending(current))
return result > 0 ? result : -ERESTARTSYS;
- if (!runtime->avail)
- return result > 0 ? result : -EIO;
spin_lock_irq(&runtime->lock);
+ if (!runtime->avail) {
+ spin_unlock_irq(&runtime->lock);
+ return result > 0 ? result : -EIO;
+ }
}
spin_unlock_irq(&runtime->lock);
count1 = snd_rawmidi_kernel_read1(substream,
@@ -1175,7 +1208,7 @@ int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int coun
runtime->avail += count;
substream->bytes += count;
if (count > 0) {
- if (runtime->drain || snd_rawmidi_ready(substream))
+ if (runtime->drain || __snd_rawmidi_ready(runtime))
wake_up(&runtime->sleep);
}
return count;
@@ -1261,6 +1294,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
return -EAGAIN;
}
}
+ snd_rawmidi_buffer_ref(runtime);
while (count > 0 && runtime->avail > 0) {
count1 = runtime->buffer_size - runtime->appl_ptr;
if (count1 > count)
@@ -1292,6 +1326,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
}
__end:
count1 = runtime->avail < runtime->buffer_size;
+ snd_rawmidi_buffer_unref(runtime);
spin_unlock_irqrestore(&runtime->lock, flags);
if (count1)
snd_rawmidi_output_trigger(substream, 1);
@@ -1340,9 +1375,11 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
return -ENODEV;
if (signal_pending(current))
return result > 0 ? result : -ERESTARTSYS;
- if (!runtime->avail && !timeout)
- return result > 0 ? result : -EIO;
spin_lock_irq(&runtime->lock);
+ if (!runtime->avail && !timeout) {
+ spin_unlock_irq(&runtime->lock);
+ return result > 0 ? result : -EIO;
+ }
}
spin_unlock_irq(&runtime->lock);
count1 = snd_rawmidi_kernel_write1(substream, buf, NULL, count);
@@ -1422,6 +1459,7 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
struct snd_rawmidi *rmidi;
struct snd_rawmidi_substream *substream;
struct snd_rawmidi_runtime *runtime;
+ unsigned long buffer_size, avail, xruns;
rmidi = entry->private_data;
snd_iprintf(buffer, "%s\n\n", rmidi->name);
@@ -1440,13 +1478,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
" Owner PID : %d\n",
pid_vnr(substream->pid));
runtime = substream->runtime;
+ spin_lock_irq(&runtime->lock);
+ buffer_size = runtime->buffer_size;
+ avail = runtime->avail;
+ spin_unlock_irq(&runtime->lock);
snd_iprintf(buffer,
" Mode : %s\n"
" Buffer size : %lu\n"
" Avail : %lu\n",
runtime->oss ? "OSS compatible" : "native",
- (unsigned long) runtime->buffer_size,
- (unsigned long) runtime->avail);
+ buffer_size, avail);
}
}
}
@@ -1464,13 +1505,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
" Owner PID : %d\n",
pid_vnr(substream->pid));
runtime = substream->runtime;
+ spin_lock_irq(&runtime->lock);
+ buffer_size = runtime->buffer_size;
+ avail = runtime->avail;
+ xruns = runtime->xruns;
+ spin_unlock_irq(&runtime->lock);
snd_iprintf(buffer,
" Buffer size : %lu\n"
" Avail : %lu\n"
" Overruns : %lu\n",
- (unsigned long) runtime->buffer_size,
- (unsigned long) runtime->avail,
- (unsigned long) runtime->xruns);
+ buffer_size, avail, xruns);
}
}
}
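
The rawmidi changes add a buffer_ref count, taken under runtime->lock, so a reader or writer that temporarily drops the lock for copy_to_user()/copy_from_user() keeps the buffer pinned, and resize_runtime_buffer() refuses with -EBUSY while such an access is in flight. A pthread sketch of the same pinning idea, with the data path reduced to a memcpy:

/* Pin a shared buffer with a refcount so it cannot be reallocated mid-copy. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct ring {
    pthread_mutex_t lock;
    char *buffer;
    size_t size;
    int buffer_ref;     /* readers/writers currently using buffer */
};

static int ring_read(struct ring *r, char *dst, size_t len)
{
    pthread_mutex_lock(&r->lock);
    r->buffer_ref++;                    /* pin the buffer */
    pthread_mutex_unlock(&r->lock);

    /* slow copy happens without the lock, like copy_to_user() */
    memcpy(dst, r->buffer, len < r->size ? len : r->size);

    pthread_mutex_lock(&r->lock);
    r->buffer_ref--;                    /* unpin */
    pthread_mutex_unlock(&r->lock);
    return 0;
}

static int ring_resize(struct ring *r, size_t new_size)
{
    char *newbuf = calloc(1, new_size);

    if (!newbuf)
        return -1;
    pthread_mutex_lock(&r->lock);
    if (r->buffer_ref) {                /* someone is mid-copy */
        pthread_mutex_unlock(&r->lock);
        free(newbuf);
        return -1;                      /* like -EBUSY in the hunk */
    }
    free(r->buffer);
    r->buffer = newbuf;
    r->size = new_size;
    pthread_mutex_unlock(&r->lock);
    return 0;
}

int main(void)
{
    struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER,
                      .buffer = calloc(1, 64), .size = 64 };
    char tmp[16];

    ring_read(&r, tmp, sizeof(tmp));
    printf("resize: %d\n", ring_resize(&r, 128));
    free(r.buffer);
    return 0;
}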
diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
index e1f44fc86885..f4a9d9972330 100644
--- a/sound/core/seq/oss/seq_oss.c
+++ b/sound/core/seq/oss/seq_oss.c
@@ -181,10 +181,19 @@ static long
odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct seq_oss_devinfo *dp;
+ long rc;
+
dp = file->private_data;
if (snd_BUG_ON(!dp))
return -ENXIO;
- return snd_seq_oss_ioctl(dp, cmd, arg);
+
+ if (cmd != SNDCTL_SEQ_SYNC &&
+ mutex_lock_interruptible(&register_mutex))
+ return -ERESTARTSYS;
+ rc = snd_seq_oss_ioctl(dp, cmd, arg);
+ if (cmd != SNDCTL_SEQ_SYNC)
+ mutex_unlock(&register_mutex);
+ return rc;
}
#ifdef CONFIG_COMPAT
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index c93945917235..247b68790a52 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -624,7 +624,8 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in
if (info->is_midi) {
struct midi_info minf;
- snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
+ if (snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf))
+ return -ENXIO;
inf->synth_type = SYNTH_TYPE_MIDI;
inf->synth_subtype = 0;
inf->nr_voices = 16;
diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
index e006fc8e3a36..6b634cfb42ed 100644
--- a/sound/core/seq/seq_queue.h
+++ b/sound/core/seq/seq_queue.h
@@ -40,10 +40,10 @@ struct snd_seq_queue {
struct snd_seq_timer *timer; /* time keeper for this queue */
int owner; /* client that 'owns' the timer */
- unsigned int locked:1, /* timer is only accesibble by owner if set */
- klocked:1, /* kernel lock (after START) */
- check_again:1,
- check_blocked:1;
+ bool locked; /* timer is only accessible by owner if set */
+ bool klocked; /* kernel lock (after START) */
+ bool check_again; /* concurrent access happened during check */
+ bool check_blocked; /* queue being checked */
unsigned int flags; /* status flags */
unsigned int info_flags; /* info for sync */
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 1e34e6381baa..3c65e52b014c 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -1047,6 +1047,14 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
return -ENOMEM;
kctl->id.device = dev;
kctl->id.subdevice = substr;
+
+ /* Add the control before copying the id so that
+ * the numid field of the id is set in the copy.
+ */
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
+ return err;
+
switch (idx) {
case ACTIVE_IDX:
setup->active_id = kctl->id;
@@ -1063,9 +1071,6 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
default:
break;
}
- err = snd_ctl_add(card, kctl);
- if (err < 0)
- return err;
}
}
}
diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
index d522925fc5c0..1a87f8ea074b 100644
--- a/sound/drivers/opl3/opl3_synth.c
+++ b/sound/drivers/opl3/opl3_synth.c
@@ -105,6 +105,8 @@ int snd_opl3_ioctl(struct snd_hwdep * hw, struct file *file,
{
struct snd_dm_fm_info info;
+ memset(&info, 0, sizeof(info));
+
info.fm_mode = opl3->fm_mode;
info.rhythm = opl3->rhythm;
if (copy_to_user(argp, &info, sizeof(struct snd_dm_fm_info)))
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 0cb65d0864cc..a2ed164d80b4 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -37,7 +37,7 @@ config SND_OXFW
* Mackie(Loud) Onyx 1640i (former model)
* Mackie(Loud) Onyx Satellite
* Mackie(Loud) Tapco Link.Firewire
- * Mackie(Loud) d.2 pro/d.4 pro
+ * Mackie(Loud) d.4 pro
* Mackie(Loud) U.420/U.420d
* TASCAM FireOne
* Stanton Controllers & Systems 1 Deck/Mixer
@@ -83,7 +83,7 @@ config SND_BEBOB
* PreSonus FIREBOX/FIREPOD/FP10/Inspire1394
* BridgeCo RDAudio1/Audio5
* Mackie Onyx 1220/1620/1640 (FireWire I/O Card)
- * Mackie d.2 (FireWire Option)
+ * Mackie d.2 (FireWire Option) and d.2 Pro
* Stanton FinalScratch 2 (ScratchAmp)
* Tascam IF-FW/DM
* Behringer XENIX UFX 1204/1604
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 5636e89ce5c7..2bcfeee75853 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -414,7 +414,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010049, &spec_normal),
/* Mackie, Onyx 1220/1620/1640 (Firewire I/O Card) */
SND_BEBOB_DEV_ENTRY(VEN_MACKIE2, 0x00010065, &spec_normal),
- /* Mackie, d.2 (Firewire Option) */
+ // Mackie, d.2 (Firewire option card) and d.2 Pro (the card is built-in).
SND_BEBOB_DEV_ENTRY(VEN_MACKIE1, 0x00010067, &spec_normal),
/* Stanton, ScratchAmp */
SND_BEBOB_DEV_ENTRY(VEN_STANTON, 0x00000001, &spec_normal),
diff --git a/sound/firewire/bebob/bebob_hwdep.c b/sound/firewire/bebob/bebob_hwdep.c
index 04c321e08c62..a04b5880926c 100644
--- a/sound/firewire/bebob/bebob_hwdep.c
+++ b/sound/firewire/bebob/bebob_hwdep.c
@@ -37,12 +37,11 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
}
memset(&event, 0, sizeof(event));
+ count = min_t(long, count, sizeof(event.lock_status));
if (bebob->dev_lock_changed) {
event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
event.lock_status.status = (bebob->dev_lock_count > 0);
bebob->dev_lock_changed = false;
-
- count = min_t(long, count, sizeof(event.lock_status));
}
spin_unlock_irq(&bebob->lock);
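
Moving the clamp out of the branch means the requested read length is limited to sizeof(event.lock_status) on every path, presumably so the subsequent copy can never return more bytes than were initialized. A generic sketch of clamping a caller-supplied length before copying from a partially initialized buffer (struct and helper names are hypothetical):

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct event { char lock_status[8]; char payload[56]; };

/* Copy at most the initialized part of the event, regardless of how
 * large a read the caller asked for. */
static long read_event(char *dst, long count)
{
    struct event ev;

    memset(&ev.lock_status, 0x41, sizeof(ev.lock_status)); /* only this part is filled */
    count = MIN(count, (long)sizeof(ev.lock_status));      /* clamp unconditionally */
    memcpy(dst, &ev, count);
    return count;
}

int main(void)
{
    char buf[64];
    long n = read_event(buf, sizeof(buf));

    printf("copied %ld bytes\n", n);
    return 0;
}
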
diff --git a/sound/firewire/dice/dice-alesis.c b/sound/firewire/dice/dice-alesis.c
index f5b325263b67..39a4ef8c8543 100644
--- a/sound/firewire/dice/dice-alesis.c
+++ b/sound/firewire/dice/dice-alesis.c
@@ -16,7 +16,7 @@ alesis_io14_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
static const unsigned int
alesis_io26_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
{10, 10, 4}, /* Tx0 = Analog + S/PDIF. */
- {16, 8, 0}, /* Tx1 = ADAT1 + ADAT2. */
+ {16, 4, 0}, /* Tx1 = ADAT1 + ADAT2 (available at low rate). */
};
int snd_dice_detect_alesis_formats(struct snd_dice *dice)
diff --git a/sound/firewire/dice/dice-tcelectronic.c b/sound/firewire/dice/dice-tcelectronic.c
index a8875d24ba2a..43a3bcb15b3d 100644
--- a/sound/firewire/dice/dice-tcelectronic.c
+++ b/sound/firewire/dice/dice-tcelectronic.c
@@ -38,8 +38,8 @@ static const struct dice_tc_spec konnekt_24d = {
};
static const struct dice_tc_spec konnekt_live = {
- .tx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
- .rx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
+ .tx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
+ .rx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
.has_midi = true,
};
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index ef689997d6a5..bf53e342788e 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -15,6 +15,7 @@ MODULE_LICENSE("GPL v2");
#define VENDOR_DIGIDESIGN 0x00a07e
#define MODEL_CONSOLE 0x000001
#define MODEL_RACK 0x000002
+#define SPEC_VERSION 0x000001
static int name_card(struct snd_dg00x *dg00x)
{
@@ -185,14 +186,18 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = {
/* Both of 002/003 use the same ID. */
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_VERSION |
IEEE1394_MATCH_MODEL_ID,
.vendor_id = VENDOR_DIGIDESIGN,
+ .version = SPEC_VERSION,
.model_id = MODEL_CONSOLE,
},
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_VERSION |
IEEE1394_MATCH_MODEL_ID,
.vendor_id = VENDOR_DIGIDESIGN,
+ .version = SPEC_VERSION,
.model_id = MODEL_RACK,
},
{}
diff --git a/sound/firewire/fireface/ff-transaction.c b/sound/firewire/fireface/ff-transaction.c
index 332b29f8ed75..47280ae50682 100644
--- a/sound/firewire/fireface/ff-transaction.c
+++ b/sound/firewire/fireface/ff-transaction.c
@@ -99,7 +99,7 @@ static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
/* Set interval to next transaction. */
ff->next_ktime[port] = ktime_add_ns(ktime_get(),
- len * 8 * NSEC_PER_SEC / 31250);
+ len * 8 * (NSEC_PER_SEC / 31250));
ff->rx_bytes[port] = len;
/*
diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
index 36a08ba51ec7..58674a711132 100644
--- a/sound/firewire/fireworks/fireworks_transaction.c
+++ b/sound/firewire/fireworks/fireworks_transaction.c
@@ -124,7 +124,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
t = (struct snd_efw_transaction *)data;
length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
- spin_lock_irq(&efw->lock);
+ spin_lock(&efw->lock);
if (efw->push_ptr < efw->pull_ptr)
capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
@@ -191,7 +191,7 @@ handle_resp_for_user(struct fw_card *card, int generation, int source,
copy_resp_to_buf(efw, data, length, rcode);
end:
- spin_unlock_irq(&instances_lock);
+ spin_unlock(&instances_lock);
}
static void
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 4ecaf69569dc..3c9aa797747b 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -400,7 +400,6 @@ static const struct ieee1394_device_id oxfw_id_table[] = {
* Onyx-i series (former models): 0x081216
* Mackie Onyx Satellite: 0x00200f
* Tapco LINK.firewire 4x6: 0x000460
- * d.2 pro: Unknown
* d.4 pro: Unknown
* U.420: Unknown
* U.420d: Unknown
diff --git a/sound/firewire/tascam/tascam-transaction.c b/sound/firewire/tascam/tascam-transaction.c
index 2ad692dd4b13..92f7e74d5847 100644
--- a/sound/firewire/tascam/tascam-transaction.c
+++ b/sound/firewire/tascam/tascam-transaction.c
@@ -210,7 +210,7 @@ static void midi_port_work(struct work_struct *work)
/* Set interval to next transaction. */
port->next_ktime = ktime_add_ns(ktime_get(),
- port->consume_bytes * 8 * NSEC_PER_SEC / 31250);
+ port->consume_bytes * 8 * (NSEC_PER_SEC / 31250));
/* Start this transaction. */
port->idling = false;
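
Both MIDI hunks above regroup the same interval arithmetic: at the 31250 bps MIDI rate, NSEC_PER_SEC / 31250 is exactly 32000 ns per bit, so dividing first keeps every intermediate small, whereas multiplying len * 8 * NSEC_PER_SEC first can overflow a 32-bit intermediate. A standalone sketch of the difference, using explicit 32-bit types purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000u
#define MIDI_BAUD    31250u    /* MIDI wire rate in bits per second */

int main(void)
{
    uint32_t len = 3;    /* bytes queued in one transaction */

    /* Original grouping: the multiplication happens first, so even a
     * few bytes push the 32-bit intermediate past UINT32_MAX
     * (3 * 8 * 1e9 = 2.4e10) and the value wraps. */
    uint32_t wrong = len * 8u * NSEC_PER_SEC / MIDI_BAUD;

    /* Patched grouping: NSEC_PER_SEC / 31250 = 32000 ns per bit divides
     * exactly, and len * 8 * 32000 stays comfortably within 32 bits. */
    uint32_t right = len * 8u * (NSEC_PER_SEC / MIDI_BAUD);

    printf("overflowed: %u ns, correct: %u ns\n", wrong, right);
    return 0;
}
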
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index d3fdc463a884..1e61cdce2895 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -225,11 +225,39 @@ static void snd_tscm_remove(struct fw_unit *unit)
}
static const struct ieee1394_device_id snd_tscm_id_table[] = {
+ // Tascam, FW-1884.
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_SPECIFIER_ID,
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
.vendor_id = 0x00022e,
.specifier_id = 0x00022e,
+ .version = 0x800000,
+ },
+ // Tascam, FE-8 (.version = 0x800001)
+ // This kernel module doesn't support the FE-8 because most of its
+ // features can be implemented in userspace without any specific
+ // support from this module.
+ //
+ // .version = 0x800002 is unknown.
+ //
+ // Tascam, FW-1082.
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = 0x00022e,
+ .specifier_id = 0x00022e,
+ .version = 0x800003,
+ },
+ // Tascam, FW-1804.
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = 0x00022e,
+ .specifier_id = 0x00022e,
+ .version = 0x800004,
},
/* FE-08 requires reverse-engineering because it just has faders. */
{}
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index 84b44cdae28a..b96abebcfd1a 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -156,6 +156,8 @@ struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus,
return NULL;
if (bus->idx != bus_idx)
return NULL;
+ if (addr < 0 || addr > 31)
+ return NULL;
list_for_each_entry(hlink, &bus->hlink_list, list) {
for (i = 0; i < HDA_MAX_CODECS; i++) {
diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
index 714a51721a31..ab9236e4c157 100644
--- a/sound/hda/hdac_bus.c
+++ b/sound/hda/hdac_bus.c
@@ -155,6 +155,7 @@ static void process_unsol_events(struct work_struct *work)
struct hdac_driver *drv;
unsigned int rp, caddr, res;
+ spin_lock_irq(&bus->reg_lock);
while (bus->unsol_rp != bus->unsol_wp) {
rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE;
bus->unsol_rp = rp;
@@ -166,10 +167,13 @@ static void process_unsol_events(struct work_struct *work)
codec = bus->caddr_tbl[caddr & 0x0f];
if (!codec || !codec->dev.driver)
continue;
+ spin_unlock_irq(&bus->reg_lock);
drv = drv_to_hdac_driver(codec->dev.driver);
if (drv->unsol_event)
drv->unsol_event(codec, res);
+ spin_lock_irq(&bus->reg_lock);
}
+ spin_unlock_irq(&bus->reg_lock);
}
/**
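
The locking added above follows the usual drop-the-lock-around-the-callback pattern: the ring indices are walked under reg_lock, but the lock is released while the driver's unsol_event handler runs and re-taken before the loop condition is re-checked. A compact userspace sketch of the pattern with a pthread mutex (names are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int ring[8];
static unsigned int rp, wp;

static void callback(int ev)
{
    /* May take other locks or block; must run without "lock" held. */
    printf("event %d\n", ev);
}

static void process_events(void)
{
    pthread_mutex_lock(&lock);
    while (rp != wp) {
        int ev = ring[rp];

        rp = (rp + 1) % 8;
        pthread_mutex_unlock(&lock);    /* drop across the callback */
        callback(ev);
        pthread_mutex_lock(&lock);      /* re-take, then re-check rp/wp */
    }
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    ring[0] = 1; ring[1] = 2; wp = 2;
    process_events();
    return 0;
}
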
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index dbf02a3a8d2f..58b53a4bc4d0 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -124,6 +124,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_device_init);
void snd_hdac_device_exit(struct hdac_device *codec)
{
pm_runtime_put_noidle(&codec->dev);
+ /* keep balance of runtime PM child_count in parent device */
+ pm_runtime_set_suspended(&codec->dev);
snd_hdac_bus_remove_device(codec->bus, codec);
kfree(codec->vendor_name);
kfree(codec->chip_name);
diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c
index 3dfe7e592c25..5a69314413b6 100644
--- a/sound/isa/es1688/es1688.c
+++ b/sound/isa/es1688/es1688.c
@@ -282,8 +282,10 @@ static int snd_es968_pnp_detect(struct pnp_card_link *pcard,
return error;
}
error = snd_es1688_probe(card, dev);
- if (error < 0)
+ if (error < 0) {
+ snd_card_free(card);
return error;
+ }
pnp_set_card_drvdata(pcard, card);
snd_es968_pnp_is_probed = 1;
return 0;
diff --git a/sound/isa/opti9xx/miro.c b/sound/isa/opti9xx/miro.c
index c6136c6b0214..81eb9c89428d 100644
--- a/sound/isa/opti9xx/miro.c
+++ b/sound/isa/opti9xx/miro.c
@@ -880,10 +880,13 @@ static void snd_miro_write(struct snd_miro *chip, unsigned char reg,
spin_unlock_irqrestore(&chip->lock, flags);
}
+static inline void snd_miro_write_mask(struct snd_miro *chip,
+ unsigned char reg, unsigned char value, unsigned char mask)
+{
+ unsigned char oldval = snd_miro_read(chip, reg);
-#define snd_miro_write_mask(chip, reg, value, mask) \
- snd_miro_write(chip, reg, \
- (snd_miro_read(chip, reg) & ~(mask)) | ((value) & (mask)))
+ snd_miro_write(chip, reg, (oldval & ~mask) | (value & mask));
+}
/*
* Proc Interface
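
Replacing the write-mask macro with a static inline (here and in the opti92x hunk below) means each argument is evaluated exactly once and gets type checking; in the macro form, an argument with side effects is expanded, and therefore evaluated, twice. A self-contained sketch of that hazard:

#include <stdio.h>

static unsigned char regs[4];
static int lookups;    /* counts how often the register index is computed */

static unsigned char rd(unsigned char reg) { return regs[reg]; }
static void wr(unsigned char reg, unsigned char val) { regs[reg] = val; }

static unsigned char pick_reg(void) { lookups++; return 1; }

/* Macro version: "reg" appears twice in the expansion, so a
 * side-effecting argument such as pick_reg() runs twice. */
#define write_mask_macro(reg, value, mask) \
    wr(reg, (rd(reg) & ~(mask)) | ((value) & (mask)))

/* Inline version (the shape the patch switches to): every argument is
 * evaluated exactly once and is type-checked. */
static inline void write_mask_inline(unsigned char reg, unsigned char value,
                                     unsigned char mask)
{
    unsigned char oldval = rd(reg);

    wr(reg, (oldval & ~mask) | (value & mask));
}

int main(void)
{
    write_mask_macro(pick_reg(), 0x0f, 0x0f);
    printf("macro : register index computed %d times\n", lookups);

    lookups = 0;
    write_mask_inline(pick_reg(), 0x0f, 0x0f);
    printf("inline: register index computed %d times\n", lookups);
    return 0;
}
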
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index ac0ab6eb40f0..0d0fa6f54bdd 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -329,10 +329,13 @@ static void snd_opti9xx_write(struct snd_opti9xx *chip, unsigned char reg,
}
-#define snd_opti9xx_write_mask(chip, reg, value, mask) \
- snd_opti9xx_write(chip, reg, \
- (snd_opti9xx_read(chip, reg) & ~(mask)) | ((value) & (mask)))
+static inline void snd_opti9xx_write_mask(struct snd_opti9xx *chip,
+ unsigned char reg, unsigned char value, unsigned char mask)
+{
+ unsigned char oldval = snd_opti9xx_read(chip, reg);
+ snd_opti9xx_write(chip, reg, (oldval & ~mask) | (value & mask));
+}
static int snd_opti9xx_configure(struct snd_opti9xx *chip,
long port,
diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
index d56973b770c7..24b91cb32839 100644
--- a/sound/isa/sb/emu8000.c
+++ b/sound/isa/sb/emu8000.c
@@ -1042,8 +1042,10 @@ snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu)
memset(emu->controls, 0, sizeof(emu->controls));
for (i = 0; i < EMU8000_NUM_CONTROLS; i++) {
- if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0)
+ if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) {
+ emu->controls[i] = NULL;
goto __error;
+ }
}
return 0;
diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
index bf3db0d2ea12..2e00b64ef13b 100644
--- a/sound/isa/sb/sb16_csp.c
+++ b/sound/isa/sb/sb16_csp.c
@@ -1059,10 +1059,14 @@ static int snd_sb_qsound_build(struct snd_sb_csp * p)
spin_lock_init(&p->q_lock);
- if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0)
+ if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) {
+ p->qsound_switch = NULL;
goto __error;
- if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0)
+ }
+ if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) {
+ p->qsound_space = NULL;
goto __error;
+ }
return 0;
diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
index 1eb8b61a185b..d77dcba276b5 100644
--- a/sound/isa/sb/sb8.c
+++ b/sound/isa/sb/sb8.c
@@ -111,10 +111,6 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
/* block the 0x388 port to avoid PnP conflicts */
acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
- if (!acard->fm_res) {
- err = -EBUSY;
- goto _err;
- }
if (port[dev] != SNDRV_AUTO_PORT) {
if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index 0b1e4b34b299..13c8e6542a2f 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -1175,7 +1175,10 @@ wavefront_send_alias (snd_wavefront_t *dev, wavefront_patch_info *header)
"alias for %d\n",
header->number,
header->hdr.a.OriginalSample);
-
+
+ if (header->number >= WF_MAX_SAMPLE)
+ return -EINVAL;
+
munge_int32 (header->number, &alias_hdr[0], 2);
munge_int32 (header->hdr.a.OriginalSample, &alias_hdr[2], 2);
munge_int32 (*((unsigned int *)&header->hdr.a.sampleStartOffset),
@@ -1206,6 +1209,9 @@ wavefront_send_multisample (snd_wavefront_t *dev, wavefront_patch_info *header)
int num_samples;
unsigned char *msample_hdr;
+ if (header->number >= WF_MAX_SAMPLE)
+ return -EINVAL;
+
msample_hdr = kmalloc(WF_MSAMPLE_BYTES, GFP_KERNEL);
if (! msample_hdr)
return -ENOMEM;
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 7d049569012c..3f06986fbecf 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -350,7 +350,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
struct hpi_message hm;
struct hpi_response hr;
struct hpi_adapter adapter;
- struct hpi_pci pci;
+ struct hpi_pci pci = { 0 };
memset(&adapter, 0, sizeof(adapter));
@@ -506,7 +506,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
return 0;
err:
- for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
+ while (--idx >= 0) {
if (pci.ap_mem_base[idx]) {
iounmap(pci.ap_mem_base[idx]);
pci.ap_mem_base[idx] = NULL;
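
The error path above now unwinds only what was set up before the failure: idx is the first index that failed, so --idx walks back over the successfully mapped regions and nothing else. A generic sketch of this partial-unwind idiom with hypothetical acquire/release helpers:

#include <stdbool.h>
#include <stdio.h>

#define NRES 4

static bool held[NRES];

/* Pretend resource 2 cannot be acquired, to exercise the error path. */
static int acquire(int i)
{
    if (i == 2)
        return -1;
    held[i] = true;
    return 0;
}

static void release(int i)
{
    held[i] = false;
}

static int setup_all(void)
{
    int idx;

    for (idx = 0; idx < NRES; idx++) {
        if (acquire(idx) < 0)
            goto err;
    }
    return 0;

err:
    /* Unwind only what was actually acquired: idx is the index that
     * failed, so everything strictly below it is released. */
    while (--idx >= 0)
        release(idx);
    return -1;
}

int main(void)
{
    int ret = setup_all();

    printf("setup: %d, leaked: %d%d%d%d\n", ret,
           held[0], held[1], held[2], held[3]);
    return 0;
}
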
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index cd27b5536654..675b812e96d6 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -551,7 +551,8 @@ static int snd_ca0106_pcm_power_dac(struct snd_ca0106 *chip, int channel_id,
else
/* Power down */
chip->spi_dac_reg[reg] |= bit;
- return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]);
+ if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0)
+ return -ENXIO;
}
return 0;
}
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 146e1a3498c7..419da70cd942 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -780,7 +780,7 @@ static void snd_cs46xx_set_capture_sample_rate(struct snd_cs46xx *chip, unsigned
rate = 48000 / 9;
/*
- * We can not capture at at rate greater than the Input Rate (48000).
+ * We can not capture at a rate greater than the Input Rate (48000).
* Return an error if an attempt is made to stray outside that limit.
*/
if (rate > 48000)
diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c
index 8d0a3d357345..8ef51a29380a 100644
--- a/sound/pci/cs46xx/dsp_spos_scb_lib.c
+++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c
@@ -1739,7 +1739,7 @@ int cs46xx_iec958_pre_open (struct snd_cs46xx *chip)
struct dsp_spos_instance * ins = chip->dsp_spos_instance;
if ( ins->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED ) {
- /* remove AsynchFGTxSCB and and PCMSerialInput_II */
+ /* remove AsynchFGTxSCB and PCMSerialInput_II */
cs46xx_dsp_disable_spdif_out (chip);
/* save state */
diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
index 3c966fafc754..6c9ab602212e 100644
--- a/sound/pci/ctxfi/cthw20k2.c
+++ b/sound/pci/ctxfi/cthw20k2.c
@@ -995,7 +995,7 @@ static int daio_mgr_dao_init(void *blk, unsigned int idx, unsigned int conf)
if (idx < 4) {
/* S/PDIF output */
- switch ((conf & 0x7)) {
+ switch ((conf & 0xf)) {
case 1:
set_field(&ctl->txctl[idx], ATXCTL_NUC, 0);
break;
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 3ef2b27ebbe8..f32c55ffffc7 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -2216,7 +2216,6 @@ static int snd_echo_resume(struct device *dev)
if (err < 0) {
kfree(commpage_bak);
dev_err(dev, "resume init_hw err=%d\n", err);
- snd_echo_free(chip);
return err;
}
@@ -2243,7 +2242,6 @@ static int snd_echo_resume(struct device *dev)
if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
KBUILD_MODNAME, chip)) {
dev_err(chip->card->dev, "cannot grab irq\n");
- snd_echo_free(chip);
return -EBUSY;
}
chip->irq = pci->irq;
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index d8ba3a6d5042..d23d61dbc20f 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -76,6 +76,12 @@ static int compare_input_type(const void *ap, const void *bp)
if (a->type != b->type)
return (int)(a->type - b->type);
+ /* If both hs_mic and hp_mic are present, pick hs_mic ahead of hp_mic. */
+ if (a->is_headset_mic && b->is_headphone_mic)
+ return -1; /* don't swap */
+ else if (a->is_headphone_mic && b->is_headset_mic)
+ return 1; /* swap */
+
/* In case one has boost and the other one has not,
pick the one with boost first. */
return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
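
The added tie-break makes the sort place a headset mic ahead of a headphone mic when both share the same type; as in any qsort-style comparator, a negative return keeps a before b and a positive return swaps them. A standalone comparator with the same shape (field names simplified for the sketch):

#include <stdio.h>
#include <stdlib.h>

struct input_pin {
    int type;              /* primary sort key */
    int is_headset_mic;
    int is_headphone_mic;
};

static int compare_input_type(const void *ap, const void *bp)
{
    const struct input_pin *a = ap, *b = bp;

    if (a->type != b->type)
        return a->type - b->type;

    /* Tie-break: headset mic sorts ahead of headphone mic. */
    if (a->is_headset_mic && b->is_headphone_mic)
        return -1;    /* keep order */
    if (a->is_headphone_mic && b->is_headset_mic)
        return 1;     /* swap */
    return 0;
}

int main(void)
{
    struct input_pin pins[] = {
        { 1, 0, 1 },    /* headphone mic */
        { 1, 1, 0 },    /* headset mic, same type */
    };

    qsort(pins, 2, sizeof(pins[0]), compare_input_type);
    printf("first entry is headset mic: %d\n", pins[0].is_headset_mic);
    return 0;
}
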
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index 066b5b59c4d7..0224011a240f 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -297,8 +297,12 @@ int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct hda_beep *beep = codec->beep;
+ int chs = get_amp_channels(kcontrol);
+
if (beep && (!beep->enabled || !ctl_has_mute(kcontrol))) {
- ucontrol->value.integer.value[0] =
+ if (chs & 1)
+ ucontrol->value.integer.value[0] = beep->enabled;
+ if (chs & 2)
ucontrol->value.integer.value[1] = beep->enabled;
return 0;
}
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index c175b2cf63f7..66010d0774b4 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -46,6 +46,10 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev)
if (codec->bus->shutdown)
return;
+ /* ignore unsol events during system suspend/resume */
+ if (codec->core.dev.power.power_state.event != PM_EVENT_ON)
+ return;
+
if (codec->patch_ops.unsol_event)
codec->patch_ops.unsol_event(codec, ev);
}
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index f3a6b1d869d8..7f1e763ccca8 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1782,7 +1782,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
return -EBUSY;
/* OK, let it free */
- snd_hdac_device_unregister(&codec->core);
+ device_release_driver(hda_codec_dev(codec));
/* allow device access again */
snd_hda_unlock_devices(bus);
@@ -3410,7 +3410,7 @@ EXPORT_SYMBOL_GPL(snd_hda_set_power_save);
* @nid: NID to check / update
*
* Check whether the given NID is in the amp list. If it's in the list,
- * check the current AMP status, and update the the power-status according
+ * check the current AMP status, and update the power-status according
* to the mute status.
*
* This function is supposed to be set or called from the check_power_status
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index fa261b27d858..0c5d41e5d146 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -624,13 +624,6 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
20,
178000000);
- /* by some reason, the playback stream stalls on PulseAudio with
- * tsched=1 when a capture stream triggers. Until we figure out the
- * real cause, disable tsched mode by telling the PCM info flag.
- */
- if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
- runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
-
if (chip->align_buffer_size)
/* constrain buffer sizes to be multiple of 128
bytes. This is more efficient in terms of memory
@@ -1169,16 +1162,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
active = true;
- /* clear rirb int */
status = azx_readb(chip, RIRBSTS);
if (status & RIRB_INT_MASK) {
+ /*
+ * Clearing the interrupt status here ensures that no
+ * interrupt gets masked after the RIRB wp is read in
+ * snd_hdac_bus_update_rirb. This avoids a possible
+ * race condition where codec response in RIRB may
+ * remain unserviced by IRQ, eventually falling back
+ * to polling mode in azx_rirb_get_response.
+ */
+ azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
active = true;
if (status & RIRB_INT_RESPONSE) {
if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
udelay(80);
snd_hdac_bus_update_rirb(bus);
}
- azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
}
} while (active && ++repeat < 10);
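
The reordering above acknowledges the RIRB interrupt before the ring's write pointer is read; acknowledging afterwards leaves a window in which a response arriving in between has its interrupt cleared and sits unserviced until the polling fallback kicks in. A crude single-threaded simulation of the two orderings (the device model here is entirely hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Simulated device state: a pending-interrupt latch and a response count. */
static bool irq_pending;
static int responses;
static int serviced;

static void device_post_response(void)
{
    responses++;
    irq_pending = true;    /* hardware latches a new interrupt */
}

static void drain_responses(void)
{
    serviced = responses;  /* roughly: read the ring's write pointer */
}

/* Ack after draining: a response posted between drain and ack loses its
 * interrupt and is left unserviced. */
static void handler_ack_last(void)
{
    drain_responses();
    device_post_response();    /* race window */
    irq_pending = false;       /* ack also clears the new latch */
}

/* Ack first (the patched ordering): the late response re-latches the
 * interrupt, so the handler runs again and picks it up. */
static void handler_ack_first(void)
{
    irq_pending = false;
    drain_responses();
    device_post_response();    /* re-sets irq_pending */
}

int main(void)
{
    handler_ack_last();
    printf("ack-last : pending=%d serviced=%d posted=%d\n",
           irq_pending, serviced, responses);

    irq_pending = false; responses = 0; serviced = 0;
    handler_ack_first();
    printf("ack-first: pending=%d serviced=%d posted=%d\n",
           irq_pending, serviced, responses);
    return 0;
}
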
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 2609161707a4..6099a9f1cb3d 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -825,7 +825,7 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
}
}
-/* sync power of each widget in the the given path */
+/* sync power of each widget in the given path */
static hda_nid_t path_power_update(struct hda_codec *codec,
struct nid_path *path,
bool allow_powerdown)
@@ -1214,11 +1214,17 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
*index = ch;
return "Headphone";
case AUTO_PIN_LINE_OUT:
- /* This deals with the case where we have two DACs and
- * one LO, one HP and one Speaker */
- if (!ch && cfg->speaker_outs && cfg->hp_outs) {
- bool hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type);
- bool spk_lo_shared = !path_has_mixer(codec, spec->speaker_paths[0], ctl_type);
+ /* This deals with the case where one HP or one Speaker or
+ * one HP + one Speaker need to share the DAC with LO
+ */
+ if (!ch) {
+ bool hp_lo_shared = false, spk_lo_shared = false;
+
+ if (cfg->speaker_outs)
+ spk_lo_shared = !path_has_mixer(codec,
+ spec->speaker_paths[0], ctl_type);
+ if (cfg->hp_outs)
+ hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type);
if (hp_lo_shared && spk_lo_shared)
return spec->vmaster_mute.hook ? "PCM" : "Master";
if (hp_lo_shared)
@@ -1376,16 +1382,20 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
struct nid_path *path;
hda_nid_t pin = pins[i];
- path = snd_hda_get_path_from_idx(codec, path_idx[i]);
- if (path) {
- badness += assign_out_path_ctls(codec, path);
- continue;
+ if (!spec->obey_preferred_dacs) {
+ path = snd_hda_get_path_from_idx(codec, path_idx[i]);
+ if (path) {
+ badness += assign_out_path_ctls(codec, path);
+ continue;
+ }
}
dacs[i] = get_preferred_dac(codec, pin);
if (dacs[i]) {
if (is_dac_already_used(codec, dacs[i]))
badness += bad->shared_primary;
+ } else if (spec->obey_preferred_dacs) {
+ badness += BAD_NO_PRIMARY_DAC;
}
if (!dacs[i])
@@ -4025,7 +4035,7 @@ int snd_hda_gen_add_micmute_led(struct hda_codec *codec,
spec->micmute_led.led_mode = MICMUTE_LED_FOLLOW_MUTE;
spec->micmute_led.capture = 0;
- spec->micmute_led.led_value = 0;
+ spec->micmute_led.led_value = -1;
spec->micmute_led.old_hook = spec->cap_sync_hook;
spec->micmute_led.update = hook;
spec->cap_sync_hook = update_micmute_led;
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 8933c0f64cc4..2ccdd92c8c7f 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -240,6 +240,7 @@ struct hda_gen_spec {
unsigned int power_down_unused:1; /* power down unused widgets */
unsigned int dac_min_mute:1; /* minimal = mute for DACs */
unsigned int suppress_vmaster:1; /* don't create vmaster kctls */
+ unsigned int obey_preferred_dacs:1; /* obey preferred_dacs assignment */
/* other internal flags */
unsigned int no_analog:1; /* digital I/O only */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 3ee5b7b9b595..2cd8bfd5293b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2054,7 +2054,7 @@ static int azx_first_init(struct azx *chip)
/* codec detection */
if (!azx_bus(chip)->codec_mask) {
dev_err(card->dev, "no codecs found!\n");
- return -ENODEV;
+ /* keep running the rest for the runtime PM */
}
if (azx_acquire_irq(chip, 0) < 0)
@@ -2076,24 +2076,15 @@ static void azx_firmware_cb(const struct firmware *fw, void *context)
{
struct snd_card *card = context;
struct azx *chip = card->private_data;
- struct pci_dev *pci = chip->pci;
- if (!fw) {
- dev_err(card->dev, "Cannot load firmware, aborting\n");
- goto error;
- }
-
- chip->fw = fw;
+ if (fw)
+ chip->fw = fw;
+ else
+ dev_err(card->dev, "Cannot load firmware, continue without patching\n");
if (!chip->disabled) {
/* continue probing */
- if (azx_probe_continue(chip))
- goto error;
+ azx_probe_continue(chip);
}
- return; /* OK */
-
- error:
- snd_card_free(card);
- pci_set_drvdata(pci, NULL);
}
#endif
@@ -2219,6 +2210,17 @@ static const struct hdac_io_ops pci_hda_io_ops = {
.dma_free_pages = dma_free_pages,
};
+/* Blacklist for skipping the whole probe:
+ * some HD-audio PCI entries are exposed without any codecs, and such devices
+ * should be ignored from the beginning.
+ */
+static const struct pci_device_id driver_blacklist[] = {
+ { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1043, 0x874f) }, /* ASUS ROG Zenith II / Strix */
+ { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb59) }, /* MSI TRX40 Creator */
+ { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb60) }, /* MSI TRX40 */
+ {}
+};
+
static const struct hda_controller_ops pci_hda_ops = {
.disable_msi_reset_irq = disable_msi_reset_irq,
.substream_alloc_pages = substream_alloc_pages,
@@ -2238,6 +2240,11 @@ static int azx_probe(struct pci_dev *pci,
bool schedule_probe;
int err;
+ if (pci_match_id(driver_blacklist, pci)) {
+ dev_info(&pci->dev, "Skipping the blacklisted device\n");
+ return -ENODEV;
+ }
+
if (dev >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev]) {
@@ -2321,8 +2328,6 @@ static struct snd_pci_quirk power_save_blacklist[] = {
SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
- /* https://bugzilla.redhat.com/show_bug.cgi?id=1581607 */
- SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
SND_PCI_QUIRK(0x1558, 0x6504, "Clevo W65_67SB", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
@@ -2434,9 +2439,11 @@ static int azx_probe_continue(struct azx *chip)
#endif
/* create codec instances */
- err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]);
- if (err < 0)
- goto out_free;
+ if (bus->codec_mask) {
+ err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]);
+ if (err < 0)
+ goto out_free;
+ }
#ifdef CONFIG_SND_HDA_PATCH_LOADER
if (chip->fw) {
@@ -2450,7 +2457,7 @@ static int azx_probe_continue(struct azx *chip)
#endif
}
#endif
- if ((probe_only[dev] & 1) == 0) {
+ if (bus->codec_mask && !(probe_only[dev] & 1)) {
err = azx_codec_configure(chip);
if (err < 0)
goto out_free;
@@ -2467,8 +2474,10 @@ static int azx_probe_continue(struct azx *chip)
set_default_power_save(chip);
- if (azx_has_pm_runtime(chip))
+ if (azx_has_pm_runtime(chip)) {
+ pm_runtime_use_autosuspend(&pci->dev);
pm_runtime_put_autosuspend(&pci->dev);
+ }
out_free:
if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index 6535155e992d..25a4c2d580da 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -138,7 +138,7 @@ static int reconfig_codec(struct hda_codec *codec)
"The codec is being used, can't reconfigure.\n");
goto error;
}
- err = snd_hda_codec_configure(codec);
+ err = device_reprobe(hda_codec_dev(codec));
if (err < 0)
goto error;
err = snd_card_register(codec->card);
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index c9f3c002bd55..004a7772bb5d 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -106,7 +106,7 @@ enum {
};
/* Strings for Input Source Enum Control */
-static const char *const in_src_str[3] = {"Rear Mic", "Line", "Front Mic" };
+static const char *const in_src_str[3] = { "Microphone", "Line In", "Front Microphone" };
#define IN_SRC_NUM_OF_INPUTS 3
enum {
REAR_MIC,
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 78bb96263bc2..8851cd11dc9c 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -911,18 +911,18 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
- SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
- SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
- SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
- SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
- SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
- SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
- SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
@@ -1088,6 +1088,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
static const struct hda_device_id snd_hda_id_conexant[] = {
HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
+ HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index c67fadd5aae5..d21a4eb1ca49 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1848,8 +1848,10 @@ static bool check_non_pcm_per_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
/* Add sanity check to pass klockwork check.
* This should never happen.
*/
- if (WARN_ON(spdif == NULL))
+ if (WARN_ON(spdif == NULL)) {
+ mutex_unlock(&codec->spdif_mutex);
return true;
+ }
non_pcm = !!(spdif->status & IEC958_AES0_NONAUDIO);
mutex_unlock(&codec->spdif_mutex);
return non_pcm;
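
The early-return fix above illustrates a general rule: once the mutex is taken, every exit path, including the sanity-check bail-out, must drop it, or the next caller blocks forever. A minimal sketch of the same shape with a pthread mutex:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Every return after the lock is taken drops it first, including the
 * "should never happen" early return. */
static bool query_flag(const int *state)
{
    bool ret;

    pthread_mutex_lock(&lock);
    if (!state) {                   /* sanity check, early exit */
        pthread_mutex_unlock(&lock);
        return true;
    }
    ret = (*state & 0x2) != 0;
    pthread_mutex_unlock(&lock);
    return ret;
}

int main(void)
{
    int state = 0x2;

    printf("%d %d\n", query_flag(&state), query_flag(NULL));
    return 0;
}
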
@@ -1953,20 +1955,23 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
int pinctl;
int err = 0;
+ mutex_lock(&spec->pcm_lock);
if (hinfo->nid) {
pcm_idx = hinfo_to_pcm_index(codec, hinfo);
- if (snd_BUG_ON(pcm_idx < 0))
- return -EINVAL;
+ if (snd_BUG_ON(pcm_idx < 0)) {
+ err = -EINVAL;
+ goto unlock;
+ }
cvt_idx = cvt_nid_to_cvt_index(codec, hinfo->nid);
- if (snd_BUG_ON(cvt_idx < 0))
- return -EINVAL;
+ if (snd_BUG_ON(cvt_idx < 0)) {
+ err = -EINVAL;
+ goto unlock;
+ }
per_cvt = get_cvt(spec, cvt_idx);
-
snd_BUG_ON(!per_cvt->assigned);
per_cvt->assigned = 0;
hinfo->nid = 0;
- mutex_lock(&spec->pcm_lock);
snd_hda_spdif_ctls_unassign(codec, pcm_idx);
clear_bit(pcm_idx, &spec->pcm_in_use);
pin_idx = hinfo_to_pin_index(codec, hinfo);
@@ -1994,10 +1999,11 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
per_pin->setup = false;
per_pin->channels = 0;
mutex_unlock(&per_pin->lock);
- unlock:
- mutex_unlock(&spec->pcm_lock);
}
+unlock:
+ mutex_unlock(&spec->pcm_lock);
+
return err;
}
@@ -2209,7 +2215,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+ struct hdmi_eld *pin_eld = &per_pin->sink_eld;
+ pin_eld->eld_valid = false;
hdmi_present_sense(per_pin, 0);
}
@@ -2318,6 +2326,18 @@ static void generic_hdmi_free(struct hda_codec *codec)
}
#ifdef CONFIG_PM
+static int generic_hdmi_suspend(struct hda_codec *codec)
+{
+ struct hdmi_spec *spec = codec->spec;
+ int pin_idx;
+
+ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+ cancel_delayed_work_sync(&per_pin->work);
+ }
+ return 0;
+}
+
static int generic_hdmi_resume(struct hda_codec *codec)
{
struct hdmi_spec *spec = codec->spec;
@@ -2341,6 +2361,7 @@ static const struct hda_codec_ops generic_hdmi_patch_ops = {
.build_controls = generic_hdmi_build_controls,
.unsol_event = hdmi_unsol_event,
#ifdef CONFIG_PM
+ .suspend = generic_hdmi_suspend,
.resume = generic_hdmi_resume,
#endif
};
@@ -2570,6 +2591,7 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
hda_nid_t cvt_nid)
{
if (per_pin) {
+ haswell_verify_D0(codec, per_pin->cvt_nid, per_pin->pin_nid);
snd_hda_set_dev_select(codec, per_pin->pin_nid,
per_pin->dev_id);
intel_verify_pin_cvt_connect(codec, per_pin);
@@ -3426,6 +3448,7 @@ static int tegra_hdmi_build_pcms(struct hda_codec *codec)
static int patch_tegra_hdmi(struct hda_codec *codec)
{
+ struct hdmi_spec *spec;
int err;
err = patch_generic_hdmi(codec);
@@ -3433,6 +3456,10 @@ static int patch_tegra_hdmi(struct hda_codec *codec)
return err;
codec->patch_ops.build_pcms = tegra_hdmi_build_pcms;
+ spec = codec->spec;
+ spec->chmap.ops.chmap_cea_alloc_validate_get_type =
+ nvhdmi_chmap_cea_alloc_validate_get_type;
+ spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
return 0;
}
@@ -3894,6 +3921,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch),
HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 23aab2fdac46..f9ebbee3824b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -94,6 +94,7 @@ struct alc_spec {
/* mute LED for HP laptops, see alc269_fixup_mic_mute_hook() */
int mute_led_polarity;
+ int micmute_led_polarity;
hda_nid_t mute_led_nid;
hda_nid_t cap_mute_led_nid;
@@ -379,7 +380,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0215:
case 0x10ec0233:
case 0x10ec0235:
+ case 0x10ec0236:
+ case 0x10ec0245:
case 0x10ec0255:
+ case 0x10ec0256:
case 0x10ec0257:
case 0x10ec0282:
case 0x10ec0283:
@@ -391,14 +395,13 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0300:
alc_update_coef_idx(codec, 0x10, 1<<9, 0);
break;
- case 0x10ec0236:
- case 0x10ec0256:
- alc_write_coef_idx(codec, 0x36, 0x5757);
- alc_update_coef_idx(codec, 0x10, 1<<9, 0);
- break;
case 0x10ec0275:
alc_update_coef_idx(codec, 0xe, 0, 1<<0);
break;
+ case 0x10ec0287:
+ alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+ alc_write_coef_idx(codec, 0x8, 0x4ab7);
+ break;
case 0x10ec0293:
alc_update_coef_idx(codec, 0xa, 1<<13, 0);
break;
@@ -439,6 +442,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x7, 1<<5, 0);
break;
case 0x10ec0892:
+ case 0x10ec0897:
alc_update_coef_idx(codec, 0x7, 1<<5, 0);
break;
case 0x10ec0899:
@@ -804,9 +808,11 @@ static void alc_ssid_check(struct hda_codec *codec, const hda_nid_t *ports)
{
if (!alc_subsystem_id(codec, ports)) {
struct alc_spec *spec = codec->spec;
- codec_dbg(codec,
- "realtek: Enable default setup for auto mode as fallback\n");
- spec->init_amp = ALC_INIT_DEFAULT;
+ if (spec->init_amp == ALC_INIT_UNDEFINED) {
+ codec_dbg(codec,
+ "realtek: Enable default setup for auto mode as fallback\n");
+ spec->init_amp = ALC_INIT_DEFAULT;
+ }
}
}
@@ -1877,6 +1883,7 @@ enum {
ALC889_FIXUP_FRONT_HP_NO_PRESENCE,
ALC889_FIXUP_VAIO_TT,
ALC888_FIXUP_EEE1601,
+ ALC886_FIXUP_EAPD,
ALC882_FIXUP_EAPD,
ALC883_FIXUP_EAPD,
ALC883_FIXUP_ACER_EAPD,
@@ -1904,6 +1911,8 @@ enum {
ALC1220_FIXUP_CLEVO_P950,
ALC1220_FIXUP_CLEVO_PB51ED,
ALC1220_FIXUP_CLEVO_PB51ED_PINS,
+ ALC887_FIXUP_ASUS_AUDIO,
+ ALC887_FIXUP_ASUS_HMIC,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2116,6 +2125,31 @@ static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
}
+static void alc887_asus_hp_automute_hook(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+{
+ struct alc_spec *spec = codec->spec;
+ unsigned int vref;
+
+ snd_hda_gen_hp_automute(codec, jack);
+
+ if (spec->gen.hp_jack_present)
+ vref = AC_PINCTL_VREF_80;
+ else
+ vref = AC_PINCTL_VREF_HIZ;
+ snd_hda_set_pin_ctl(codec, 0x19, PIN_HP | vref);
+}
+
+static void alc887_fixup_asus_jack(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ if (action != HDA_FIXUP_ACT_PROBE)
+ return;
+ snd_hda_set_pin_ctl_cache(codec, 0x1b, PIN_HP);
+ spec->gen.hp_automute_hook = alc887_asus_hp_automute_hook;
+}
+
static const struct hda_fixup alc882_fixups[] = {
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
.type = HDA_FIXUP_PINS,
@@ -2183,6 +2217,15 @@ static const struct hda_fixup alc882_fixups[] = {
{ }
}
},
+ [ALC886_FIXUP_EAPD] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* change to EAPD mode */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0068 },
+ { }
+ }
+ },
[ALC882_FIXUP_EAPD] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -2373,6 +2416,20 @@ static const struct hda_fixup alc882_fixups[] = {
.chained = true,
.chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
},
+ [ALC887_FIXUP_ASUS_AUDIO] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x15, 0x02a14150 }, /* use as headset mic, without its own jack detect */
+ { 0x19, 0x22219420 },
+ {}
+ },
+ },
+ [ALC887_FIXUP_ASUS_HMIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc887_fixup_asus_jack,
+ .chained = true,
+ .chain_id = ALC887_FIXUP_ASUS_AUDIO,
+ },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2391,13 +2448,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
ALC882_FIXUP_ACER_ASPIRE_8930G),
SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G",
ALC882_FIXUP_ACER_ASPIRE_8930G),
+ SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
+ ALC882_FIXUP_ACER_ASPIRE_4930G),
+ SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
ALC882_FIXUP_ACER_ASPIRE_4930G),
SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
ALC882_FIXUP_ACER_ASPIRE_4930G),
- SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
- ALC882_FIXUP_ACER_ASPIRE_4930G),
- SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G",
ALC882_FIXUP_ACER_ASPIRE_4930G),
SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
@@ -2406,14 +2463,15 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
+ SND_PCI_QUIRK(0x1043, 0x2390, "Asus D700SA", ALC887_FIXUP_ASUS_HMIC),
SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+ SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+ SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
- SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
- SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
/* All Apple entries are in codec SSIDs */
SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
@@ -2440,23 +2498,44 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+ SND_PCI_QUIRK(0x1462, 0xcc34, "MSI Godlike X570", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
+ SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1558, 0x95e3, "Clevo P955[ER]T", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1558, 0x95e4, "Clevo P955ER", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1558, 0x95e5, "Clevo P955EE6", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1558, 0x95e6, "Clevo P950R[CDF]", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
- SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
- SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -3249,7 +3328,13 @@ static void alc256_init(struct hda_codec *codec)
alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */
alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */
alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15);
- alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
+ /*
+ * Expose headphone mic (or possibly Line In on some machines) instead
+ * of PC Beep on 1Ah, and disable 1Ah loopback for all outputs. See
+ * Documentation/sound/hd-audio/realtek-pc-beep.rst for details of
+ * this register.
+ */
+ alc_write_coef_idx(codec, 0x36, 0x5757);
}
static void alc256_shutup(struct hda_codec *codec)
@@ -3276,7 +3361,11 @@ static void alc256_shutup(struct hda_codec *codec)
/* 3k pull low control for Headset jack. */
/* NOTE: call this before clearing the pin, otherwise codec stalls */
- alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
+ /* If the 3k pulldown control is disabled for ALC257, mic detection does not
+ * work correctly when booting with a headset plugged in, so skip it for ALC257.
+ */
+ if (codec->core.vendor_id != 0x10ec0257)
+ alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
if (!spec->no_shutup_pins)
snd_hda_codec_write(codec, hp_pin, 0,
@@ -3850,11 +3939,9 @@ static void alc269_fixup_hp_mute_led_mic3(struct hda_codec *codec,
/* update LED status via GPIO */
static void alc_update_gpio_led(struct hda_codec *codec, unsigned int mask,
- bool enabled)
+ int polarity, bool enabled)
{
- struct alc_spec *spec = codec->spec;
-
- if (spec->mute_led_polarity)
+ if (polarity)
enabled = !enabled;
alc_update_gpio_data(codec, mask, !enabled); /* muted -> LED on */
}
@@ -3865,7 +3952,8 @@ static void alc_fixup_gpio_mute_hook(void *private_data, int enabled)
struct hda_codec *codec = private_data;
struct alc_spec *spec = codec->spec;
- alc_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled);
+ alc_update_gpio_led(codec, spec->gpio_mute_led_mask,
+ spec->mute_led_polarity, enabled);
}
/* turn on/off mic-mute LED via GPIO per capture hook */
@@ -3874,6 +3962,7 @@ static void alc_gpio_micmute_update(struct hda_codec *codec)
struct alc_spec *spec = codec->spec;
alc_update_gpio_led(codec, spec->gpio_mic_led_mask,
+ spec->micmute_led_polarity,
spec->gen.micmute_led.led_value);
}
@@ -3905,6 +3994,16 @@ static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
alc_fixup_hp_gpio_led(codec, action, 0x08, 0x10);
}
+static void alc285_fixup_hp_gpio_led(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ spec->micmute_led_polarity = 1;
+
+ alc_fixup_hp_gpio_led(codec, action, 0x04, 0x01);
+}
+
static void alc286_fixup_hp_gpio_led(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -4048,6 +4147,7 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
{
struct alc_spec *spec = codec->spec;
+ spec->micmute_led_polarity = 1;
alc_fixup_hp_gpio_led(codec, action, 0, 0x04);
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
spec->init_amp = ALC_INIT_DEFAULT;
@@ -4775,7 +4875,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
case 0x10ec0274:
case 0x10ec0294:
alc_process_coef_fw(codec, coef0274);
- msleep(80);
+ msleep(850);
val = alc_read_coef_idx(codec, 0x46);
is_ctia = (val & 0x00f0) == 0x00f0;
break;
@@ -4959,6 +5059,7 @@ static void alc_update_headset_jack_cb(struct hda_codec *codec,
struct alc_spec *spec = codec->spec;
spec->current_headset_type = ALC_HEADSET_TYPE_UNKNOWN;
snd_hda_gen_hp_automute(codec, jack);
+ alc_update_headset_mode(codec);
}
static void alc_probe_headset_mode(struct hda_codec *codec)
@@ -5141,18 +5242,9 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
{ 0x19, 0x21a11010 }, /* dock mic */
{ }
};
- /* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise
- * the speaker output becomes too low by some reason on Thinkpads with
- * ALC298 codec
- */
- static hda_nid_t preferred_pairs[] = {
- 0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
- 0
- };
struct alc_spec *spec = codec->spec;
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->gen.preferred_dacs = preferred_pairs;
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
snd_hda_apply_pincfgs(codec, pincfgs);
} else if (action == HDA_FIXUP_ACT_INIT) {
@@ -5165,6 +5257,23 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
}
}
+static void alc_fixup_tpt470_dacs(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ /* Ensure the speaker pin is coupled with DAC NID 0x03; otherwise
+ * the speaker output becomes too low for some reason on Thinkpads
+ * with the ALC298 codec.
+ */
+ static hda_nid_t preferred_pairs[] = {
+ 0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
+ 0
+ };
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ spec->gen.preferred_dacs = preferred_pairs;
+}
+
static void alc_shutup_dell_xps13(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -5269,17 +5378,6 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
}
}
-static void alc256_fixup_dell_xps_13_headphone_noise2(struct hda_codec *codec,
- const struct hda_fixup *fix,
- int action)
-{
- if (action != HDA_FIXUP_ACT_PRE_PROBE)
- return;
-
- snd_hda_codec_amp_stereo(codec, 0x1a, HDA_INPUT, 0, HDA_AMP_VOLMASK, 1);
- snd_hda_override_wcaps(codec, 0x1a, get_wcaps(codec, 0x1a) & ~AC_WCAP_IN_AMP);
-}
-
static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
const struct hda_fixup *fix,
int action)
@@ -5467,7 +5565,8 @@ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
snd_hda_gen_hp_automute(codec, jack);
/* mute_led_polarity is set to 0, so we pass inverted value here */
- alc_update_gpio_led(codec, 0x10, !spec->gen.hp_jack_present);
+ alc_update_gpio_led(codec, 0x10, spec->mute_led_polarity,
+ !spec->gen.hp_jack_present);
}
/* Manage GPIOs for HP EliteBook Folio 9480m.
@@ -5528,6 +5627,15 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
}
}
+static void alc225_fixup_s3_pop_noise(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+
+ codec->power_save_node = 1;
+}
+
/* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */
static void alc274_fixup_bind_dacs(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
@@ -5580,6 +5688,7 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
#include "hp_x360_helper.c"
enum {
+ ALC269_FIXUP_GPIO2,
ALC269_FIXUP_SONY_VAIO,
ALC275_FIXUP_SONY_VAIO_GPIO2,
ALC269_FIXUP_DELL_M101Z,
@@ -5611,6 +5720,7 @@ enum {
ALC269_FIXUP_HP_LINE1_MIC1_LED,
ALC269_FIXUP_INV_DMIC,
ALC269_FIXUP_LENOVO_DOCK,
+ ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST,
ALC269_FIXUP_NO_SHUTUP,
ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
@@ -5671,8 +5781,6 @@ enum {
ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
ALC275_FIXUP_DELL_XPS,
- ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
- ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2,
ALC293_FIXUP_LENOVO_SPK_NOISE,
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
ALC255_FIXUP_DELL_SPK_NOISE,
@@ -5684,6 +5792,7 @@ enum {
ALC221_FIXUP_HP_FRONT_MIC,
ALC292_FIXUP_TPT460,
ALC298_FIXUP_SPK_VOLUME,
+ ALC298_FIXUP_LENOVO_SPK_VOLUME,
ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
ALC269_FIXUP_ATIV_BOOK_8,
ALC221_FIXUP_HP_MIC_NO_PRESENCE,
@@ -5696,9 +5805,11 @@ enum {
ALC233_FIXUP_ACER_HEADSET_MIC,
ALC294_FIXUP_LENOVO_MIC_LOCATION,
ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
+ ALC225_FIXUP_S3_POP_NOISE,
ALC700_FIXUP_INTEL_REFERENCE,
ALC274_FIXUP_DELL_BIND_DACS,
ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+ ALC298_FIXUP_TPT470_DOCK_FIX,
ALC298_FIXUP_TPT470_DOCK,
ALC255_FIXUP_DUMMY_LINEOUT_VERB,
ALC255_FIXUP_DELL_HEADSET_MIC,
@@ -5726,10 +5837,15 @@ enum {
ALC289_FIXUP_DUAL_SPK,
ALC294_FIXUP_SPK2_TO_DAC1,
ALC294_FIXUP_ASUS_DUAL_SPK,
-
+ ALC294_FIXUP_ASUS_HPE,
+ ALC285_FIXUP_HP_GPIO_LED,
};
static const struct hda_fixup alc269_fixups[] = {
+ [ALC269_FIXUP_GPIO2] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_gpio2,
+ },
[ALC269_FIXUP_SONY_VAIO] = {
.type = HDA_FIXUP_PINCTLS,
.v.pins = (const struct hda_pintbl[]) {
@@ -5924,6 +6040,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT
},
+ [ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_limit_int_mic_boost,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_LENOVO_DOCK,
+ },
[ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
@@ -6384,23 +6506,6 @@ static const struct hda_fixup alc269_fixups[] = {
{}
}
},
- [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = (const struct hda_verb[]) {
- /* Disable pass-through path for FRONT 14h */
- {0x20, AC_VERB_SET_COEF_INDEX, 0x36},
- {0x20, AC_VERB_SET_PROC_COEF, 0x1737},
- {}
- },
- .chained = true,
- .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
- },
- [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2] = {
- .type = HDA_FIXUP_FUNC,
- .v.func = alc256_fixup_dell_xps_13_headphone_noise2,
- .chained = true,
- .chain_id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE
- },
[ALC293_FIXUP_LENOVO_SPK_NOISE] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_disable_aamix,
@@ -6459,6 +6564,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
},
+ [ALC298_FIXUP_LENOVO_SPK_VOLUME] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc298_fixup_speaker_volume,
+ },
[ALC295_FIXUP_DISABLE_DAC3] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc295_fixup_disable_dac3,
@@ -6536,6 +6645,8 @@ static const struct hda_fixup alc269_fixups[] = {
[ALC233_FIXUP_LENOVO_MULTI_CODECS] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc233_alc662_fixup_lenovo_dual_codecs,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_GPIO2
},
[ALC233_FIXUP_ACER_HEADSET_MIC] = {
.type = HDA_FIXUP_VERBS,
@@ -6569,6 +6680,12 @@ static const struct hda_fixup alc269_fixups[] = {
{ }
},
.chained = true,
+ .chain_id = ALC225_FIXUP_S3_POP_NOISE
+ },
+ [ALC225_FIXUP_S3_POP_NOISE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc225_fixup_s3_pop_noise,
+ .chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
},
[ALC700_FIXUP_INTEL_REFERENCE] = {
@@ -6601,12 +6718,18 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC274_FIXUP_DELL_BIND_DACS
},
- [ALC298_FIXUP_TPT470_DOCK] = {
+ [ALC298_FIXUP_TPT470_DOCK_FIX] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_tpt470_dock,
.chained = true,
.chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
},
+ [ALC298_FIXUP_TPT470_DOCK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_tpt470_dacs,
+ .chained = true,
+ .chain_id = ALC298_FIXUP_TPT470_DOCK_FIX
+ },
[ALC255_FIXUP_DUMMY_LINEOUT_VERB] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -6667,7 +6790,7 @@ static const struct hda_fixup alc269_fixups[] = {
{ }
},
.chained = true,
- .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ .chain_id = ALC269_FIXUP_HEADSET_MIC
},
[ALC294_FIXUP_ASUS_HEADSET_MIC] = {
.type = HDA_FIXUP_PINS,
@@ -6676,7 +6799,7 @@ static const struct hda_fixup alc269_fixups[] = {
{ }
},
.chained = true,
- .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ .chain_id = ALC269_FIXUP_HEADSET_MIC
},
[ALC294_FIXUP_ASUS_SPK] = {
.type = HDA_FIXUP_VERBS,
@@ -6684,6 +6807,8 @@ static const struct hda_fixup alc269_fixups[] = {
/* Set EAPD high */
{ 0x20, AC_VERB_SET_COEF_INDEX, 0x40 },
{ 0x20, AC_VERB_SET_PROC_COEF, 0x8800 },
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 },
{ }
},
.chained = true,
@@ -6814,7 +6939,21 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC294_FIXUP_SPK2_TO_DAC1
},
-
+ [ALC294_FIXUP_ASUS_HPE] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* Set EAPD high */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+ },
+ [ALC285_FIXUP_HP_GPIO_LED] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_hp_gpio_led,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6868,17 +7007,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
- SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
- SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
- SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
+ SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
@@ -6887,8 +7023,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
- SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -6896,35 +7032,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
- SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
- /* ALC282 */
SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2237, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2238, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2239, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x224b, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
- SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
- SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
- SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
- /* ALC290 */
- SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
@@ -6932,36 +7051,53 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
+ SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
+ SND_PCI_QUIRK(0x103c, 0x827f, "HP x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6970,6 +7106,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
@@ -6980,11 +7117,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
@@ -6992,12 +7131,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
- SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
+ SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
@@ -7011,20 +7150,76 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x152d, 0x1082, "Quanta NL3", ALC269_FIXUP_LIFEBOOK),
+ SND_PCI_QUIRK(0x1558, 0x1323, "Clevo N130ZU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x1401, "Clevo L140[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x1403, "Clevo N140CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x1404, "Clevo N150CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x14a1, "Clevo L141MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x4018, "Clevo NV40M[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x4019, "Clevo NV40MZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x4020, "Clevo NV40MB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x40a1, "Clevo NL40GU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x40c1, "Clevo NL40[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x40d1, "Clevo NL41DU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50a3, "Clevo NJ51GU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50b3, "Clevo NK50S[BEZ]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50b6, "Clevo NK50S5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50b8, "Clevo NK50SZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50d5, "Clevo NP50D5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50f0, "Clevo NH50A[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50f2, "Clevo NH50E[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50f3, "Clevo NH58DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50f5, "Clevo NH55EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50f6, "Clevo NH55DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8535, "Clevo NH50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8536, "Clevo NH79D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8550, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8551, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[5|7][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
+ SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8a20, "Clevo NH55DCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x951d, "Clevo N950T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x9600, "Clevo N960K[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x961d, "Clevo N960S[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL53RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL5XNU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xc018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
+ SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
- SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
@@ -7062,9 +7257,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
@@ -7083,7 +7280,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MBXP", ALC256_FIXUP_HUAWEI_MBXP_PINS),
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
@@ -7159,6 +7355,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC269_FIXUP_HEADSET_MODE, .name = "headset-mode"},
{.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
{.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
+ {.id = ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, .name = "lenovo-dock-limit-boost"},
{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
{.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
@@ -7170,6 +7367,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
{.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
+ {.id = ALC298_FIXUP_TPT470_DOCK_FIX, .name = "tpt470-dock-fix"},
{.id = ALC298_FIXUP_TPT470_DOCK, .name = "tpt470-dock"},
{.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
{.id = ALC700_FIXUP_INTEL_REFERENCE, .name = "alc700-ref"},
@@ -7229,7 +7427,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc298-dell1"},
{.id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, .name = "alc298-dell-aio"},
{.id = ALC275_FIXUP_DELL_XPS, .name = "alc275-dell-xps"},
- {.id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, .name = "alc256-dell-xps13"},
{.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
{.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
{.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
@@ -7624,6 +7821,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC292_STANDARD_PINS,
{0x13, 0x90a60140}),
+ SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_HPE,
+ {0x17, 0x90170110},
+ {0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC,
{0x14, 0x90170110},
{0x1b, 0x90a70130},
@@ -7675,6 +7875,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
ALC225_STANDARD_PINS,
{0x12, 0xb7a60130},
{0x17, 0x90170110}),
+ SND_HDA_PIN_QUIRK(0x10ec0623, 0x17aa, "Lenovo", ALC283_FIXUP_HEADSET_MIC,
+ {0x14, 0x01014010},
+ {0x17, 0x90170120},
+ {0x18, 0x02a11030},
+ {0x19, 0x02a1103f},
+ {0x21, 0x0221101f}),
{}
};
@@ -7820,7 +8026,9 @@ static int patch_alc269(struct hda_codec *codec)
spec->gen.mixer_nid = 0;
break;
case 0x10ec0215:
+ case 0x10ec0245:
case 0x10ec0285:
+ case 0x10ec0287:
case 0x10ec0289:
spec->codec_variant = ALC269_TYPE_ALC215;
spec->shutup = alc225_shutup;
@@ -7828,8 +8036,6 @@ static int patch_alc269(struct hda_codec *codec)
spec->gen.mixer_nid = 0;
break;
case 0x10ec0225:
- codec->power_save_node = 1;
- /* fall through */
case 0x10ec0295:
case 0x10ec0299:
spec->codec_variant = ALC269_TYPE_ALC225;
@@ -7987,8 +8193,7 @@ static const struct snd_pci_quirk alc861_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
- SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", ALC861_FIXUP_AMP_VREF_0F),
- SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", ALC861_FIXUP_AMP_VREF_0F),
+ SND_PCI_QUIRK_VENDOR(0x1584, "Haier/Uniwill", ALC861_FIXUP_AMP_VREF_0F),
SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505),
{}
};
@@ -8941,6 +9146,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0245, "ALC245", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0257, "ALC257", patch_alc269),
@@ -8960,6 +9166,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0284, "ALC284", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0285, "ALC285", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0286, "ALC286", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0287, "ALC287", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0288, "ALC288", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0289, "ALC289", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269),
@@ -9001,6 +9208,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0888, "ALC888", patch_alc882),
HDA_CODEC_ENTRY(0x10ec0889, "ALC889", patch_alc882),
HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662),
+ HDA_CODEC_ENTRY(0x10ec0897, "ALC897", patch_alc662),
HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882),
HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882),
HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index d8168aa2cef3..85c33f528d7b 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -845,7 +845,7 @@ static int stac_auto_create_beep_ctls(struct hda_codec *codec,
static struct snd_kcontrol_new beep_vol_ctl =
HDA_CODEC_VOLUME(NULL, 0, 0, 0);
- /* check for mute support for the the amp */
+ /* check for mute support for the amp */
if ((caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT) {
const struct snd_kcontrol_new *temp;
if (spec->anabeep_nid == nid)
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 6b9617aee0e6..0046ea78abd2 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -126,6 +126,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
spec->codec_type = VT1708S;
spec->gen.indep_hp = 1;
spec->gen.keep_eapd_on = 1;
+ spec->gen.dac_min_mute = 1;
spec->gen.pcm_playback_hook = via_playback_pcm_hook;
spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
codec->power_save_node = 1;
@@ -1015,6 +1016,7 @@ static const struct hda_verb vt1802_init_verbs[] = {
enum {
VIA_FIXUP_INTMIC_BOOST,
VIA_FIXUP_ASUS_G75,
+ VIA_FIXUP_POWER_SAVE,
};
static void via_fixup_intmic_boost(struct hda_codec *codec,
@@ -1024,6 +1026,13 @@ static void via_fixup_intmic_boost(struct hda_codec *codec,
override_mic_boost(codec, 0x30, 0, 2, 40);
}
+static void via_fixup_power_save(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ codec->power_save_node = 0;
+}
+
static const struct hda_fixup via_fixups[] = {
[VIA_FIXUP_INTMIC_BOOST] = {
.type = HDA_FIXUP_FUNC,
@@ -1038,11 +1047,16 @@ static const struct hda_fixup via_fixups[] = {
{ }
}
},
+ [VIA_FIXUP_POWER_SAVE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = via_fixup_power_save,
+ },
};
static const struct snd_pci_quirk vt2002p_fixups[] = {
SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
+ SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),
{}
};
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index f1fe497c2f9d..7702b62dc18b 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -2377,7 +2377,8 @@ static int snd_ice1712_chip_init(struct snd_ice1712 *ice)
pci_write_config_byte(ice->pci, 0x61, ice->eeprom.data[ICE_EEP1_ACLINK]);
pci_write_config_byte(ice->pci, 0x62, ice->eeprom.data[ICE_EEP1_I2SID]);
pci_write_config_byte(ice->pci, 0x63, ice->eeprom.data[ICE_EEP1_SPDIF]);
- if (ice->eeprom.subvendor != ICE1712_SUBDEVICE_STDSP24) {
+ if (ice->eeprom.subvendor != ICE1712_SUBDEVICE_STDSP24 &&
+ ice->eeprom.subvendor != ICE1712_SUBDEVICE_STAUDIO_ADCIII) {
ice->gpio.write_mask = ice->eeprom.gpiomask;
ice->gpio.direction = ice->eeprom.gpiodir;
snd_ice1712_write(ice, ICE1712_IREG_GPIO_WRITE_MASK,
diff --git a/sound/pci/ice1712/prodigy192.c b/sound/pci/ice1712/prodigy192.c
index 3919aed39ca0..5e52086d7b98 100644
--- a/sound/pci/ice1712/prodigy192.c
+++ b/sound/pci/ice1712/prodigy192.c
@@ -31,7 +31,7 @@
* Experimentally I found out that only a combination of
* OCKS0=1, OCKS1=1 (128fs, 64fs output) and ice1724 -
* VT1724_MT_I2S_MCLK_128X=0 (256fs input) yields correct
- * sampling rate. That means the the FPGA doubles the
+ * sampling rate. That means that the FPGA doubles the
* MCK01 rate.
*
* Copyright (c) 2003 Takashi Iwai <tiwai@suse.de>
diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
index c97b5528e4b8..317bbb725b29 100644
--- a/sound/pci/ice1712/prodigy_hifi.c
+++ b/sound/pci/ice1712/prodigy_hifi.c
@@ -550,7 +550,7 @@ static int wm_adc_mux_enum_get(struct snd_kcontrol *kcontrol,
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
mutex_lock(&ice->gpio_mutex);
- ucontrol->value.integer.value[0] = wm_get(ice, WM_ADC_MUX) & 0x1f;
+ ucontrol->value.enumerated.item[0] = wm_get(ice, WM_ADC_MUX) & 0x1f;
mutex_unlock(&ice->gpio_mutex);
return 0;
}
@@ -564,7 +564,7 @@ static int wm_adc_mux_enum_put(struct snd_kcontrol *kcontrol,
mutex_lock(&ice->gpio_mutex);
oval = wm_get(ice, WM_ADC_MUX);
- nval = (oval & 0xe0) | ucontrol->value.integer.value[0];
+ nval = (oval & 0xe0) | ucontrol->value.enumerated.item[0];
if (nval != oval) {
wm_put(ice, WM_ADC_MUX, nval);
change = 1;
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index 54f6252faca6..daf25655f635 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -65,6 +65,14 @@ static const struct pci_device_id snd_lx6464es_ids[] = {
PCI_VENDOR_ID_DIGIGRAM,
PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM),
}, /* LX6464ES-CAE */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_LX6464ES,
+ PCI_VENDOR_ID_DIGIGRAM,
+ PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_SERIAL_SUBSYSTEM),
+ }, /* LX6464ESe */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_LX6464ES,
+ PCI_VENDOR_ID_DIGIGRAM,
+ PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_CAE_SERIAL_SUBSYSTEM),
+ }, /* LX6464ESe-CAE */
{ 0, },
};
diff --git a/sound/pci/mixart/mixart_core.c b/sound/pci/mixart/mixart_core.c
index 71776bfe0485..5a956f8bfa9e 100644
--- a/sound/pci/mixart/mixart_core.c
+++ b/sound/pci/mixart/mixart_core.c
@@ -83,7 +83,6 @@ static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp,
unsigned int i;
#endif
- mutex_lock(&mgr->msg_lock);
err = 0;
/* copy message descriptor from miXart to driver */
@@ -132,8 +131,6 @@ static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp,
writel_be(headptr, MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD));
_clean_exit:
- mutex_unlock(&mgr->msg_lock);
-
return err;
}
@@ -271,7 +268,9 @@ int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int
resp.data = resp_data;
resp.size = max_resp_size;
+ mutex_lock(&mgr->msg_lock);
err = get_msg(mgr, &resp, msg_frame);
+ mutex_unlock(&mgr->msg_lock);
if( request->message_id != resp.message_id )
dev_err(&mgr->pci->dev, "RESPONSE ERROR!\n");
diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
index 4cf3200e988b..df44135e1b0c 100644
--- a/sound/pci/oxygen/xonar_dg.c
+++ b/sound/pci/oxygen/xonar_dg.c
@@ -39,7 +39,7 @@
* GPIO 4 <- headphone detect
* GPIO 5 -> enable ADC analog circuit for the left channel
* GPIO 6 -> enable ADC analog circuit for the right channel
- * GPIO 7 -> switch green rear output jack between CS4245 and and the first
+ * GPIO 7 -> switch green rear output jack between CS4245 and the first
* channel of CS4361 (mechanical relay)
* GPIO 8 -> enable output to speakers
*
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index ba99ff0e93e0..a0797fc17d95 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -5343,7 +5343,8 @@ static int snd_hdsp_free(struct hdsp *hdsp)
if (hdsp->port)
pci_release_regions(hdsp->pci);
- pci_disable_device(hdsp->pci);
+ if (pci_is_enabled(hdsp->pci))
+ pci_disable_device(hdsp->pci);
return 0;
}
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 11b5b5e0e058..5dfddade1bae 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6913,7 +6913,8 @@ static int snd_hdspm_free(struct hdspm * hdspm)
if (hdspm->port)
pci_release_regions(hdspm->pci);
- pci_disable_device(hdspm->pci);
+ if (pci_is_enabled(hdspm->pci))
+ pci_disable_device(hdspm->pci);
return 0;
}
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index edd765e22377..f82fa5be7d33 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -1761,7 +1761,8 @@ static int snd_rme9652_free(struct snd_rme9652 *rme9652)
if (rme9652->port)
pci_release_regions(rme9652->pci);
- pci_disable_device(rme9652->pci);
+ if (pci_is_enabled(rme9652->pci))
+ pci_disable_device(rme9652->pci);
return 0;
}
diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c
index 3bd57c02e6fd..0b35b84abb61 100644
--- a/sound/soc/codecs/ak4458.c
+++ b/sound/soc/codecs/ak4458.c
@@ -642,6 +642,7 @@ static const struct of_device_id ak4458_of_match[] = {
{ .compatible = "asahi-kasei,ak4458", },
{ },
};
+MODULE_DEVICE_TABLE(of, ak4458_of_match);
static struct i2c_driver ak4458_i2c_driver = {
.driver = {
diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
index 448bb90c9c8e..dda165b14222 100644
--- a/sound/soc/codecs/ak5558.c
+++ b/sound/soc/codecs/ak5558.c
@@ -271,7 +271,7 @@ static void ak5558_power_off(struct ak5558_priv *ak5558)
if (!ak5558->reset_gpiod)
return;
- gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
+ gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
usleep_range(1000, 2000);
}
@@ -280,7 +280,7 @@ static void ak5558_power_on(struct ak5558_priv *ak5558)
if (!ak5558->reset_gpiod)
return;
- gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
+ gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
usleep_range(1000, 2000);
}
@@ -396,6 +396,7 @@ static const struct of_device_id ak5558_i2c_dt_ids[] = {
{ .compatible = "asahi-kasei,ak5558"},
{ }
};
+MODULE_DEVICE_TABLE(of, ak5558_i2c_dt_ids);
static struct i2c_driver ak5558_i2c_driver = {
.driver = {
diff --git a/sound/soc/codecs/cpcap.c b/sound/soc/codecs/cpcap.c
index d7f05b384f1f..1902689c5ea2 100644
--- a/sound/soc/codecs/cpcap.c
+++ b/sound/soc/codecs/cpcap.c
@@ -1263,12 +1263,12 @@ static int cpcap_voice_hw_params(struct snd_pcm_substream *substream,
if (direction == SNDRV_PCM_STREAM_CAPTURE) {
mask = 0x0000;
- mask |= CPCAP_BIT_MIC1_RX_TIMESLOT0;
- mask |= CPCAP_BIT_MIC1_RX_TIMESLOT1;
- mask |= CPCAP_BIT_MIC1_RX_TIMESLOT2;
- mask |= CPCAP_BIT_MIC2_TIMESLOT0;
- mask |= CPCAP_BIT_MIC2_TIMESLOT1;
- mask |= CPCAP_BIT_MIC2_TIMESLOT2;
+ mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT0);
+ mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT1);
+ mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT2);
+ mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT0);
+ mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT1);
+ mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT2);
val = 0x0000;
if (channels >= 2)
val = BIT(CPCAP_BIT_MIC1_RX_TIMESLOT0);
diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
index 668cd3754209..73fa784646e5 100644
--- a/sound/soc/codecs/cs35l33.c
+++ b/sound/soc/codecs/cs35l33.c
@@ -1204,6 +1204,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client,
dev_err(&i2c_client->dev,
"CS35L33 Device ID (%X). Expected ID %X\n",
devid, CS35L33_CHIP_ID);
+ ret = -EINVAL;
goto err_enable;
}
diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
index 651329bf9743..fddfd227a9c0 100644
--- a/sound/soc/codecs/cs42l42.c
+++ b/sound/soc/codecs/cs42l42.c
@@ -405,7 +405,7 @@ static const struct regmap_config cs42l42_regmap = {
};
static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
-static DECLARE_TLV_DB_SCALE(mixer_tlv, -6200, 100, false);
+static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true);
static const char * const cs42l42_hpf_freq_text[] = {
"1.86Hz", "120Hz", "235Hz", "466Hz"
@@ -462,7 +462,7 @@ static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
CS42L42_DAC_HPF_EN_SHIFT, true, false),
SOC_DOUBLE_R_TLV("Mixer Volume", CS42L42_MIXER_CHA_VOL,
CS42L42_MIXER_CHB_VOL, CS42L42_MIXER_CH_VOL_SHIFT,
- 0x3e, 1, mixer_tlv)
+ 0x3f, 1, mixer_tlv)
};
static int cs42l42_hpdrv_evt(struct snd_soc_dapm_widget *w,
@@ -695,24 +695,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
CS42L42_CLK_OASRC_SEL_MASK,
CS42L42_CLK_OASRC_SEL_12 <<
CS42L42_CLK_OASRC_SEL_SHIFT);
- /* channel 1 on low LRCLK, 32 bit */
- snd_soc_component_update_bits(component,
- CS42L42_ASP_RX_DAI0_CH1_AP_RES,
- CS42L42_ASP_RX_CH_AP_MASK |
- CS42L42_ASP_RX_CH_RES_MASK,
- (CS42L42_ASP_RX_CH_AP_LOW <<
- CS42L42_ASP_RX_CH_AP_SHIFT) |
- (CS42L42_ASP_RX_CH_RES_32 <<
- CS42L42_ASP_RX_CH_RES_SHIFT));
- /* Channel 2 on high LRCLK, 32 bit */
- snd_soc_component_update_bits(component,
- CS42L42_ASP_RX_DAI0_CH2_AP_RES,
- CS42L42_ASP_RX_CH_AP_MASK |
- CS42L42_ASP_RX_CH_RES_MASK,
- (CS42L42_ASP_RX_CH_AP_HI <<
- CS42L42_ASP_RX_CH_AP_SHIFT) |
- (CS42L42_ASP_RX_CH_RES_32 <<
- CS42L42_ASP_RX_CH_RES_SHIFT));
if (pll_ratio_table[i].mclk_src_sel == 0) {
/* Pass the clock straight through */
snd_soc_component_update_bits(component,
@@ -801,27 +783,23 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
/* Bitclock/frame inversion */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
+ asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT;
break;
case SND_SOC_DAIFMT_NB_IF:
- asp_cfg_val |= CS42L42_ASP_POL_INV <<
- CS42L42_ASP_LCPOL_IN_SHIFT;
+ asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT;
+ asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT;
break;
case SND_SOC_DAIFMT_IB_NF:
- asp_cfg_val |= CS42L42_ASP_POL_INV <<
- CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
break;
case SND_SOC_DAIFMT_IB_IF:
- asp_cfg_val |= CS42L42_ASP_POL_INV <<
- CS42L42_ASP_LCPOL_IN_SHIFT;
- asp_cfg_val |= CS42L42_ASP_POL_INV <<
- CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
+ asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT;
break;
}
- snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG,
- CS42L42_ASP_MODE_MASK |
- CS42L42_ASP_SCPOL_IN_DAC_MASK |
- CS42L42_ASP_LCPOL_IN_MASK, asp_cfg_val);
+ snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG, CS42L42_ASP_MODE_MASK |
+ CS42L42_ASP_SCPOL_MASK |
+ CS42L42_ASP_LCPOL_MASK,
+ asp_cfg_val);
return 0;
}
@@ -832,14 +810,29 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_component *component = dai->component;
struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
- int retval;
+ unsigned int width = (params_width(params) / 8) - 1;
+ unsigned int val = 0;
cs42l42->srate = params_rate(params);
- cs42l42->swidth = params_width(params);
- retval = cs42l42_pll_config(component);
+ switch(substream->stream) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ val |= width << CS42L42_ASP_RX_CH_RES_SHIFT;
+ /* channel 1 on low LRCLK */
+ snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH1_AP_RES,
+ CS42L42_ASP_RX_CH_AP_MASK |
+ CS42L42_ASP_RX_CH_RES_MASK, val);
+ /* Channel 2 on high LRCLK */
+ val |= CS42L42_ASP_RX_CH_AP_HI << CS42L42_ASP_RX_CH_AP_SHIFT;
+ snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH2_AP_RES,
+ CS42L42_ASP_RX_CH_AP_MASK |
+ CS42L42_ASP_RX_CH_RES_MASK, val);
+ break;
+ default:
+ break;
+ }
- return retval;
+ return cs42l42_pll_config(component);
}
static int cs42l42_set_sysclk(struct snd_soc_dai *dai,
@@ -904,9 +897,9 @@ static int cs42l42_digital_mute(struct snd_soc_dai *dai, int mute)
return 0;
}
-#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE | \
- SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE | \
- SNDRV_PCM_FMTBIT_S32_LE)
+#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE )
static const struct snd_soc_dai_ops cs42l42_ops = {
@@ -1807,7 +1800,7 @@ static int cs42l42_i2c_probe(struct i2c_client *i2c_client,
dev_dbg(&i2c_client->dev, "Found reset GPIO\n");
gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
}
- mdelay(3);
+ usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
/* Request IRQ */
ret = devm_request_threaded_irq(&i2c_client->dev,
@@ -1932,6 +1925,7 @@ static int cs42l42_runtime_resume(struct device *dev)
}
gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
+ usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
regcache_cache_only(cs42l42->regmap, false);
regcache_sync(cs42l42->regmap);
diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h
index 09b0a93203ef..bcaf4f22408d 100644
--- a/sound/soc/codecs/cs42l42.h
+++ b/sound/soc/codecs/cs42l42.h
@@ -262,11 +262,12 @@
#define CS42L42_ASP_SLAVE_MODE 0x00
#define CS42L42_ASP_MODE_SHIFT 4
#define CS42L42_ASP_MODE_MASK (1 << CS42L42_ASP_MODE_SHIFT)
-#define CS42L42_ASP_SCPOL_IN_DAC_SHIFT 2
-#define CS42L42_ASP_SCPOL_IN_DAC_MASK (1 << CS42L42_ASP_SCPOL_IN_DAC_SHIFT)
-#define CS42L42_ASP_LCPOL_IN_SHIFT 0
-#define CS42L42_ASP_LCPOL_IN_MASK (1 << CS42L42_ASP_LCPOL_IN_SHIFT)
-#define CS42L42_ASP_POL_INV 1
+#define CS42L42_ASP_SCPOL_SHIFT 2
+#define CS42L42_ASP_SCPOL_MASK (3 << CS42L42_ASP_SCPOL_SHIFT)
+#define CS42L42_ASP_SCPOL_NOR 3
+#define CS42L42_ASP_LCPOL_SHIFT 0
+#define CS42L42_ASP_LCPOL_MASK (3 << CS42L42_ASP_LCPOL_SHIFT)
+#define CS42L42_ASP_LCPOL_INV 3
#define CS42L42_ASP_FRM_CFG (CS42L42_PAGE_12 + 0x08)
#define CS42L42_ASP_STP_SHIFT 4
@@ -743,6 +744,7 @@
#define CS42L42_FRAC2_VAL(val) (((val) & 0xff0000) >> 16)
#define CS42L42_NUM_SUPPLIES 5
+#define CS42L42_BOOT_TIME_US 3000
static const char *const cs42l42_supply_names[CS42L42_NUM_SUPPLIES] = {
"VA",
@@ -760,7 +762,6 @@ struct cs42l42_private {
struct completion pdn_done;
u32 sclk;
u32 srate;
- u32 swidth;
u8 plug_state;
u8 hs_type;
u8 ts_inv;
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index a5c8736fad77..04f89b751304 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -1260,6 +1260,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
dev_err(&i2c_client->dev,
"CS42L56 Device ID (%X). Expected %X\n",
devid, CS42L56_DEVID);
+ ret = -EINVAL;
goto err_enable;
}
alpha_rev = reg & CS42L56_AREV_MASK;
@@ -1317,7 +1318,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
ret = devm_snd_soc_register_component(&i2c_client->dev,
&soc_component_dev_cs42l56, &cs42l56_dai, 1);
if (ret < 0)
- return ret;
+ goto err_enable;
return 0;
diff --git a/sound/soc/codecs/cs43130.c b/sound/soc/codecs/cs43130.c
index 80dc42197154..cf29dec28b5e 100644
--- a/sound/soc/codecs/cs43130.c
+++ b/sound/soc/codecs/cs43130.c
@@ -1738,6 +1738,14 @@ static DEVICE_ATTR(hpload_dc_r, 0444, cs43130_show_dc_r, NULL);
static DEVICE_ATTR(hpload_ac_l, 0444, cs43130_show_ac_l, NULL);
static DEVICE_ATTR(hpload_ac_r, 0444, cs43130_show_ac_r, NULL);
+static struct attribute *hpload_attrs[] = {
+ &dev_attr_hpload_dc_l.attr,
+ &dev_attr_hpload_dc_r.attr,
+ &dev_attr_hpload_ac_l.attr,
+ &dev_attr_hpload_ac_r.attr,
+};
+ATTRIBUTE_GROUPS(hpload);
+
static struct reg_sequence hp_en_cal_seq[] = {
{CS43130_INT_MASK_4, CS43130_INT_MASK_ALL},
{CS43130_HP_MEAS_LOAD_1, 0},
@@ -2305,23 +2313,15 @@ static int cs43130_probe(struct snd_soc_component *component)
cs43130->hpload_done = false;
if (cs43130->dc_meas) {
- ret = device_create_file(component->dev, &dev_attr_hpload_dc_l);
- if (ret < 0)
- return ret;
-
- ret = device_create_file(component->dev, &dev_attr_hpload_dc_r);
- if (ret < 0)
- return ret;
-
- ret = device_create_file(component->dev, &dev_attr_hpload_ac_l);
- if (ret < 0)
- return ret;
-
- ret = device_create_file(component->dev, &dev_attr_hpload_ac_r);
- if (ret < 0)
+ ret = sysfs_create_groups(&component->dev->kobj, hpload_groups);
+ if (ret)
return ret;
cs43130->wq = create_singlethread_workqueue("cs43130_hp");
+ if (!cs43130->wq) {
+ sysfs_remove_groups(&component->dev->kobj, hpload_groups);
+ return -ENOMEM;
+ }
INIT_WORK(&cs43130->work, cs43130_imp_meas);
}
diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
index 9ebe77c3784a..57130edaf3ab 100644
--- a/sound/soc/codecs/es8316.c
+++ b/sound/soc/codecs/es8316.c
@@ -56,13 +56,8 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv,
1, 1, TLV_DB_SCALE_ITEM(0, 0, 0),
2, 2, TLV_DB_SCALE_ITEM(250, 0, 0),
3, 3, TLV_DB_SCALE_ITEM(450, 0, 0),
- 4, 4, TLV_DB_SCALE_ITEM(700, 0, 0),
- 5, 5, TLV_DB_SCALE_ITEM(1000, 0, 0),
- 6, 6, TLV_DB_SCALE_ITEM(1300, 0, 0),
- 7, 7, TLV_DB_SCALE_ITEM(1600, 0, 0),
- 8, 8, TLV_DB_SCALE_ITEM(1800, 0, 0),
- 9, 9, TLV_DB_SCALE_ITEM(2100, 0, 0),
- 10, 10, TLV_DB_SCALE_ITEM(2400, 0, 0),
+ 4, 7, TLV_DB_SCALE_ITEM(700, 300, 0),
+ 8, 10, TLV_DB_SCALE_ITEM(1800, 300, 0),
);
static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpout_vol_tlv,
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index be2473166bfa..4594b1447900 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -148,14 +148,14 @@ static struct hdac_hdmi_pcm *
hdac_hdmi_get_pcm_from_cvt(struct hdac_hdmi_priv *hdmi,
struct hdac_hdmi_cvt *cvt)
{
- struct hdac_hdmi_pcm *pcm = NULL;
+ struct hdac_hdmi_pcm *pcm;
list_for_each_entry(pcm, &hdmi->pcm_list, head) {
if (pcm->cvt == cvt)
- break;
+ return pcm;
}
- return pcm;
+ return NULL;
}
static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm,
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 89b6e187ac23..a5b0c40ee545 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -2130,10 +2130,16 @@ static void max98090_pll_work(struct max98090_priv *max98090)
dev_info_ratelimited(component->dev, "PLL unlocked\n");
+ /*
+ * As the datasheet suggested, the maximum PLL lock time should be
+ * 7 msec. The workaround resets the codec softly by toggling SHDN
+ * off and on if PLL failed to lock for 10 msec. Notably, there is
+ * no suggested hold time for SHDN off.
+ */
+
/* Toggle shutdown OFF then ON */
snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
M98090_SHDNN_MASK, 0);
- msleep(10);
snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
M98090_SHDNN_MASK, M98090_SHDNN_MASK);
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index cbdb6d4bb91e..f4aba065c925 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -16,8 +16,8 @@
#define CDC_D_REVISION1 (0xf000)
#define CDC_D_PERPH_SUBTYPE (0xf005)
-#define CDC_D_INT_EN_SET (0x015)
-#define CDC_D_INT_EN_CLR (0x016)
+#define CDC_D_INT_EN_SET (0xf015)
+#define CDC_D_INT_EN_CLR (0xf016)
#define MBHC_SWITCH_INT BIT(7)
#define MBHC_MIC_ELECTRICAL_INS_REM_DET BIT(6)
#define MBHC_BUTTON_PRESS_DET BIT(5)
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 0b0f748bffbe..c29c6cf41ece 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -174,6 +174,9 @@ static bool rt286_readable_register(struct device *dev, unsigned int reg)
case RT286_PROC_COEF:
case RT286_SET_AMP_GAIN_ADC_IN1:
case RT286_SET_AMP_GAIN_ADC_IN2:
+ case RT286_SET_GPIO_MASK:
+ case RT286_SET_GPIO_DIRECTION:
+ case RT286_SET_GPIO_DATA:
case RT286_SET_POWER(RT286_DAC_OUT1):
case RT286_SET_POWER(RT286_DAC_OUT2):
case RT286_SET_POWER(RT286_ADC_IN1):
@@ -1118,12 +1121,11 @@ static const struct dmi_system_id force_combo_jack_table[] = {
{ }
};
-static const struct dmi_system_id dmi_dell_dino[] = {
+static const struct dmi_system_id dmi_dell[] = {
{
- .ident = "Dell Dino",
+ .ident = "Dell",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343")
}
},
{ }
@@ -1134,7 +1136,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
{
struct rt286_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct rt286_priv *rt286;
- int i, ret, val;
+ int i, ret, vendor_id;
rt286 = devm_kzalloc(&i2c->dev, sizeof(*rt286),
GFP_KERNEL);
@@ -1150,14 +1152,15 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
}
ret = regmap_read(rt286->regmap,
- RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
+ RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &vendor_id);
if (ret != 0) {
dev_err(&i2c->dev, "I2C error %d\n", ret);
return ret;
}
- if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) {
+ if (vendor_id != RT286_VENDOR_ID && vendor_id != RT288_VENDOR_ID) {
dev_err(&i2c->dev,
- "Device with ID register %#x is not rt286\n", val);
+ "Device with ID register %#x is not rt286\n",
+ vendor_id);
return -ENODEV;
}
@@ -1181,8 +1184,8 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
if (pdata)
rt286->pdata = *pdata;
- if (dmi_check_system(force_combo_jack_table) ||
- dmi_check_system(dmi_dell_dino))
+ if ((vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) ||
+ dmi_check_system(force_combo_jack_table))
rt286->pdata.cbj_en = true;
regmap_write(rt286->regmap, RT286_SET_AUDIO_POWER, AC_PWRST_D3);
@@ -1221,7 +1224,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL3, 0xf777, 0x4737);
regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL4, 0x00ff, 0x003f);
- if (dmi_check_system(dmi_dell_dino)) {
+ if (vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) {
regmap_update_bits(rt286->regmap,
RT286_SET_GPIO_MASK, 0x40, 0x40);
regmap_update_bits(rt286->regmap,
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 974e1a449172..63e19a6a9790 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -342,9 +342,9 @@ static bool rt5640_readable_register(struct device *dev, unsigned int reg)
}
static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
+static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);
static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000);
static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 7e3b47eeea04..9185bd7c5a6d 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3656,6 +3656,12 @@ static const struct rt5645_platform_data asus_t100ha_platform_data = {
.inv_jd1_1 = true,
};
+static const struct rt5645_platform_data asus_t101ha_platform_data = {
+ .dmic1_data_pin = RT5645_DMIC_DATA_IN2N,
+ .dmic2_data_pin = RT5645_DMIC2_DISABLE,
+ .jd_mode = 3,
+};
+
static const struct rt5645_platform_data lenovo_ideapad_miix_310_pdata = {
.jd_mode = 3,
.in2_diff = true,
@@ -3734,6 +3740,14 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&asus_t100ha_platform_data,
},
{
+ .ident = "ASUS T101HA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T101HA"),
+ },
+ .driver_data = (void *)&asus_t101ha_platform_data,
+ },
+ {
.ident = "MINIX Z83-4",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MINIX"),
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index 985852fd9723..318a4c9b380f 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -288,9 +288,9 @@ static bool rt5651_readable_register(struct device *dev, unsigned int reg)
}
static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
+static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);
static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000);
static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index 1c1a521c73cb..b331b3ba61a9 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -3466,12 +3466,17 @@ static int rt5659_set_component_sysclk(struct snd_soc_component *component, int
{
struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
unsigned int reg_val = 0;
+ int ret;
if (freq == rt5659->sysclk && clk_id == rt5659->sysclk_src)
return 0;
switch (clk_id) {
case RT5659_SCLK_S_MCLK:
+ ret = clk_set_rate(rt5659->mclk, freq);
+ if (ret)
+ return ret;
+
reg_val |= RT5659_SCLK_SRC_MCLK;
break;
case RT5659_SCLK_S_PLL1:
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 732ef928b25d..6a2a58e107e3 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -34,18 +34,19 @@
#include "rt5670.h"
#include "rt5670-dsp.h"
-#define RT5670_DEV_GPIO BIT(0)
-#define RT5670_IN2_DIFF BIT(1)
-#define RT5670_DMIC_EN BIT(2)
-#define RT5670_DMIC1_IN2P BIT(3)
-#define RT5670_DMIC1_GPIO6 BIT(4)
-#define RT5670_DMIC1_GPIO7 BIT(5)
-#define RT5670_DMIC2_INR BIT(6)
-#define RT5670_DMIC2_GPIO8 BIT(7)
-#define RT5670_DMIC3_GPIO5 BIT(8)
-#define RT5670_JD_MODE1 BIT(9)
-#define RT5670_JD_MODE2 BIT(10)
-#define RT5670_JD_MODE3 BIT(11)
+#define RT5670_DEV_GPIO BIT(0)
+#define RT5670_IN2_DIFF BIT(1)
+#define RT5670_DMIC_EN BIT(2)
+#define RT5670_DMIC1_IN2P BIT(3)
+#define RT5670_DMIC1_GPIO6 BIT(4)
+#define RT5670_DMIC1_GPIO7 BIT(5)
+#define RT5670_DMIC2_INR BIT(6)
+#define RT5670_DMIC2_GPIO8 BIT(7)
+#define RT5670_DMIC3_GPIO5 BIT(8)
+#define RT5670_JD_MODE1 BIT(9)
+#define RT5670_JD_MODE2 BIT(10)
+#define RT5670_JD_MODE3 BIT(11)
+#define RT5670_GPIO1_IS_EXT_SPK_EN BIT(12)
static unsigned long rt5670_quirk;
static unsigned int quirk_override;
@@ -1504,6 +1505,33 @@ static int rt5670_hp_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int rt5670_spk_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct rt5670_priv *rt5670 = snd_soc_component_get_drvdata(component);
+
+ if (!rt5670->pdata.gpio1_is_ext_spk_en)
+ return 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2,
+ RT5670_GP1_OUT_MASK, RT5670_GP1_OUT_HI);
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2,
+ RT5670_GP1_OUT_MASK, RT5670_GP1_OUT_LO);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
static int rt5670_bst1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -1917,7 +1945,9 @@ static const struct snd_soc_dapm_widget rt5670_specific_dapm_widgets[] = {
};
static const struct snd_soc_dapm_widget rt5672_specific_dapm_widgets[] = {
- SND_SOC_DAPM_PGA("SPO Amp", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA_E("SPO Amp", SND_SOC_NOPM, 0, 0, NULL, 0,
+ rt5670_spk_event, SND_SOC_DAPM_PRE_PMD |
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_OUTPUT("SPOLP"),
SND_SOC_DAPM_OUTPUT("SPOLN"),
SND_SOC_DAPM_OUTPUT("SPORP"),
@@ -2901,14 +2931,14 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = {
},
{
.callback = rt5670_quirk_cb,
- .ident = "Lenovo Thinkpad Tablet 10",
+ .ident = "Lenovo Miix 2 10",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Miix 2 10"),
},
.driver_data = (unsigned long *)(RT5670_DMIC_EN |
RT5670_DMIC1_IN2P |
- RT5670_DEV_GPIO |
+ RT5670_GPIO1_IS_EXT_SPK_EN |
RT5670_JD_MODE2),
},
{
@@ -2956,6 +2986,10 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
rt5670->pdata.dev_gpio = true;
dev_info(&i2c->dev, "quirk dev_gpio\n");
}
+ if (rt5670_quirk & RT5670_GPIO1_IS_EXT_SPK_EN) {
+ rt5670->pdata.gpio1_is_ext_spk_en = true;
+ dev_info(&i2c->dev, "quirk GPIO1 is external speaker enable\n");
+ }
if (rt5670_quirk & RT5670_IN2_DIFF) {
rt5670->pdata.in2_diff = true;
dev_info(&i2c->dev, "quirk IN2_DIFF\n");
@@ -3055,6 +3089,13 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT);
}
+ if (rt5670->pdata.gpio1_is_ext_spk_en) {
+ regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL1,
+ RT5670_GP1_PIN_MASK, RT5670_GP1_PIN_GPIO1);
+ regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2,
+ RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT);
+ }
+
if (rt5670->pdata.jd_mode) {
regmap_update_bits(rt5670->regmap, RT5670_GLB_CLK,
RT5670_SCLK_SRC_MASK, RT5670_SCLK_SRC_RCCLK);
diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h
index 97e8eebe63fa..563edd60fd04 100644
--- a/sound/soc/codecs/rt5670.h
+++ b/sound/soc/codecs/rt5670.h
@@ -760,7 +760,7 @@
#define RT5670_PWR_VREF2_BIT 4
#define RT5670_PWR_FV2 (0x1 << 3)
#define RT5670_PWR_FV2_BIT 3
-#define RT5670_LDO_SEL_MASK (0x3)
+#define RT5670_LDO_SEL_MASK (0x7)
#define RT5670_LDO_SEL_SFT 0
/* Power Management for Analog 2 (0x64) */
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 896412d11a31..17255e9683f5 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -71,7 +71,7 @@ static const struct reg_default sgtl5000_reg_defaults[] = {
{ SGTL5000_DAP_EQ_BASS_BAND4, 0x002f },
{ SGTL5000_DAP_MAIN_CHAN, 0x8000 },
{ SGTL5000_DAP_MIX_CHAN, 0x0000 },
- { SGTL5000_DAP_AVC_CTRL, 0x0510 },
+ { SGTL5000_DAP_AVC_CTRL, 0x5100 },
{ SGTL5000_DAP_AVC_THRESHOLD, 0x1473 },
{ SGTL5000_DAP_AVC_ATTACK, 0x0028 },
{ SGTL5000_DAP_AVC_DECAY, 0x0050 },
@@ -1633,6 +1633,40 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
dev_err(&client->dev,
"Error %d initializing CHIP_CLK_CTRL\n", ret);
+ /* Mute everything to avoid pop from the following power-up */
+ ret = regmap_write(sgtl5000->regmap, SGTL5000_CHIP_ANA_CTRL,
+ SGTL5000_CHIP_ANA_CTRL_DEFAULT);
+ if (ret) {
+ dev_err(&client->dev,
+ "Error %d muting outputs via CHIP_ANA_CTRL\n", ret);
+ goto disable_clk;
+ }
+
+ /*
+ * If VAG is powered on (e.g. from a previous boot), it would be disabled
+ * by the write to ANA_POWER in later steps of the probe code. This
+ * may create a loud pop even with all outputs muted. The proper way
+ * to avoid this is to disable the bit first and wait for the proper
+ * cool-down time.
+ */
+ ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ANA_POWER, &value);
+ if (ret) {
+ dev_err(&client->dev, "Failed to read ANA_POWER: %d\n", ret);
+ goto disable_clk;
+ }
+ if (value & SGTL5000_VAG_POWERUP) {
+ ret = regmap_update_bits(sgtl5000->regmap,
+ SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_VAG_POWERUP,
+ 0);
+ if (ret) {
+ dev_err(&client->dev, "Error %d disabling VAG\n", ret);
+ goto disable_clk;
+ }
+
+ msleep(SGTL5000_VAG_POWERDOWN_DELAY);
+ }
+
/* Follow section 2.2.1.1 of AN3663 */
ana_pwr = SGTL5000_ANA_POWER_DEFAULT;
if (sgtl5000->num_supplies <= VDDD) {
diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
index 18cae08bbd3a..066517e352a7 100644
--- a/sound/soc/codecs/sgtl5000.h
+++ b/sound/soc/codecs/sgtl5000.h
@@ -233,6 +233,7 @@
/*
* SGTL5000_CHIP_ANA_CTRL
*/
+#define SGTL5000_CHIP_ANA_CTRL_DEFAULT 0x0133
#define SGTL5000_LINE_OUT_MUTE 0x0100
#define SGTL5000_HP_SEL_MASK 0x0040
#define SGTL5000_HP_SEL_SHIFT 6
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index ca2dfe12344e..354f17a2fa5c 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -824,8 +824,10 @@ static int tas571x_i2c_probe(struct i2c_client *client,
priv->regmap = devm_regmap_init(dev, NULL, client,
priv->chip->regmap_config);
- if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ goto disable_regs;
+ }
priv->pdn_gpio = devm_gpiod_get_optional(dev, "pdn", GPIOD_OUT_LOW);
if (IS_ERR(priv->pdn_gpio)) {
@@ -849,7 +851,7 @@ static int tas571x_i2c_probe(struct i2c_client *client,
ret = regmap_write(priv->regmap, TAS571X_OSC_TRIM_REG, 0);
if (ret)
- return ret;
+ goto disable_regs;
usleep_range(50000, 60000);
@@ -865,12 +867,20 @@ static int tas571x_i2c_probe(struct i2c_client *client,
*/
ret = regmap_update_bits(priv->regmap, TAS571X_MVOL_REG, 1, 0);
if (ret)
- return ret;
+ goto disable_regs;
}
- return devm_snd_soc_register_component(&client->dev,
+ ret = devm_snd_soc_register_component(&client->dev,
&priv->component_driver,
&tas571x_dai, 1);
+ if (ret)
+ goto disable_regs;
+
+ return ret;
+
+disable_regs:
+ regulator_bulk_disable(priv->chip->num_supply_names, priv->supplies);
+ return ret;
}
static int tas571x_i2c_remove(struct i2c_client *client)
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 108e8bf42a34..f0a409504a13 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -419,8 +419,12 @@ int wm8958_aif_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wm8994 *control = dev_get_drvdata(component->dev->parent);
int i;
+ if (control->type != WM8958)
+ return 0;
+
switch (event) {
case SND_SOC_DAPM_POST_PMU:
case SND_SOC_DAPM_PRE_PMU:
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 8dc1f3d6a988..88e869d16714 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -710,7 +710,13 @@ int wm8960_configure_pll(struct snd_soc_component *component, int freq_in,
best_freq_out = -EINVAL;
*sysclk_idx = *dac_idx = *bclk_idx = -1;
- for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+ /*
+ * From the datasheet, the PLL performs best when f2 is between
+ * 90MHz and 100MHz; since the desired sysclk output is 11.2896MHz
+ * or 12.288MHz, sysclkdiv = 2 is the best choice.
+ * So search sysclk_divs from 2 down to 1 instead of from 1 to 2.
+ */
+ for (i = ARRAY_SIZE(sysclk_divs) - 1; i >= 0; --i) {
if (sysclk_divs[i] == -1)
continue;
for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
@@ -863,8 +869,7 @@ static int wm8960_hw_params(struct snd_pcm_substream *substream,
wm8960->is_stream_in_use[tx] = true;
- if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_ON &&
- !wm8960->is_stream_in_use[!tx])
+ if (!wm8960->is_stream_in_use[!tx])
return wm8960_configure_clocking(component);
return 0;
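A quick sanity check of the search-order comment added in the first wm8960.c hunk above. This is a standalone userspace sketch, not driver code; the relationship f2 = 4 * sysclkdiv * sysclk is an assumption taken from that comment and the usual datasheet description, not a verbatim copy of wm8960_configure_pll().

#include <stdio.h>

/* Assumed datasheet relationship: the PLL's internal frequency f2 must
 * land between 90MHz and 100MHz, with SYSCLK = f2 / (4 * sysclkdiv).
 */
static int f2_in_range(unsigned long sysclk, unsigned int sysclkdiv)
{
	unsigned long f2 = 4UL * sysclkdiv * sysclk;

	return f2 >= 90000000UL && f2 <= 100000000UL;
}

int main(void)
{
	const unsigned long rates[] = { 11289600UL, 12288000UL };
	const unsigned int divs[] = { 1, 2 };

	for (int i = 0; i < 2; i++)
		for (int j = 0; j < 2; j++)
			printf("sysclk=%lu div=%u -> f2=%lu (%s)\n",
			       rates[i], divs[j], 4UL * divs[j] * rates[i],
			       f2_in_range(rates[i], divs[j]) ? "in band" : "out of band");
	return 0;
}

Under that assumption, div = 2 puts 12.288MHz at f2 = 98.304MHz and 11.2896MHz at 90.3168MHz, both inside the band, while div = 1 gives roughly half that, which is why iterating sysclk_divs from the largest entry finds the preferred setting first.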
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 01acb8da2f48..e3e069277a3f 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3376,6 +3376,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
return -EINVAL;
}
+ pm_runtime_get_sync(component->dev);
+
switch (micbias) {
case 1:
micdet = &wm8994->micdet[0];
@@ -3423,6 +3425,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
snd_soc_dapm_sync(dapm);
+ pm_runtime_put(component->dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(wm8994_mic_detect);
@@ -3790,6 +3794,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
return -EINVAL;
}
+ pm_runtime_get_sync(component->dev);
+
if (jack) {
snd_soc_dapm_force_enable_pin(dapm, "CLK_SYS");
snd_soc_dapm_sync(dapm);
@@ -3858,6 +3864,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
snd_soc_dapm_sync(dapm);
}
+ pm_runtime_put(component->dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(wm8958_mic_detect);
@@ -4051,11 +4059,13 @@ static int wm8994_component_probe(struct snd_soc_component *component)
wm8994->hubs.dcs_readback_mode = 2;
break;
}
+ wm8994->hubs.micd_scthr = true;
break;
case WM8958:
wm8994->hubs.dcs_readback_mode = 1;
wm8994->hubs.hp_startup_mode = 1;
+ wm8994->hubs.micd_scthr = true;
switch (control->revision) {
case 0:
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index df5b36b8fc5a..bb6a95be8726 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1180,6 +1180,8 @@ static int wm8997_probe(struct platform_device *pdev)
goto err_spk_irqs;
}
+ return ret;
+
err_spk_irqs:
arizona_free_spk_irqs(arizona);
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 61294c787f27..17dc5780ab68 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -1378,7 +1378,7 @@ static int wm8998_probe(struct platform_device *pdev)
ret = arizona_init_spk_irqs(arizona);
if (ret < 0)
- return ret;
+ goto err_pm_disable;
ret = devm_snd_soc_register_component(&pdev->dev,
&soc_component_dev_wm8998,
@@ -1393,6 +1393,8 @@ static int wm8998_probe(struct platform_device *pdev)
err_spk_irqs:
arizona_free_spk_irqs(arizona);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
return ret;
}
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index b114fc7b2a95..02c557e1f779 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1379,7 +1379,7 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
ctl_work = kzalloc(sizeof(*ctl_work), GFP_KERNEL);
if (!ctl_work) {
ret = -ENOMEM;
- goto err_ctl_cache;
+ goto err_list_del;
}
ctl_work->dsp = dsp;
@@ -1389,7 +1389,8 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
return 0;
-err_ctl_cache:
+err_list_del:
+ list_del(&ctl->list);
kfree(ctl->cache);
err_ctl_name:
kfree(ctl->name);
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index fed6ea9b019f..da7fa6f5459e 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -1227,6 +1227,9 @@ int wm_hubs_handle_analogue_pdata(struct snd_soc_component *component,
snd_soc_component_update_bits(component, WM8993_ADDITIONAL_CONTROL,
WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB);
+ if (!hubs->micd_scthr)
+ return 0;
+
snd_soc_component_update_bits(component, WM8993_MICBIAS,
WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK |
WM8993_MICB1_LVL | WM8993_MICB2_LVL,
diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h
index ee339ad8514d..1433d73e09bf 100644
--- a/sound/soc/codecs/wm_hubs.h
+++ b/sound/soc/codecs/wm_hubs.h
@@ -31,6 +31,7 @@ struct wm_hubs_data {
int hp_startup_mode;
int series_startup;
int no_series_update;
+ bool micd_scthr;
bool no_cache_dac_hp_direct;
struct list_head dcs_cache;
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 14ab16e1369f..1203de8aab99 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1758,8 +1758,10 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
PTR_ERR(chan));
return PTR_ERR(chan);
}
- if (WARN_ON(!chan->device || !chan->device->dev))
+ if (WARN_ON(!chan->device || !chan->device->dev)) {
+ dma_release_channel(chan);
return -EINVAL;
+ }
if (chan->device->dev->of_node)
ret = of_property_read_string(chan->device->dev->of_node,
diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
index 1033ac6631b0..b9ac448989ed 100644
--- a/sound/soc/fsl/fsl_asrc_dma.c
+++ b/sound/soc/fsl/fsl_asrc_dma.c
@@ -241,6 +241,7 @@ static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream,
ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be);
if (ret) {
dev_err(dev, "failed to config DMA channel for Back-End\n");
+ dma_release_channel(pair->dma_chan[dir]);
return ret;
}
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index ff96db91f818..baa76337c33f 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -497,11 +497,13 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
ESAI_SAICR_SYNC, esai_priv->synchronous ?
ESAI_SAICR_SYNC : 0);
- /* Set a default slot number -- 2 */
+ /* Set slots count */
regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
- ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
+ ESAI_xCCR_xDC_MASK,
+ ESAI_xCCR_xDC(esai_priv->slots));
regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
- ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
+ ESAI_xCCR_xDC_MASK,
+ ESAI_xCCR_xDC(esai_priv->slots));
}
return 0;
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index d83be26d6446..d6a1573b457a 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -678,8 +678,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
struct regmap *regs = ssi->regs;
u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
unsigned long clkrate, baudrate, tmprate;
- unsigned int slots = params_channels(hw_params);
- unsigned int slot_width = 32;
+ unsigned int channels = params_channels(hw_params);
+ unsigned int slot_width = params_width(hw_params);
+ unsigned int slots = 2;
u64 sub, savesub = 100000;
unsigned int freq;
bool baudclk_is_used;
@@ -688,10 +689,14 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
/* Override slots and slot_width if being specifically set... */
if (ssi->slots)
slots = ssi->slots;
- /* ...but keep 32 bits if slots is 2 -- I2S Master mode */
- if (ssi->slot_width && slots != 2)
+ if (ssi->slot_width)
slot_width = ssi->slot_width;
+ /* ...but force 32 bits for stereo audio using I2S Master Mode */
+ if (channels == 2 &&
+ (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK) == SSI_SCR_I2S_MODE_MASTER)
+ slot_width = 32;
+
/* Generate bit clock based on the slot number and slot width */
freq = slots * slot_width * params_rate(hw_params);
@@ -868,6 +873,7 @@ static int fsl_ssi_hw_free(struct snd_pcm_substream *substream,
static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt)
{
u32 strcr = 0, scr = 0, stcr, srcr, mask;
+ unsigned int slots;
ssi->dai_fmt = fmt;
@@ -899,10 +905,11 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt)
return -EINVAL;
}
+ slots = ssi->slots ? : 2;
regmap_update_bits(ssi->regs, REG_SSI_STCCR,
- SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
+ SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
regmap_update_bits(ssi->regs, REG_SSI_SRCCR,
- SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
+ SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
/* Data on rising edge of bclk, frame low, 1clk before data */
strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP | SSI_STCR_TEFS;
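The first fsl_ssi.c hunk above changes how the bit clock is derived: the slot count now defaults to 2 instead of following the channel count, the slot width follows the sample width, and stereo audio in I2S master mode is forced back to 32-bit slots. Below is a minimal userspace sketch of that calculation; the i2s_master flag stands in for the driver's (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK) test and the numbers are illustrative only.

#include <stdbool.h>
#include <stdio.h>

static unsigned int ssi_bclk(unsigned int channels, unsigned int sample_width,
			     unsigned int rate, bool i2s_master)
{
	unsigned int slots = 2;			/* default, may be overridden via DT */
	unsigned int slot_width = sample_width;	/* now taken from params_width() */

	/* stereo I2S master mode keeps 32-bit slots, as in the hunk above */
	if (channels == 2 && i2s_master)
		slot_width = 32;

	/* freq = slots * slot_width * rate, matching the driver */
	return slots * slot_width * rate;
}

int main(void)
{
	printf("16-bit/48kHz slave:  %u Hz\n", ssi_bclk(2, 16, 48000, false));
	printf("16-bit/48kHz master: %u Hz\n", ssi_bclk(2, 16, 48000, true));
	return 0;
}

The slave case comes out at 1.536MHz against 3.072MHz for I2S master; before this change the 32-bit slot width was applied unconditionally for stereo streams, as the removed lines show.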
diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c
index 388cefd7340a..7e48c740bf55 100644
--- a/sound/soc/img/img-i2s-in.c
+++ b/sound/soc/img/img-i2s-in.c
@@ -346,8 +346,10 @@ static int img_i2s_in_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
chan_control_mask = IMG_I2S_IN_CH_CTL_CLK_TRANS_MASK;
ret = pm_runtime_get_sync(i2s->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(i2s->dev);
return ret;
+ }
for (i = 0; i < i2s->active_channels; i++)
img_i2s_in_ch_disable(i2s, i);
@@ -485,6 +487,7 @@ static int img_i2s_in_probe(struct platform_device *pdev)
if (IS_ERR(rst)) {
if (PTR_ERR(rst) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
+ pm_runtime_put(&pdev->dev);
goto err_suspend;
}
diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c
index fc2d1dac6333..798ab579564c 100644
--- a/sound/soc/img/img-i2s-out.c
+++ b/sound/soc/img/img-i2s-out.c
@@ -350,8 +350,10 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
chan_control_mask = IMG_I2S_OUT_CHAN_CTL_CLKT_MASK;
ret = pm_runtime_get_sync(i2s->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(i2s->dev);
return ret;
+ }
img_i2s_out_disable(i2s);
@@ -491,8 +493,10 @@ static int img_i2s_out_probe(struct platform_device *pdev)
goto err_pm_disable;
}
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
goto err_suspend;
+ }
reg = IMG_I2S_OUT_CTL_FRM_SIZE_MASK;
img_i2s_out_writel(i2s, reg, IMG_I2S_OUT_CTL);
diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c
index acc005217be0..f56752662b19 100644
--- a/sound/soc/img/img-parallel-out.c
+++ b/sound/soc/img/img-parallel-out.c
@@ -166,8 +166,10 @@ static int img_prl_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
}
ret = pm_runtime_get_sync(prl->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(prl->dev);
return ret;
+ }
reg = img_prl_out_readl(prl, IMG_PRL_OUT_CTL);
reg = (reg & ~IMG_PRL_OUT_CTL_EDGE_MASK) | control_set;
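The img-i2s-in, img-i2s-out and img-parallel-out hunks above (and the rockchip_pdm and tegra30 resume hunks later in this series) all apply the same idiom: pm_runtime_get_sync() increments the device's usage count even when it fails, so the error path has to drop that reference with pm_runtime_put_noidle() or pm_runtime_put(). A generic sketch of the pattern; the foo_* names and foo_touch_hardware() are placeholders, not symbols from this series.

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* hypothetical hardware access, declared only to keep the sketch complete */
static int foo_touch_hardware(struct device *dev);

static int foo_do_work(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the usage count was bumped even though resume failed */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	ret = foo_touch_hardware(dev);

	pm_runtime_put(dev);
	return ret;
}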
diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index 3672d36b4b66..a1d7f93a0805 100644
--- a/sound/soc/intel/atom/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
@@ -974,7 +974,9 @@ static int sst_set_be_modules(struct snd_soc_dapm_widget *w,
dev_dbg(c->dev, "Enter: widget=%s\n", w->name);
if (SND_SOC_DAPM_EVENT_ON(event)) {
+ mutex_lock(&drv->lock);
ret = sst_send_slot_map(drv);
+ mutex_unlock(&drv->lock);
if (ret)
return ret;
ret = sst_send_pipe_module_params(w, k);
@@ -1341,7 +1343,7 @@ int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute)
dai->capture_widget->name);
w = dai->capture_widget;
snd_soc_dapm_widget_for_each_source_path(w, p) {
- if (p->connected && !p->connected(w, p->sink))
+ if (p->connected && !p->connected(w, p->source))
continue;
if (p->connect && p->source->power &&
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 6868e71e3a3f..be773101d876 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -339,7 +339,7 @@ static int sst_media_open(struct snd_pcm_substream *substream,
ret_val = power_up_sst(stream);
if (ret_val < 0)
- return ret_val;
+ goto out_power_up;
/* Make sure, that the period size is always even */
snd_pcm_hw_constraint_step(substream->runtime, 0,
@@ -348,8 +348,9 @@ static int sst_media_open(struct snd_pcm_substream *substream,
return snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
out_ops:
- kfree(stream);
mutex_unlock(&sst_lock);
+out_power_up:
+ kfree(stream);
return ret_val;
}
@@ -507,14 +508,14 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
.channels_min = SST_STEREO,
.channels_max = SST_STEREO,
.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
.stream_name = "Headset Capture",
.channels_min = 1,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
},
},
{
@@ -525,7 +526,7 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
.channels_min = SST_STEREO,
.channels_max = SST_STEREO,
.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
},
},
{
diff --git a/sound/soc/intel/atom/sst/sst_pci.c b/sound/soc/intel/atom/sst/sst_pci.c
index 6906ee624cf6..438c7bcd8c4c 100644
--- a/sound/soc/intel/atom/sst/sst_pci.c
+++ b/sound/soc/intel/atom/sst/sst_pci.c
@@ -107,7 +107,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
dev_dbg(ctx->dev, "DRAM Ptr %p\n", ctx->dram);
do_release_regions:
pci_release_regions(pci);
- return 0;
+ return ret;
}
/*
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index 27308337ab12..ba76e37a4b09 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -544,6 +544,7 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
/* broxton audio machine driver for SPT + RT298S */
static struct snd_soc_card broxton_rt298 = {
.name = "broxton-rt298",
+ .owner = THIS_MODULE,
.dai_link = broxton_rt298_dais,
.num_links = ARRAY_SIZE(broxton_rt298_dais),
.controls = broxton_controls,
@@ -559,6 +560,7 @@ static struct snd_soc_card broxton_rt298 = {
static struct snd_soc_card geminilake_rt298 = {
.name = "geminilake-rt298",
+ .owner = THIS_MODULE,
.dai_link = broxton_rt298_dais,
.num_links = ARRAY_SIZE(broxton_rt298_dais),
.controls = broxton_controls,
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index e58240e18b30..4ebc023f1507 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -409,6 +409,19 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+ { /* Acer One 10 S1002 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "One S1002"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_SSP0_AIF2 |
+ BYT_RT5640_MCLK_EN),
+ },
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -433,6 +446,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
},
{
.matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ARCHOS 140 CESIUM"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
+ .matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
},
@@ -448,6 +473,9 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
},
.driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
BYT_RT5640_MONO_SPEAKER |
BYT_RT5640_DIFF_MIC |
BYT_RT5640_SSP0_AIF2 |
@@ -482,6 +510,23 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_MCLK_EN),
},
{
+ /* Chuwi Hi8 (CWI509) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-PA03C"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Max B3 PLATFORM"),
@@ -510,6 +555,16 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_MONO_SPEAKER |
BYT_RT5640_MCLK_EN),
},
+ { /* Estar Beauty HD MID 7316R */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -588,6 +643,27 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+ { /* MPMAN Converter 9, similar hw as the I.T.Works TW891 2-in-1 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Converter9"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
+ /* MPMAN MPWIN895CL */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MPMAN"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MPWIN8900CL"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* MSI S100 tablet */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
@@ -701,6 +777,44 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+ { /* Toshiba Encore WT8-A */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT8-A"),
+ },
+ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_JD_NOT_INV |
+ BYT_RT5640_MCLK_EN),
+ },
+ { /* Toshiba Encore WT10-A */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A-103"),
+ },
+ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_JD_SRC_JD1_IN4P |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_SSP0_AIF2 |
+ BYT_RT5640_MCLK_EN),
+ },
+ { /* Voyo Winpad A15 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "11/20/2014"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* Catch-all for generic Insyde tablets, must be last */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index a4022983a7ce..67eb4a446c3c 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -198,6 +198,7 @@ static struct platform_driver haswell_audio = {
.probe = haswell_audio_probe,
.driver = {
.name = "haswell-audio",
+ .pm = &snd_soc_pm_ops,
},
};
diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c
index 245df1067ba8..7b429c0fb4e5 100644
--- a/sound/soc/intel/skylake/cnl-sst.c
+++ b/sound/soc/intel/skylake/cnl-sst.c
@@ -212,6 +212,7 @@ static int cnl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
"dsp boot timeout, status=%#x error=%#x\n",
sst_dsp_shim_read(ctx, CNL_ADSP_FW_STATUS),
sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE));
+ ret = -ETIMEDOUT;
goto err;
}
} else {
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index e099c0505b76..2c6b0ac97c68 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -318,10 +318,14 @@ static int jz4740_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id,
switch (clk_id) {
case JZ4740_I2S_CLKSRC_EXT:
parent = clk_get(NULL, "ext");
+ if (IS_ERR(parent))
+ return PTR_ERR(parent);
clk_set_parent(i2s->clk_i2s, parent);
break;
case JZ4740_I2S_CLKSRC_PLL:
parent = clk_get(NULL, "pll half");
+ if (IS_ERR(parent))
+ return PTR_ERR(parent);
clk_set_parent(i2s->clk_i2s, parent);
ret = clk_set_rate(i2s->clk_i2s, freq);
break;
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index c6a58520d377..255cc45905b8 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -136,7 +136,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED,
"kirkwood-i2s", priv);
if (err)
- return -EBUSY;
+ return err;
/*
* Enable Error interrupts. We're only ack'ing them but
diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig
index 8af8bc358a90..19fd4d583b86 100644
--- a/sound/soc/meson/Kconfig
+++ b/sound/soc/meson/Kconfig
@@ -1,5 +1,5 @@
menu "ASoC support for Amlogic platforms"
- depends on ARCH_MESON || COMPILE_TEST
+ depends on ARCH_MESON || (COMPILE_TEST && COMMON_CLK)
config SND_MESON_AXG_FIFO
tristate
diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
index 0e4f65e654c4..b229c182b7c3 100644
--- a/sound/soc/meson/axg-fifo.c
+++ b/sound/soc/meson/axg-fifo.c
@@ -209,7 +209,7 @@ static int axg_fifo_pcm_open(struct snd_pcm_substream *ss)
/* Enable pclk to access registers and clock the fifo ip */
ret = clk_prepare_enable(fifo->pclk);
if (ret)
- return ret;
+ goto free_irq;
/* Setup status2 so it reports the memory pointer */
regmap_update_bits(fifo->map, FIFO_CTRL1,
@@ -229,8 +229,14 @@ static int axg_fifo_pcm_open(struct snd_pcm_substream *ss)
/* Take memory arbiter out of reset */
ret = reset_control_deassert(fifo->arb);
if (ret)
- clk_disable_unprepare(fifo->pclk);
+ goto free_clk;
+
+ return 0;
+free_clk:
+ clk_disable_unprepare(fifo->pclk);
+free_irq:
+ free_irq(fifo->irq, ss);
return ret;
}
diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
index 7b8baf46d968..01cc551a8e3f 100644
--- a/sound/soc/meson/axg-tdm-interface.c
+++ b/sound/soc/meson/axg-tdm-interface.c
@@ -111,18 +111,25 @@ static int axg_tdm_iface_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
- /* These modes are not supported */
- if (fmt & (SND_SOC_DAIFMT_CBS_CFM | SND_SOC_DAIFMT_CBM_CFS)) {
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ if (!iface->mclk) {
+ dev_err(dai->dev, "cpu clock master: mclk missing\n");
+ return -ENODEV;
+ }
+ break;
+
+ case SND_SOC_DAIFMT_CBM_CFM:
+ break;
+
+ case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_CBM_CFS:
dev_err(dai->dev, "only CBS_CFS and CBM_CFM are supported\n");
+ /* Fall-through */
+ default:
return -EINVAL;
}
- /* If the TDM interface is the clock master, it requires mclk */
- if (!iface->mclk && (fmt & SND_SOC_DAIFMT_CBS_CFS)) {
- dev_err(dai->dev, "cpu clock master: mclk missing\n");
- return -ENODEV;
- }
-
iface->fmt = fmt;
return 0;
}
@@ -311,7 +318,8 @@ static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream,
if (ret)
return ret;
- if (iface->fmt & SND_SOC_DAIFMT_CBS_CFS) {
+ if ((iface->fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
+ SND_SOC_DAIFMT_CBS_CFS) {
ret = axg_tdm_iface_set_sclk(dai, params);
if (ret)
return ret;
@@ -451,8 +459,20 @@ static int axg_tdm_iface_set_bias_level(struct snd_soc_component *component,
return ret;
}
+static const struct snd_soc_dapm_widget axg_tdm_iface_dapm_widgets[] = {
+ SND_SOC_DAPM_SIGGEN("Playback Signal"),
+};
+
+static const struct snd_soc_dapm_route axg_tdm_iface_dapm_routes[] = {
+ { "Loopback", NULL, "Playback Signal" },
+};
+
static const struct snd_soc_component_driver axg_tdm_iface_component_drv = {
- .set_bias_level = axg_tdm_iface_set_bias_level,
+ .dapm_widgets = axg_tdm_iface_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(axg_tdm_iface_dapm_widgets),
+ .dapm_routes = axg_tdm_iface_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(axg_tdm_iface_dapm_routes),
+ .set_bias_level = axg_tdm_iface_set_bias_level,
};
static const struct of_device_id axg_tdm_iface_of_match[] = {
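The axg-tdm-interface set_fmt rework above works because SND_SOC_DAIFMT_CBS_CFS and friends are enumerated values packed into one 4-bit field, not independent flag bits, so testing them with a plain bitwise AND can match the wrong mode. A small standalone illustration, assuming the usual <sound/soc-dai.h> encoding of this era (values 1-4 shifted into bits 12-15):

#include <stdio.h>

/* Assumed encoding, mirroring include/sound/soc-dai.h. */
#define DAIFMT_CBM_CFM		(1 << 12)
#define DAIFMT_CBS_CFM		(2 << 12)
#define DAIFMT_CBM_CFS		(3 << 12)
#define DAIFMT_CBS_CFS		(4 << 12)
#define DAIFMT_MASTER_MASK	0xf000

int main(void)
{
	unsigned int fmt = DAIFMT_CBM_CFM;	/* codec is clock and frame master */

	/* The removed check rejected anything with the CBS_CFM or CBM_CFS
	 * bits set.  CBM_CFM (0x1000) shares a bit with CBM_CFS (0x3000),
	 * so a supported mode was rejected as well.
	 */
	printf("old bitwise test rejects CBM_CFM: %d\n",
	       !!(fmt & (DAIFMT_CBS_CFM | DAIFMT_CBM_CFS)));

	/* The new masked comparison classifies the mode correctly. */
	printf("masked value equals CBM_CFM: %d\n",
	       (fmt & DAIFMT_MASTER_MASK) == DAIFMT_CBM_CFM);
	return 0;
}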
diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
index 2a4c912d1e48..57941413de8b 100644
--- a/sound/soc/qcom/Kconfig
+++ b/sound/soc/qcom/Kconfig
@@ -70,7 +70,7 @@ config SND_SOC_QDSP6_ASM_DAI
config SND_SOC_QDSP6
tristate "SoC ALSA audio driver for QDSP6"
- depends on QCOM_APR && HAS_DMA
+ depends on QCOM_APR
select SND_SOC_QDSP6_COMMON
select SND_SOC_QDSP6_CORE
select SND_SOC_QDSP6_AFE
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index 4b559932adc3..121460db8eac 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -233,6 +233,7 @@ static int apq8016_sbc_platform_probe(struct platform_device *pdev)
return -ENOMEM;
card->dev = dev;
+ card->owner = THIS_MODULE;
card->dapm_widgets = apq8016_sbc_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(apq8016_sbc_dapm_widgets);
data = apq8016_sbc_parse_of(card);
diff --git a/sound/soc/qcom/apq8096.c b/sound/soc/qcom/apq8096.c
index 1543e85629f8..04f814a0a7d5 100644
--- a/sound/soc/qcom/apq8096.c
+++ b/sound/soc/qcom/apq8096.c
@@ -46,6 +46,7 @@ static int apq8096_platform_probe(struct platform_device *pdev)
return -ENOMEM;
card->dev = dev;
+ card->owner = THIS_MODULE;
dev_set_drvdata(dev, card);
ret = qcom_snd_parse_of(card);
if (ret) {
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 292b103abada..475579a9830a 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -182,21 +182,6 @@ static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int lpass_cpu_daiops_hw_free(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
- int ret;
-
- ret = regmap_write(drvdata->lpaif_map,
- LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id),
- 0);
- if (ret)
- dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
-
- return ret;
-}
-
static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -277,7 +262,6 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
.startup = lpass_cpu_daiops_startup,
.shutdown = lpass_cpu_daiops_shutdown,
.hw_params = lpass_cpu_daiops_hw_params,
- .hw_free = lpass_cpu_daiops_hw_free,
.prepare = lpass_cpu_daiops_prepare,
.trigger = lpass_cpu_daiops_trigger,
};
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index d07271ea4c45..1d06e2b7bb63 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -69,7 +69,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
int ret, dma_ch, dir = substream->stream;
struct lpass_pcm_data *data;
- data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -81,8 +81,10 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
else
dma_ch = 0;
- if (dma_ch < 0)
+ if (dma_ch < 0) {
+ kfree(data);
return dma_ch;
+ }
drvdata->substream[dma_ch] = substream;
@@ -103,6 +105,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
ret = snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
if (ret < 0) {
+ kfree(data);
dev_err(soc_runtime->dev, "setting constraints failed: %d\n",
ret);
return -EINVAL;
@@ -127,6 +130,7 @@ static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream)
if (v->free_dma_channel)
v->free_dma_channel(drvdata, data->dma_ch);
+ kfree(data);
return 0;
}
diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
index 8f6c8fc073a9..1fc1939b90c2 100644
--- a/sound/soc/qcom/qdsp6/q6afe-dai.c
+++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
@@ -899,6 +899,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -914,6 +916,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -928,6 +932,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -943,6 +949,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -957,6 +965,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -972,6 +982,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -986,6 +998,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -1001,6 +1015,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
index 2b2c7233bb5f..1bdacf797613 100644
--- a/sound/soc/qcom/qdsp6/q6asm.c
+++ b/sound/soc/qcom/qdsp6/q6asm.c
@@ -25,6 +25,7 @@
#define ASM_STREAM_CMD_FLUSH 0x00010BCE
#define ASM_SESSION_CMD_PAUSE 0x00010BD3
#define ASM_DATA_CMD_EOS 0x00010BDB
+#define ASM_DATA_EVENT_RENDERED_EOS 0x00010C1C
#define ASM_NULL_POPP_TOPOLOGY 0x00010C68
#define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09
#define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
@@ -545,9 +546,6 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
case ASM_SESSION_CMD_SUSPEND:
client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE;
break;
- case ASM_DATA_CMD_EOS:
- client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
- break;
case ASM_STREAM_CMD_FLUSH:
client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
break;
@@ -651,6 +649,9 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
}
break;
+ case ASM_DATA_EVENT_RENDERED_EOS:
+ client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
+ break;
}
if (ac->cb)
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index c6b51571be94..44eee18c658a 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -968,6 +968,20 @@ static int msm_routing_probe(struct snd_soc_component *c)
return 0;
}
+static unsigned int q6routing_reg_read(struct snd_soc_component *component,
+ unsigned int reg)
+{
+ /* default value */
+ return 0;
+}
+
+static int q6routing_reg_write(struct snd_soc_component *component,
+ unsigned int reg, unsigned int val)
+{
+ /* dummy */
+ return 0;
+}
+
static const struct snd_soc_component_driver msm_soc_routing_component = {
.ops = &q6pcm_routing_ops,
.probe = msm_routing_probe,
@@ -976,6 +990,8 @@ static const struct snd_soc_component_driver msm_soc_routing_component = {
.num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
.dapm_routes = intercon,
.num_dapm_routes = ARRAY_SIZE(intercon),
+ .read = q6routing_reg_read,
+ .write = q6routing_reg_write,
};
static int q6pcm_routing_probe(struct platform_device *pdev)
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index 2a781d87ee65..5fdbfa363ab1 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -226,6 +226,7 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev)
}
card->dev = dev;
+ card->owner = THIS_MODULE;
dev_set_drvdata(dev, card);
ret = qcom_snd_parse_of(card);
if (ret) {
diff --git a/sound/soc/qcom/storm.c b/sound/soc/qcom/storm.c
index a9fa972466ad..00a3f4c1b6fe 100644
--- a/sound/soc/qcom/storm.c
+++ b/sound/soc/qcom/storm.c
@@ -99,6 +99,7 @@ static int storm_platform_probe(struct platform_device *pdev)
return -ENOMEM;
card->dev = &pdev->dev;
+ card->owner = THIS_MODULE;
ret = snd_soc_of_parse_card_name(card, "qcom,model");
if (ret) {
diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
index 8a2e3bbce3a1..ad16c8310dd3 100644
--- a/sound/soc/rockchip/rockchip_pdm.c
+++ b/sound/soc/rockchip/rockchip_pdm.c
@@ -478,8 +478,10 @@ static int rockchip_pdm_resume(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(dev);
return ret;
+ }
ret = regcache_sync(pdm->regmap);
diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
index 43332c32d7e9..e5890485fc5e 100644
--- a/sound/soc/samsung/tm2_wm5110.c
+++ b/sound/soc/samsung/tm2_wm5110.c
@@ -541,7 +541,7 @@ static int tm2_probe(struct platform_device *pdev)
ret = of_parse_phandle_with_args(dev->of_node, "i2s-controller",
cells_name, i, &args);
- if (!args.np) {
+ if (ret) {
dev_err(dev, "i2s-controller property parse error: %d\n", i);
ret = -EINVAL;
goto dai_node_put;
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 33dc8d6ad35b..a6cf2ac223e4 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -566,10 +566,16 @@ static int rsnd_ssi_stop(struct rsnd_mod *mod,
* Capture: It might not receive data. Do nothing
*/
if (rsnd_io_is_play(io)) {
- rsnd_mod_write(mod, SSICR, cr | EN);
+ rsnd_mod_write(mod, SSICR, cr | ssi->cr_en);
rsnd_ssi_status_check(mod, DIRQ);
}
+ /* In multi-SSI mode, stop is performed by setting ssi0129 in
+ * SSI_CONTROL to 0 (in rsnd_ssio_stop_gen2). Do nothing here.
+ */
+ if (rsnd_ssi_multi_slaves_runtime(io))
+ return 0;
+
/*
* disable SSI,
* and, wait idle state
@@ -674,6 +680,9 @@ static void rsnd_ssi_parent_attach(struct rsnd_mod *mod,
if (!rsnd_rdai_is_clk_master(rdai))
return;
+ if (rsnd_ssi_is_multi_slave(mod, io))
+ return;
+
switch (rsnd_mod_id(mod)) {
case 1:
case 2:
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 016fbf5ac242..7b5eb316c366 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -172,7 +172,7 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
i;
for_each_rsnd_mod_array(i, pos, io, rsnd_ssi_array) {
- shift = (i * 4) + 16;
+ shift = (i * 4) + 20;
val = (val & ~(0xF << shift)) |
rsnd_mod_id(pos) << shift;
}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index e45dfcb62b6a..595fe20bbc6d 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1920,9 +1920,25 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
dai_link->platform_name = component->name;
/* convert non BE into BE */
- dai_link->no_pcm = 1;
- dai_link->dpcm_playback = 1;
- dai_link->dpcm_capture = 1;
+ if (!dai_link->no_pcm) {
+ dai_link->no_pcm = 1;
+
+ if (dai_link->dpcm_playback)
+ dev_warn(card->dev,
+ "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_playback=1\n",
+ dai_link->name);
+ if (dai_link->dpcm_capture)
+ dev_warn(card->dev,
+ "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_capture=1\n",
+ dai_link->name);
+
+ /* convert normal link into DPCM one */
+ if (!(dai_link->dpcm_playback ||
+ dai_link->dpcm_capture)) {
+ dai_link->dpcm_playback = !dai_link->capture_only;
+ dai_link->dpcm_capture = !dai_link->playback_only;
+ }
+ }
/* override any BE fixups */
dai_link->be_hw_params_fixup =
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index db5b005f4b1e..4e99d9986f11 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -410,7 +410,7 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
memset(&template, 0, sizeof(template));
template.reg = e->reg;
- template.mask = e->mask << e->shift_l;
+ template.mask = e->mask;
template.shift = e->shift_l;
template.off_val = snd_soc_enum_item_to_val(e, 0);
template.on_val = template.off_val;
@@ -536,8 +536,22 @@ static bool dapm_kcontrol_set_value(const struct snd_kcontrol *kcontrol,
if (data->value == value)
return false;
- if (data->widget)
- data->widget->on_val = value;
+ if (data->widget) {
+ switch (dapm_kcontrol_get_wlist(kcontrol)->widgets[0]->id) {
+ case snd_soc_dapm_switch:
+ case snd_soc_dapm_mixer:
+ case snd_soc_dapm_mixer_named_ctl:
+ data->widget->on_val = value & data->widget->mask;
+ break;
+ case snd_soc_dapm_demux:
+ case snd_soc_dapm_mux:
+ data->widget->on_val = value >> data->widget->shift;
+ break;
+ default:
+ data->widget->on_val = value;
+ break;
+ }
+ }
data->value = value;
@@ -792,7 +806,13 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i,
val = max - val;
p->connect = !!val;
} else {
- p->connect = 0;
+ /* Since a virtual mixer has no backing registers from
+ * which to decide which path to connect, match the
+ * initial state instead. This ensures that the default
+ * mixer choice is correctly powered up during
+ * initialization.
+ */
+ p->connect = invert;
}
}
@@ -2434,6 +2454,7 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
enum snd_soc_dapm_direction dir;
list_del(&w->list);
+ list_del(&w->dirty);
/*
* remove source and sink paths associated to this widget.
* While removing the path, remove reference to it from both
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index f4dc3d445aae..95fc24580f85 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -832,7 +832,7 @@ int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol,
unsigned int regbase = mc->regbase;
unsigned int regcount = mc->regcount;
unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
- unsigned int regwmask = (1<<regwshift)-1;
+ unsigned int regwmask = (1UL<<regwshift)-1;
unsigned int invert = mc->invert;
unsigned long mask = (1UL<<mc->nbits)-1;
long min = mc->min;
@@ -881,7 +881,7 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
unsigned int regbase = mc->regbase;
unsigned int regcount = mc->regcount;
unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
- unsigned int regwmask = (1<<regwshift)-1;
+ unsigned int regwmask = (1UL<<regwshift)-1;
unsigned int invert = mc->invert;
unsigned long mask = (1UL<<mc->nbits)-1;
long max = mc->max;
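Context for the soc-ops.c one-liners above: for a 4-byte register regwshift is 32, and shifting a 32-bit int by 32 is undefined behaviour in C, so (1 << regwshift) - 1 can silently produce a mask of 0. Widening the constant to unsigned long makes the shift well-defined on 64-bit builds. A short userspace demonstration of the arithmetic only:

#include <stdio.h>

int main(void)
{
	unsigned int regwshift = 4 * 8;		/* val_bytes * BITS_PER_BYTE */

	/* (1 << regwshift) - 1 would be undefined behaviour here, since
	 * the shift count equals the width of int; many compilers wrap
	 * the shift and the mask degenerates to 0.
	 */
	unsigned long regwmask = (1UL << regwshift) - 1;	/* 0xffffffff on LP64 */

	printf("regwmask = %#lx\n", regwmask);
	return 0;
}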
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 356d4e754561..af14304645ce 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2266,7 +2266,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
continue;
ret = dpcm_do_trigger(dpcm, be_substream, cmd);
@@ -2296,7 +2297,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
break;
case SNDRV_PCM_TRIGGER_STOP:
- if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
+ if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
continue;
if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
@@ -2388,6 +2390,7 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_DRAIN:
ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
break;
case SNDRV_PCM_TRIGGER_STOP:
@@ -2405,6 +2408,7 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_DRAIN:
ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
break;
case SNDRV_PCM_TRIGGER_STOP:
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 30fc45aa1869..2c6598e07dde 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -364,7 +364,7 @@ static int soc_tplg_add_kcontrol(struct soc_tplg *tplg,
struct snd_soc_component *comp = tplg->comp;
return soc_tplg_add_dcontrol(comp->card->snd_card,
- comp->dev, k, NULL, comp, kcontrol);
+ comp->dev, k, comp->name_prefix, comp, kcontrol);
}
/* remove a mixer kcontrol */
@@ -1923,7 +1923,9 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
_pcm = pcm;
} else {
abi_match = false;
- pcm_new_ver(tplg, pcm, &_pcm);
+ ret = pcm_new_ver(tplg, pcm, &_pcm);
+ if (ret < 0)
+ return ret;
}
/* create the FE DAIs and DAI links */
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index 9a3cb7704810..16e8f74288a5 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -1235,6 +1235,7 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
return ERR_PTR(-ENOMEM);
card->dev = dev;
+ card->owner = THIS_MODULE;
card->name = "sun4i-codec";
card->dapm_widgets = sun4i_codec_card_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(sun4i_codec_card_dapm_widgets);
@@ -1267,6 +1268,7 @@ static struct snd_soc_card *sun6i_codec_create_card(struct device *dev)
return ERR_PTR(-ENOMEM);
card->dev = dev;
+ card->owner = THIS_MODULE;
card->name = "A31 Audio Codec";
card->dapm_widgets = sun6i_codec_card_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1320,6 +1322,7 @@ static struct snd_soc_card *sun8i_a23_codec_create_card(struct device *dev)
return ERR_PTR(-ENOMEM);
card->dev = dev;
+ card->owner = THIS_MODULE;
card->name = "A23 Audio Codec";
card->dapm_widgets = sun6i_codec_card_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1358,6 +1361,7 @@ static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev)
return ERR_PTR(-ENOMEM);
card->dev = dev;
+ card->owner = THIS_MODULE;
card->name = "H3 Audio Codec";
card->dapm_widgets = sun6i_codec_card_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1396,6 +1400,7 @@ static struct snd_soc_card *sun8i_v3s_codec_create_card(struct device *dev)
return ERR_PTR(-ENOMEM);
card->dev = dev;
+ card->owner = THIS_MODULE;
card->name = "V3s Audio Codec";
card->dapm_widgets = sun6i_codec_card_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
index 43679aeeb12b..88e838ac937d 100644
--- a/sound/soc/tegra/tegra30_ahub.c
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -655,8 +655,10 @@ static int tegra30_ahub_resume(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(dev);
return ret;
+ }
ret = regcache_sync(ahub->regmap_ahub);
ret |= regcache_sync(ahub->regmap_apbif);
pm_runtime_put(dev);
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index 0b176ea24914..bf155c5092f0 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -551,8 +551,10 @@ static int tegra30_i2s_resume(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(dev);
return ret;
+ }
ret = regcache_sync(i2s->regmap);
pm_runtime_put(dev);
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index 69bc9461974b..301850df368d 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -173,6 +173,7 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
struct snd_soc_component *component = codec_dai->component;
struct snd_soc_card *card = rtd->card;
struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
+ int shrt = 0;
if (gpio_is_valid(machine->gpio_hp_det)) {
tegra_wm8903_hp_jack_gpio.gpio = machine->gpio_hp_det;
@@ -185,12 +186,15 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
&tegra_wm8903_hp_jack_gpio);
}
+ if (of_property_read_bool(card->dev->of_node, "nvidia,headset"))
+ shrt = SND_JACK_MICROPHONE;
+
snd_soc_card_jack_new(rtd->card, "Mic Jack", SND_JACK_MICROPHONE,
&tegra_wm8903_mic_jack,
tegra_wm8903_mic_jack_pins,
ARRAY_SIZE(tegra_wm8903_mic_jack_pins));
wm8903_mic_detect(component, &tegra_wm8903_mic_jack, SND_JACK_MICROPHONE,
- 0);
+ shrt);
snd_soc_dapm_force_enable_pin(&card->dapm, "MICBIAS");
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 746a72e23cf9..ce8925e8419e 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -189,9 +189,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
ctrlif, interface);
return -EINVAL;
}
- usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
-
- return 0;
+ return usb_driver_claim_interface(&usb_audio_driver, iface,
+ USB_AUDIO_IFACE_UNUSED);
}
if ((altsd->bInterfaceClass != USB_CLASS_AUDIO &&
@@ -211,7 +210,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
if (! snd_usb_parse_audio_interface(chip, interface)) {
usb_set_interface(dev, interface, 0); /* reset the current interface */
- usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
+ return usb_driver_claim_interface(&usb_audio_driver, iface,
+ USB_AUDIO_IFACE_UNUSED);
}
return 0;
@@ -668,10 +668,14 @@ static int usb_audio_probe(struct usb_interface *intf,
goto __error;
}
- /* we are allowed to call snd_card_register() many times */
- err = snd_card_register(chip->card);
- if (err < 0)
- goto __error;
+ /* we are allowed to call snd_card_register() many times, but first
+ * check to see if a device needs to skip it or do anything special
+ */
+ if (!snd_usb_registration_quirk(chip, ifnum)) {
+ err = snd_card_register(chip->card);
+ if (err < 0)
+ goto __error;
+ }
usb_chip[chip->index] = chip;
chip->num_interfaces++;
@@ -703,7 +707,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
struct snd_card *card;
struct list_head *p;
- if (chip == (void *)-1L)
+ if (chip == USB_AUDIO_IFACE_UNUSED)
return;
card = chip->card;
@@ -803,12 +807,9 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
struct usb_mixer_interface *mixer;
struct list_head *p;
- if (chip == (void *)-1L)
+ if (chip == USB_AUDIO_IFACE_UNUSED)
return 0;
- chip->autosuspended = !!PMSG_IS_AUTO(message);
- if (!chip->autosuspended)
- snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
if (!chip->num_suspended_intf++) {
list_for_each_entry(as, &chip->pcm_list, list) {
snd_pcm_suspend_all(as->pcm);
@@ -822,6 +823,11 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
snd_usb_mixer_suspend(mixer);
}
+ if (!PMSG_IS_AUTO(message) && !chip->system_suspend) {
+ snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
+ chip->system_suspend = chip->num_suspended_intf;
+ }
+
return 0;
}
@@ -833,12 +839,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
struct list_head *p;
int err = 0;
- if (chip == (void *)-1L)
- return 0;
- if (--chip->num_suspended_intf)
+ if (chip == USB_AUDIO_IFACE_UNUSED)
return 0;
atomic_inc(&chip->active); /* avoid autopm */
+ if (chip->num_suspended_intf > 1)
+ goto out;
list_for_each_entry(as, &chip->pcm_list, list) {
err = snd_usb_pcm_resume(as);
@@ -860,9 +866,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
snd_usbmidi_resume(p);
}
- if (!chip->autosuspended)
+ out:
+ if (chip->num_suspended_intf == chip->system_suspend) {
snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0);
- chip->autosuspended = 0;
+ chip->system_suspend = 0;
+ }
+ chip->num_suspended_intf--;
err_out:
atomic_dec(&chip->active); /* allow autopm after this point */
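The card.c suspend/resume rework above replaces the old autosuspended flag with counters: system_suspend latches num_suspended_intf at the moment of the first non-auto suspend, and the card power state only flips back to D0 once the resume path has brought the counter back down to that latched value. A toy model of the bookkeeping, with all USB and ALSA specifics stripped out:

#include <stdbool.h>
#include <stdio.h>

struct chip_model {
	int num_suspended_intf;
	int system_suspend;	/* 0 while not system-suspended */
	bool powered;		/* stands in for SNDRV_CTL_POWER_D0/D3hot */
};

static void intf_suspend(struct chip_model *c, bool is_auto)
{
	c->num_suspended_intf++;
	if (!is_auto && !c->system_suspend) {
		c->powered = false;
		c->system_suspend = c->num_suspended_intf;
	}
}

static void intf_resume(struct chip_model *c)
{
	if (c->num_suspended_intf == c->system_suspend) {
		c->powered = true;
		c->system_suspend = 0;
	}
	c->num_suspended_intf--;
}

int main(void)
{
	struct chip_model c = { 0, 0, true };

	intf_suspend(&c, false);	/* system suspend hits two interfaces */
	intf_suspend(&c, false);

	intf_resume(&c);
	printf("after 1st resume: powered=%d\n", c.powered);	/* still 0 */
	intf_resume(&c);
	printf("after 2nd resume: powered=%d\n", c.powered);	/* back to 1 */
	return 0;
}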
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 7f11655bde50..f30fec68e2ca 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -129,6 +129,7 @@ struct snd_usb_substream {
unsigned int tx_length_quirk:1; /* add length specifier to transfers */
unsigned int fmt_type; /* USB audio format type (1-3) */
unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */
+ unsigned int stream_offset_adj; /* Bytes to drop from beginning of stream (for non-compliant devices) */
unsigned int running: 1; /* running status */
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index bfe5540030b8..863ac42076e5 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -273,7 +273,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
selector = snd_usb_find_clock_selector(chip->ctrl_intf, entity_id);
if (selector) {
- int ret, i, cur;
+ int ret, i, cur, err;
/* the entity ID we are looking for is a selector.
* find out what it currently selects */
@@ -295,13 +295,17 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
ret = __uac_clock_find_source(chip, fmt,
selector->baCSourceID[ret - 1],
visited, validate);
+ if (ret > 0) {
+ err = uac_clock_selector_set_val(chip, entity_id, cur);
+ if (err < 0)
+ return err;
+ }
+
if (!validate || ret > 0 || !chip->autoclock)
return ret;
/* The current clock source is invalid, try others. */
for (i = 1; i <= selector->bNrInPins; i++) {
- int err;
-
if (i == cur)
continue;
@@ -367,7 +371,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
selector = snd_usb_find_clock_selector_v3(chip->ctrl_intf, entity_id);
if (selector) {
- int ret, i, cur;
+ int ret, i, cur, err;
/* the entity ID we are looking for is a selector.
* find out what it currently selects */
@@ -389,6 +393,12 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
ret = __uac3_clock_find_source(chip, fmt,
selector->baCSourceID[ret - 1],
visited, validate);
+ if (ret > 0) {
+ err = uac_clock_selector_set_val(chip, entity_id, cur);
+ if (err < 0)
+ return err;
+ }
+
if (!validate || ret > 0 || !chip->autoclock)
return ret;
@@ -508,6 +518,12 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
}
crate = data[0] | (data[1] << 8) | (data[2] << 16);
+ if (!crate) {
+ dev_info(&dev->dev, "failed to read current rate; disabling the check\n");
+ chip->sample_rate_read_error = 3; /* three strikes, see above */
+ return 0;
+ }
+
if (crate != rate) {
dev_warn(&dev->dev, "current rate %d is different from the runtime rate %d\n", crate, rate);
// runtime->rate = crate;
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index aeb74cc6ceff..727ef9889e94 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -334,17 +334,17 @@ static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
ep->next_packet_read_pos %= MAX_URBS;
/* take URB out of FIFO */
- if (!list_empty(&ep->ready_playback_urbs))
+ if (!list_empty(&ep->ready_playback_urbs)) {
ctx = list_first_entry(&ep->ready_playback_urbs,
struct snd_urb_ctx, ready_list);
+ list_del_init(&ctx->ready_list);
+ }
}
spin_unlock_irqrestore(&ep->lock, flags);
if (ctx == NULL)
return;
- list_del_init(&ctx->ready_list);
-
/* copy over the length information */
for (i = 0; i < packet->packets; i++)
ctx->packet_size[i] = packet->packet_size[i];
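The endpoint.c change above is a race fix: the URB context must be unlinked from ready_playback_urbs while ep->lock is still held, otherwise two contexts of the queueing path can pull the same entry off the FIFO. A generic sketch of that pattern; struct foo_ctx, struct foo_endpoint and the field names are placeholders rather than the driver's real types.

#include <linux/list.h>
#include <linux/spinlock.h>

struct foo_ctx {
	struct list_head node;
	/* ... payload ... */
};

struct foo_endpoint {
	spinlock_t lock;
	struct list_head ready_list;
};

/* Pop one pending item off a shared FIFO. */
static struct foo_ctx *foo_pop_ready(struct foo_endpoint *ep)
{
	struct foo_ctx *ctx = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);
	if (!list_empty(&ep->ready_list)) {
		ctx = list_first_entry(&ep->ready_list, struct foo_ctx, node);
		/* unlink while still holding the lock so no other context
		 * can dequeue the same entry */
		list_del_init(&ctx->node);
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	return ctx;	/* may be NULL when the FIFO was empty */
}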
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 9d27429ed403..a3daf93c565a 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -53,6 +53,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
case UAC_VERSION_1:
default: {
struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
+ if (format >= 64)
+ return 0; /* invalid format */
sample_width = fmt->bBitResolution;
sample_bytes = fmt->bSubframeSize;
format = 1ULL << format;
@@ -238,6 +240,52 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
}
/*
+ * Many Focusrite devices support a limited set of sampling rates per
+ * altsetting. The maximum rate is exposed in the last 4 bytes of the
+ * Format Type descriptor, which has a non-standard bLength = 10.
+ */
+static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
+ struct audioformat *fp,
+ unsigned int rate)
+{
+ struct usb_interface *iface;
+ struct usb_host_interface *alts;
+ unsigned char *fmt;
+ unsigned int max_rate;
+
+ iface = usb_ifnum_to_if(chip->dev, fp->iface);
+ if (!iface)
+ return true;
+
+ alts = &iface->altsetting[fp->altset_idx];
+ fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen,
+ NULL, UAC_FORMAT_TYPE);
+ if (!fmt)
+ return true;
+
+ if (fmt[0] == 10) { /* bLength */
+ max_rate = combine_quad(&fmt[6]);
+
+ /* Validate max rate */
+ if (max_rate != 48000 &&
+ max_rate != 96000 &&
+ max_rate != 192000 &&
+ max_rate != 384000) {
+
+ usb_audio_info(chip,
+ "%u:%d : unexpected max rate: %u\n",
+ fp->iface, fp->altsetting, max_rate);
+
+ return true;
+ }
+
+ return rate <= max_rate;
+ }
+
+ return true;
+}
+
+/*
* Helper function to walk the array of sample rate triplets reported by
* the device. The problem is that we need to parse the whole array first
* to know how many sample rates to expect.
@@ -273,6 +321,11 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip,
}
for (rate = min; rate <= max; rate += res) {
+ /* Filter out invalid rates on Focusrite devices */
+ if (USB_ID_VENDOR(chip->usb_id) == 0x1235 &&
+ !focusrite_valid_sample_rate(chip, fp, rate))
+ goto skip_rate;
+
if (fp->rate_table)
fp->rate_table[nr_rates] = rate;
if (!fp->rate_min || rate < fp->rate_min)
@@ -287,6 +340,7 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip,
break;
}
+skip_rate:
/* avoid endless loop */
if (res == 0)
break;
diff --git a/sound/usb/line6/capture.c b/sound/usb/line6/capture.c
index d8a14d769f48..8efd9f00cb72 100644
--- a/sound/usb/line6/capture.c
+++ b/sound/usb/line6/capture.c
@@ -291,6 +291,8 @@ int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm)
urb->interval = LINE6_ISO_INTERVAL;
urb->error_count = 0;
urb->complete = audio_in_callback;
+ if (usb_urb_ep_type_check(urb))
+ return -EINVAL;
}
return 0;
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 0193d4989107..2163fd6dce66 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -705,6 +705,10 @@ static int line6_init_cap_control(struct usb_line6 *line6)
line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
if (!line6->buffer_message)
return -ENOMEM;
+
+ ret = line6_init_midi(line6);
+ if (ret < 0)
+ return ret;
} else {
ret = line6_hwdep_init(line6);
if (ret < 0)
@@ -835,7 +839,7 @@ void line6_disconnect(struct usb_interface *interface)
if (WARN_ON(usbdev != line6->usbdev))
return;
- cancel_delayed_work(&line6->startup_work);
+ cancel_delayed_work_sync(&line6->startup_work);
if (line6->urb_listen != NULL)
line6_stop_listen(line6);
diff --git a/sound/usb/line6/playback.c b/sound/usb/line6/playback.c
index dec89d2beb57..3fb86318f9c6 100644
--- a/sound/usb/line6/playback.c
+++ b/sound/usb/line6/playback.c
@@ -436,6 +436,8 @@ int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm)
urb->interval = LINE6_ISO_INTERVAL;
urb->error_count = 0;
urb->complete = audio_out_callback;
+ if (usb_urb_ep_type_check(urb))
+ return -EINVAL;
}
return 0;
diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
index 020c81818951..dff8e7d5f305 100644
--- a/sound/usb/line6/pod.c
+++ b/sound/usb/line6/pod.c
@@ -420,11 +420,6 @@ static int pod_init(struct usb_line6 *line6,
if (err < 0)
return err;
- /* initialize MIDI subsystem: */
- err = line6_init_midi(line6);
- if (err < 0)
- return err;
-
/* initialize PCM subsystem: */
err = line6_init_pcm(line6, &pod_pcm_properties);
if (err < 0)
diff --git a/sound/usb/line6/variax.c b/sound/usb/line6/variax.c
index e8c852b2ce35..163a08a8244d 100644
--- a/sound/usb/line6/variax.c
+++ b/sound/usb/line6/variax.c
@@ -217,7 +217,6 @@ static int variax_init(struct usb_line6 *line6,
const struct usb_device_id *id)
{
struct usb_line6_variax *variax = (struct usb_line6_variax *) line6;
- int err;
line6->process_message = line6_variax_process_message;
line6->disconnect = line6_variax_disconnect;
@@ -233,11 +232,6 @@ static int variax_init(struct usb_line6 *line6,
if (variax->buffer_activate == NULL)
return -ENOMEM;
- /* initialize MIDI subsystem: */
- err = line6_init_midi(&variax->line6);
- if (err < 0)
- return err;
-
/* initiate startup procedure: */
variax_startup1(variax);
return 0;
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index dcfc546d81b9..1ac8c84c3369 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1333,7 +1333,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
error:
snd_usbmidi_in_endpoint_delete(ep);
- return -ENOMEM;
+ return err;
}
/*
@@ -1500,6 +1500,8 @@ void snd_usbmidi_disconnect(struct list_head *p)
spin_unlock_irq(&umidi->disc_lock);
up_write(&umidi->disc_rwsem);
+ del_timer_sync(&umidi->error_timer);
+
for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i];
if (ep->out)
@@ -1526,7 +1528,6 @@ void snd_usbmidi_disconnect(struct list_head *p)
ep->in = NULL;
}
}
- del_timer_sync(&umidi->error_timer);
}
EXPORT_SYMBOL(snd_usbmidi_disconnect);
@@ -1827,6 +1828,28 @@ static int snd_usbmidi_create_endpoints(struct snd_usb_midi *umidi,
return 0;
}
+static struct usb_ms_endpoint_descriptor *find_usb_ms_endpoint_descriptor(
+ struct usb_host_endpoint *hostep)
+{
+ unsigned char *extra = hostep->extra;
+ int extralen = hostep->extralen;
+
+ while (extralen > 3) {
+ struct usb_ms_endpoint_descriptor *ms_ep =
+ (struct usb_ms_endpoint_descriptor *)extra;
+
+ if (ms_ep->bLength > 3 &&
+ ms_ep->bDescriptorType == USB_DT_CS_ENDPOINT &&
+ ms_ep->bDescriptorSubtype == UAC_MS_GENERAL)
+ return ms_ep;
+ if (!extra[0])
+ break;
+ extralen -= extra[0];
+ extra += extra[0];
+ }
+ return NULL;
+}
+
/*
* Returns MIDIStreaming device capabilities.
*/
@@ -1864,11 +1887,14 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
ep = get_ep_desc(hostep);
if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep))
continue;
- ms_ep = (struct usb_ms_endpoint_descriptor *)hostep->extra;
- if (hostep->extralen < 4 ||
- ms_ep->bLength < 4 ||
- ms_ep->bDescriptorType != USB_DT_CS_ENDPOINT ||
- ms_ep->bDescriptorSubtype != UAC_MS_GENERAL)
+ ms_ep = find_usb_ms_endpoint_descriptor(hostep);
+ if (!ms_ep)
+ continue;
+ if (ms_ep->bLength <= sizeof(*ms_ep))
+ continue;
+ if (ms_ep->bNumEmbMIDIJack > 0x10)
+ continue;
+ if (ms_ep->bLength < sizeof(*ms_ep) + ms_ep->bNumEmbMIDIJack)
continue;
if (usb_endpoint_dir_out(ep)) {
if (endpoints[epidx].out_ep) {
@@ -2122,6 +2148,8 @@ static int snd_usbmidi_detect_roland(struct snd_usb_midi *umidi,
cs_desc[1] == USB_DT_CS_INTERFACE &&
cs_desc[2] == 0xf1 &&
cs_desc[3] == 0x02) {
+ if (cs_desc[4] > 0x10 || cs_desc[5] > 0x10)
+ continue;
endpoint->in_cables = (1 << cs_desc[4]) - 1;
endpoint->out_cables = (1 << cs_desc[5]) - 1;
return snd_usbmidi_detect_endpoints(umidi, endpoint, 1);
@@ -2283,16 +2311,22 @@ void snd_usbmidi_input_stop(struct list_head *p)
}
EXPORT_SYMBOL(snd_usbmidi_input_stop);
-static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint *ep)
+static void snd_usbmidi_input_start_ep(struct snd_usb_midi *umidi,
+ struct snd_usb_midi_in_endpoint *ep)
{
unsigned int i;
+ unsigned long flags;
if (!ep)
return;
for (i = 0; i < INPUT_URBS; ++i) {
struct urb *urb = ep->urbs[i];
- urb->dev = ep->umidi->dev;
- snd_usbmidi_submit_urb(urb, GFP_KERNEL);
+ spin_lock_irqsave(&umidi->disc_lock, flags);
+ if (!atomic_read(&urb->use_count)) {
+ urb->dev = ep->umidi->dev;
+ snd_usbmidi_submit_urb(urb, GFP_ATOMIC);
+ }
+ spin_unlock_irqrestore(&umidi->disc_lock, flags);
}
}
@@ -2308,7 +2342,7 @@ void snd_usbmidi_input_start(struct list_head *p)
if (umidi->input_running || !umidi->opened[1])
return;
for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
- snd_usbmidi_input_start_ep(umidi->endpoints[i].in);
+ snd_usbmidi_input_start_ep(umidi, umidi->endpoints[i].in);
umidi->input_running = 1;
}
EXPORT_SYMBOL(snd_usbmidi_input_start);
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index f2e173b9691d..4ad6eeb43476 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -591,8 +591,9 @@ static int check_matrix_bitmap(unsigned char *bmap,
* if failed, give up and free the control instance.
*/
-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
- struct snd_kcontrol *kctl)
+int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
+ struct snd_kcontrol *kctl,
+ bool is_std_info)
{
struct usb_mixer_interface *mixer = list->mixer;
int err;
@@ -606,6 +607,7 @@ int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
return err;
}
list->kctl = kctl;
+ list->is_std_info = is_std_info;
list->next_id_elem = mixer->id_elems[list->id];
mixer->id_elems[list->id] = list;
return 0;
@@ -1043,7 +1045,7 @@ struct usb_feature_control_info {
int type_uac2; /* data type for uac2 if different from uac1, else -1 */
};
-static struct usb_feature_control_info audio_feature_info[] = {
+static const struct usb_feature_control_info audio_feature_info[] = {
{ UAC_FU_MUTE, "Mute", USB_MIXER_INV_BOOLEAN, -1 },
{ UAC_FU_VOLUME, "Volume", USB_MIXER_S16, -1 },
{ UAC_FU_BASS, "Tone Control - Bass", USB_MIXER_S8, -1 },
@@ -1186,6 +1188,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
cval->res = 384;
}
break;
+ case USB_ID(0x0495, 0x3042): /* ESS Technology Asus USB DAC */
+ if ((strstr(kctl->id.name, "Playback Volume") != NULL) ||
+ strstr(kctl->id.name, "Capture Volume") != NULL) {
+ cval->min >>= 8;
+ cval->max = 0;
+ cval->res = 1;
+ }
+ break;
}
}
@@ -1461,7 +1471,7 @@ error:
usb_audio_err(chip,
"cannot get connectors status: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
UAC_GET_CUR, validx, idx, cval->val_type);
- return ret;
+ return filter_error(cval, ret);
}
ucontrol->value.integer.value[0] = val;
@@ -1549,7 +1559,7 @@ static void check_no_speaker_on_headset(struct snd_kcontrol *kctl,
strlcpy(kctl->id.name, "Headphone", sizeof(kctl->id.name));
}
-static struct usb_feature_control_info *get_feature_control_info(int control)
+static const struct usb_feature_control_info *get_feature_control_info(int control)
{
int i;
@@ -1567,7 +1577,7 @@ static void __build_feature_ctl(struct usb_mixer_interface *mixer,
struct usb_audio_term *oterm,
int unitid, int nameid, int readonly_mask)
{
- struct usb_feature_control_info *ctl_info;
+ const struct usb_feature_control_info *ctl_info;
unsigned int len = 0;
int mapped_name = 0;
struct snd_kcontrol *kctl;
@@ -1689,6 +1699,16 @@ static void __build_feature_ctl(struct usb_mixer_interface *mixer,
/* get min/max values */
get_min_max_with_quirks(cval, 0, kctl);
+ /* skip a bogus volume range */
+ if (cval->max <= cval->min) {
+ usb_audio_dbg(mixer->chip,
+ "[%d] FU [%s] skipped due to invalid volume\n",
+ cval->head.id, kctl->id.name);
+ snd_ctl_free_one(kctl);
+ return;
+ }
+
+
if (control == UAC_FU_VOLUME) {
check_mapped_dB(map, cval);
if (cval->dBmin < cval->dBmax || !cval->initialized) {
@@ -1765,10 +1785,16 @@ static void get_connector_control_name(struct usb_mixer_interface *mixer,
/* Build a mixer control for a UAC connector control (jack-detect) */
static void build_connector_control(struct usb_mixer_interface *mixer,
+ const struct usbmix_name_map *imap,
struct usb_audio_term *term, bool is_input)
{
struct snd_kcontrol *kctl;
struct usb_mixer_elem_info *cval;
+ const struct usbmix_name_map *map;
+
+ map = find_map(imap, term->id, 0);
+ if (check_ignored_ctl(map))
+ return;
cval = kzalloc(sizeof(*cval), GFP_KERNEL);
if (!cval)
@@ -1799,8 +1825,12 @@ static void build_connector_control(struct usb_mixer_interface *mixer,
usb_mixer_elem_info_free(cval);
return;
}
- get_connector_control_name(mixer, term, is_input, kctl->id.name,
- sizeof(kctl->id.name));
+
+ if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)))
+ strlcat(kctl->id.name, " Jack", sizeof(kctl->id.name));
+ else
+ get_connector_control_name(mixer, term, is_input, kctl->id.name,
+ sizeof(kctl->id.name));
kctl->private_free = snd_usb_mixer_elem_free;
snd_usb_mixer_add_control(&cval->head, kctl);
}
@@ -2107,8 +2137,9 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
check_input_term(state, term_id, &iterm);
/* Check for jack detection. */
- if (uac_v2v3_control_is_readable(bmctls, control))
- build_connector_control(state->mixer, &iterm, true);
+ if ((iterm.type & 0xff00) != 0x0100 &&
+ uac_v2v3_control_is_readable(bmctls, control))
+ build_connector_control(state->mixer, state->map, &iterm, true);
return 0;
}
@@ -2225,7 +2256,7 @@ static const struct snd_kcontrol_new mixer_procunit_ctl = {
*/
struct procunit_value_info {
int control;
- char *suffix;
+ const char *suffix;
int val_type;
int min_value;
};
@@ -2233,44 +2264,44 @@ struct procunit_value_info {
struct procunit_info {
int type;
char *name;
- struct procunit_value_info *values;
+ const struct procunit_value_info *values;
};
-static struct procunit_value_info undefined_proc_info[] = {
+static const struct procunit_value_info undefined_proc_info[] = {
{ 0x00, "Control Undefined", 0 },
{ 0 }
};
-static struct procunit_value_info updown_proc_info[] = {
+static const struct procunit_value_info updown_proc_info[] = {
{ UAC_UD_ENABLE, "Switch", USB_MIXER_BOOLEAN },
{ UAC_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 },
{ 0 }
};
-static struct procunit_value_info prologic_proc_info[] = {
+static const struct procunit_value_info prologic_proc_info[] = {
{ UAC_DP_ENABLE, "Switch", USB_MIXER_BOOLEAN },
{ UAC_DP_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 },
{ 0 }
};
-static struct procunit_value_info threed_enh_proc_info[] = {
+static const struct procunit_value_info threed_enh_proc_info[] = {
{ UAC_3D_ENABLE, "Switch", USB_MIXER_BOOLEAN },
{ UAC_3D_SPACE, "Spaciousness", USB_MIXER_U8 },
{ 0 }
};
-static struct procunit_value_info reverb_proc_info[] = {
+static const struct procunit_value_info reverb_proc_info[] = {
{ UAC_REVERB_ENABLE, "Switch", USB_MIXER_BOOLEAN },
{ UAC_REVERB_LEVEL, "Level", USB_MIXER_U8 },
{ UAC_REVERB_TIME, "Time", USB_MIXER_U16 },
{ UAC_REVERB_FEEDBACK, "Feedback", USB_MIXER_U8 },
{ 0 }
};
-static struct procunit_value_info chorus_proc_info[] = {
+static const struct procunit_value_info chorus_proc_info[] = {
{ UAC_CHORUS_ENABLE, "Switch", USB_MIXER_BOOLEAN },
{ UAC_CHORUS_LEVEL, "Level", USB_MIXER_U8 },
{ UAC_CHORUS_RATE, "Rate", USB_MIXER_U16 },
{ UAC_CHORUS_DEPTH, "Depth", USB_MIXER_U16 },
{ 0 }
};
-static struct procunit_value_info dcr_proc_info[] = {
+static const struct procunit_value_info dcr_proc_info[] = {
{ UAC_DCR_ENABLE, "Switch", USB_MIXER_BOOLEAN },
{ UAC_DCR_RATE, "Ratio", USB_MIXER_U16 },
{ UAC_DCR_MAXAMPL, "Max Amp", USB_MIXER_S16 },
@@ -2280,7 +2311,7 @@ static struct procunit_value_info dcr_proc_info[] = {
{ 0 }
};
-static struct procunit_info procunits[] = {
+static const struct procunit_info procunits[] = {
{ UAC_PROCESS_UP_DOWNMIX, "Up Down", updown_proc_info },
{ UAC_PROCESS_DOLBY_PROLOGIC, "Dolby Prologic", prologic_proc_info },
{ UAC_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", threed_enh_proc_info },
@@ -2290,16 +2321,16 @@ static struct procunit_info procunits[] = {
{ 0 },
};
-static struct procunit_value_info uac3_updown_proc_info[] = {
+static const struct procunit_value_info uac3_updown_proc_info[] = {
{ UAC3_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 },
{ 0 }
};
-static struct procunit_value_info uac3_stereo_ext_proc_info[] = {
+static const struct procunit_value_info uac3_stereo_ext_proc_info[] = {
{ UAC3_EXT_WIDTH_CONTROL, "Width Control", USB_MIXER_U8 },
{ 0 }
};
-static struct procunit_info uac3_procunits[] = {
+static const struct procunit_info uac3_procunits[] = {
{ UAC3_PROCESS_UP_DOWNMIX, "Up Down", uac3_updown_proc_info },
{ UAC3_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", uac3_stereo_ext_proc_info },
{ UAC3_PROCESS_MULTI_FUNCTION, "Multi-Function", undefined_proc_info },
@@ -2309,23 +2340,23 @@ static struct procunit_info uac3_procunits[] = {
/*
* predefined data for extension units
*/
-static struct procunit_value_info clock_rate_xu_info[] = {
+static const struct procunit_value_info clock_rate_xu_info[] = {
{ USB_XU_CLOCK_RATE_SELECTOR, "Selector", USB_MIXER_U8, 0 },
{ 0 }
};
-static struct procunit_value_info clock_source_xu_info[] = {
+static const struct procunit_value_info clock_source_xu_info[] = {
{ USB_XU_CLOCK_SOURCE_SELECTOR, "External", USB_MIXER_BOOLEAN },
{ 0 }
};
-static struct procunit_value_info spdif_format_xu_info[] = {
+static const struct procunit_value_info spdif_format_xu_info[] = {
{ USB_XU_DIGITAL_FORMAT_SELECTOR, "SPDIF/AC3", USB_MIXER_BOOLEAN },
{ 0 }
};
-static struct procunit_value_info soft_limit_xu_info[] = {
+static const struct procunit_value_info soft_limit_xu_info[] = {
{ USB_XU_SOFT_LIMIT_SELECTOR, " ", USB_MIXER_BOOLEAN },
{ 0 }
};
-static struct procunit_info extunits[] = {
+static const struct procunit_info extunits[] = {
{ USB_XU_CLOCK_RATE, "Clock rate", clock_rate_xu_info },
{ USB_XU_CLOCK_SOURCE, "DigitalIn CLK source", clock_source_xu_info },
{ USB_XU_DIGITAL_IO_STATUS, "DigitalOut format:", spdif_format_xu_info },
@@ -2337,7 +2368,7 @@ static struct procunit_info extunits[] = {
* build a processing/extension unit
*/
static int build_audio_procunit(struct mixer_build *state, int unitid,
- void *raw_desc, struct procunit_info *list,
+ void *raw_desc, const struct procunit_info *list,
bool extension_unit)
{
struct uac_processing_unit_descriptor *desc = raw_desc;
@@ -2345,14 +2376,14 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
int i, err, nameid, type, len;
- struct procunit_info *info;
- struct procunit_value_info *valinfo;
+ const struct procunit_info *info;
+ const struct procunit_value_info *valinfo;
const struct usbmix_name_map *map;
- static struct procunit_value_info default_value_info[] = {
+ static const struct procunit_value_info default_value_info[] = {
{ 0x01, "Switch", USB_MIXER_BOOLEAN },
{ 0 }
};
- static struct procunit_info default_info = {
+ static const struct procunit_info default_info = {
0, NULL, default_value_info
};
const char *name = extension_unit ?
@@ -2830,7 +2861,7 @@ struct uac3_badd_profile {
int st_chmask; /* side tone mixing channel mask */
};
-static struct uac3_badd_profile uac3_badd_profiles[] = {
+static const struct uac3_badd_profile uac3_badd_profiles[] = {
{
/*
* BAIF, BAOF or combination of both
@@ -2891,7 +2922,7 @@ static struct uac3_badd_profile uac3_badd_profiles[] = {
};
static bool uac3_badd_func_has_valid_channels(struct usb_mixer_interface *mixer,
- struct uac3_badd_profile *f,
+ const struct uac3_badd_profile *f,
int c_chmask, int p_chmask)
{
/*
@@ -2935,7 +2966,7 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer,
struct usb_device *dev = mixer->chip->dev;
struct usb_interface_assoc_descriptor *assoc;
int badd_profile = mixer->chip->badd_profile;
- struct uac3_badd_profile *f;
+ const struct uac3_badd_profile *f;
const struct usbmix_ctl_map *map;
int p_chmask = 0, c_chmask = 0, st_chmask = 0;
int i;
@@ -3069,13 +3100,13 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer,
memset(&iterm, 0, sizeof(iterm));
iterm.id = UAC3_BADD_IT_ID4;
iterm.type = UAC_BIDIR_TERMINAL_HEADSET;
- build_connector_control(mixer, &iterm, true);
+ build_connector_control(mixer, map->map, &iterm, true);
/* Output Term - Insertion control */
memset(&oterm, 0, sizeof(oterm));
oterm.id = UAC3_BADD_OT_ID3;
oterm.type = UAC_BIDIR_TERMINAL_HEADSET;
- build_connector_control(mixer, &oterm, false);
+ build_connector_control(mixer, map->map, &oterm, false);
}
return 0;
@@ -3104,7 +3135,8 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
if (map->id == state.chip->usb_id) {
state.map = map->map;
state.selector_map = map->selector_map;
- mixer->ignore_ctl_error = map->ignore_ctl_error;
+ mixer->connector_map = map->connector_map;
+ mixer->ignore_ctl_error |= map->ignore_ctl_error;
break;
}
}
@@ -3147,10 +3179,11 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
if (err < 0 && err != -EINVAL)
return err;
- if (uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls),
+ if ((state.oterm.type & 0xff00) != 0x0100 &&
+ uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls),
UAC2_TE_CONNECTOR)) {
- build_connector_control(state.mixer, &state.oterm,
- false);
+ build_connector_control(state.mixer, state.map,
+ &state.oterm, false);
}
} else { /* UAC_VERSION_3 */
struct uac3_output_terminal_descriptor *desc = p;
@@ -3172,10 +3205,11 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
if (err < 0 && err != -EINVAL)
return err;
- if (uac_v2v3_control_is_readable(le32_to_cpu(desc->bmControls),
+ if ((state.oterm.type & 0xff00) != 0x0100 &&
+ uac_v2v3_control_is_readable(le32_to_cpu(desc->bmControls),
UAC3_TE_INSERTION)) {
- build_connector_control(state.mixer, &state.oterm,
- false);
+ build_connector_control(state.mixer, state.map,
+ &state.oterm, false);
}
}
}
@@ -3183,13 +3217,38 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
return 0;
}
+static int delegate_notify(struct usb_mixer_interface *mixer, int unitid,
+ u8 *control, u8 *channel)
+{
+ const struct usbmix_connector_map *map = mixer->connector_map;
+
+ if (!map)
+ return unitid;
+
+ for (; map->id; map++) {
+ if (map->id == unitid) {
+ if (control && map->control)
+ *control = map->control;
+ if (channel && map->channel)
+ *channel = map->channel;
+ return map->delegated_id;
+ }
+ }
+ return unitid;
+}
+
void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
{
struct usb_mixer_elem_list *list;
+ unitid = delegate_notify(mixer, unitid, NULL, NULL);
+
for_each_mixer_elem(list, mixer, unitid) {
- struct usb_mixer_elem_info *info =
- mixer_elem_list_to_info(list);
+ struct usb_mixer_elem_info *info;
+
+ if (!list->is_std_info)
+ continue;
+ info = mixer_elem_list_to_info(list);
/* invalidate cache, so the value is read from the device */
info->cached = 0;
snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
@@ -3201,7 +3260,7 @@ static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
struct usb_mixer_elem_list *list)
{
struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
- static char *val_types[] = {"BOOLEAN", "INV_BOOLEAN",
+ static const char * const val_types[] = {"BOOLEAN", "INV_BOOLEAN",
"S8", "U8", "S16", "U16"};
snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, "
"channels=%i, type=\"%s\"\n", cval->head.id,
@@ -3256,6 +3315,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
return;
}
+ unitid = delegate_notify(mixer, unitid, &control, &channel);
+
for_each_mixer_elem(list, mixer, unitid)
count++;
@@ -3267,6 +3328,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
if (!list->kctl)
continue;
+ if (!list->is_std_info)
+ continue;
info = mixer_elem_list_to_info(list);
if (count > 1 && info->control != control)
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 3d12af8bf191..f7e6fe1a96f9 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -4,6 +4,13 @@
#include <sound/info.h>
+struct usbmix_connector_map {
+ u8 id;
+ u8 delegated_id;
+ u8 control;
+ u8 channel;
+};
+
struct usb_mixer_interface {
struct snd_usb_audio *chip;
struct usb_host_interface *hostif;
@@ -16,6 +23,9 @@ struct usb_mixer_interface {
/* the usb audio specification version this interface complies to */
int protocol;
+ /* optional connector delegation map */
+ const struct usbmix_connector_map *connector_map;
+
/* Sound Blaster remote control stuff */
const struct rc_config *rc_cfg;
u32 rc_code;
@@ -49,6 +59,7 @@ struct usb_mixer_elem_list {
struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */
struct snd_kcontrol *kctl;
unsigned int id;
+ bool is_std_info;
usb_mixer_elem_dump_func_t dump;
usb_mixer_elem_resume_func_t resume;
};
@@ -86,8 +97,12 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid);
int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
int request, int validx, int value_set);
-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
- struct snd_kcontrol *kctl);
+int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
+ struct snd_kcontrol *kctl,
+ bool is_std_info);
+
+#define snd_usb_mixer_add_control(list, kctl) \
+ snd_usb_mixer_add_list(list, kctl, true)
void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
struct usb_mixer_interface *mixer,
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 71069e110897..1d4e535e2c11 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -28,7 +28,7 @@ struct usbmix_name_map {
int id;
const char *name;
int control;
- struct usbmix_dB_map *dB;
+ const struct usbmix_dB_map *dB;
};
struct usbmix_selector_map {
@@ -41,6 +41,7 @@ struct usbmix_ctl_map {
u32 id;
const struct usbmix_name_map *map;
const struct usbmix_selector_map *selector_map;
+ const struct usbmix_connector_map *connector_map;
int ignore_ctl_error;
};
@@ -66,7 +67,7 @@ Mic-IN[9] --+->FU[10]----------------------------+ |
++--+->SU[11]-->FU[12] --------------------------------------------------------------------------------------> USB_OUT[13]
*/
-static struct usbmix_name_map extigy_map[] = {
+static const struct usbmix_name_map extigy_map[] = {
/* 1: IT pcm */
{ 2, "PCM Playback" }, /* FU */
/* 3: IT pcm */
@@ -107,12 +108,12 @@ static struct usbmix_name_map extigy_map[] = {
* e.g. no Master and fake PCM volume
* Pavel Mihaylov <bin@bash.info>
*/
-static struct usbmix_dB_map mp3plus_dB_1 = {.min = -4781, .max = 0};
+static const struct usbmix_dB_map mp3plus_dB_1 = {.min = -4781, .max = 0};
/* just guess */
-static struct usbmix_dB_map mp3plus_dB_2 = {.min = -1781, .max = 618};
+static const struct usbmix_dB_map mp3plus_dB_2 = {.min = -1781, .max = 618};
/* just guess */
-static struct usbmix_name_map mp3plus_map[] = {
+static const struct usbmix_name_map mp3plus_map[] = {
/* 1: IT pcm */
/* 2: IT mic */
/* 3: IT line */
@@ -153,7 +154,7 @@ Lin_IN[7]-+--->FU[8]---+ +->EU[23]->FU[28]------------->Spk_OUT[19]
| ^
+->FU[13]--------------------------------------+
*/
-static struct usbmix_name_map audigy2nx_map[] = {
+static const struct usbmix_name_map audigy2nx_map[] = {
/* 1: IT pcm playback */
/* 4: IT digital in */
{ 6, "Digital In Playback" }, /* FU */
@@ -181,12 +182,12 @@ static struct usbmix_name_map audigy2nx_map[] = {
{ 0 } /* terminator */
};
-static struct usbmix_name_map mbox1_map[] = {
+static const struct usbmix_name_map mbox1_map[] = {
{ 1, "Clock" },
{ 0 } /* terminator */
};
-static struct usbmix_selector_map c400_selectors[] = {
+static const struct usbmix_selector_map c400_selectors[] = {
{
.id = 0x80,
.count = 2,
@@ -195,7 +196,7 @@ static struct usbmix_selector_map c400_selectors[] = {
{ 0 } /* terminator */
};
-static struct usbmix_selector_map audigy2nx_selectors[] = {
+static const struct usbmix_selector_map audigy2nx_selectors[] = {
{
.id = 14, /* Capture Source */
.count = 3,
@@ -215,21 +216,21 @@ static struct usbmix_selector_map audigy2nx_selectors[] = {
};
/* Creative SoundBlaster Live! 24-bit External */
-static struct usbmix_name_map live24ext_map[] = {
+static const struct usbmix_name_map live24ext_map[] = {
/* 2: PCM Playback Volume */
{ 5, "Mic Capture" }, /* FU, default PCM Capture Volume */
{ 0 } /* terminator */
};
/* LineX FM Transmitter entry - needed to bypass controls bug */
-static struct usbmix_name_map linex_map[] = {
+static const struct usbmix_name_map linex_map[] = {
/* 1: IT pcm */
/* 2: OT Speaker */
{ 3, "Master" }, /* FU: master volume - left / right / mute */
{ 0 } /* terminator */
};
-static struct usbmix_name_map maya44_map[] = {
+static const struct usbmix_name_map maya44_map[] = {
/* 1: IT line */
{ 2, "Line Playback" }, /* FU */
/* 3: IT line */
@@ -252,7 +253,7 @@ static struct usbmix_name_map maya44_map[] = {
* so this map removes all unwanted sliders from alsamixer
*/
-static struct usbmix_name_map justlink_map[] = {
+static const struct usbmix_name_map justlink_map[] = {
/* 1: IT pcm playback */
/* 2: Not present */
{ 3, NULL}, /* IT mic (No mic input on device) */
@@ -269,7 +270,7 @@ static struct usbmix_name_map justlink_map[] = {
};
/* TerraTec Aureon 5.1 MkII USB */
-static struct usbmix_name_map aureon_51_2_map[] = {
+static const struct usbmix_name_map aureon_51_2_map[] = {
/* 1: IT USB */
/* 2: IT Mic */
/* 3: IT Line */
@@ -288,7 +289,7 @@ static struct usbmix_name_map aureon_51_2_map[] = {
{} /* terminator */
};
-static struct usbmix_name_map scratch_live_map[] = {
+static const struct usbmix_name_map scratch_live_map[] = {
/* 1: IT Line 1 (USB streaming) */
/* 2: OT Line 1 (Speaker) */
/* 3: IT Line 1 (Line connector) */
@@ -304,7 +305,7 @@ static struct usbmix_name_map scratch_live_map[] = {
{ 0 } /* terminator */
};
-static struct usbmix_name_map ebox44_map[] = {
+static const struct usbmix_name_map ebox44_map[] = {
{ 4, NULL }, /* FU */
{ 6, NULL }, /* MU */
{ 7, NULL }, /* FU */
@@ -319,7 +320,7 @@ static struct usbmix_name_map ebox44_map[] = {
* FIXME: or mp3plus_map should use "Capture Source" too,
* so these maps can be merged
*/
-static struct usbmix_name_map hercules_usb51_map[] = {
+static const struct usbmix_name_map hercules_usb51_map[] = {
{ 8, "Capture Source" }, /* SU, default "PCM Capture Source" */
{ 9, "Master Playback" }, /* FU, default "Speaker Playback" */
{ 10, "Mic Boost", 7 }, /* FU, default "Auto Gain Input" */
@@ -330,7 +331,7 @@ static struct usbmix_name_map hercules_usb51_map[] = {
};
/* Plantronics Gamecom 780 has a broken volume control, better to disable it */
-static struct usbmix_name_map gamecom780_map[] = {
+static const struct usbmix_name_map gamecom780_map[] = {
{ 9, NULL }, /* FU, speaker out */
{}
};
@@ -344,12 +345,19 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
};
/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
-static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
-static struct usbmix_name_map bose_companion5_map[] = {
+static const struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
+static const struct usbmix_name_map bose_companion5_map[] = {
{ 3, NULL, .dB = &bose_companion5_dB },
{ 0 } /* terminator */
};
+/* Sennheiser Communications Headset [PC 8]: the dB maximum is wrongly reported as -6 */
+static const struct usbmix_dB_map sennheiser_pc8_dB = {-9500, 0};
+static const struct usbmix_name_map sennheiser_pc8_map[] = {
+ { 9, NULL, .dB = &sennheiser_pc8_dB },
+ { 0 } /* terminator */
+};
+
/*
* Dell usb dock with ALC4020 codec had a firmware problem where it got
* screwed up when zero volume is passed; just skip it as a workaround
@@ -363,11 +371,63 @@ static const struct usbmix_name_map dell_alc4020_map[] = {
{ 0 }
};
+/* Some mobos shipped with a dummy HD-audio show invalid GET_MIN/GET_MAX
+ * responses for the Input Gain Pad (id=19, control=12) and the connector status
+ * for the SPDIF terminal (id=18). Skip them.
+ */
+static const struct usbmix_name_map asus_rog_map[] = {
+ { 18, NULL }, /* OT, connector control */
+ { 19, NULL, 12 }, /* FU, Input Gain Pad */
+ {}
+};
+
+/* TRX40 mobos with Realtek ALC1220-VB */
+static const struct usbmix_name_map trx40_mobo_map[] = {
+ { 18, NULL }, /* OT, IEC958 - broken response, disabled */
+ { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */
+ { 16, "Speaker" }, /* OT */
+ { 22, "Speaker Playback" }, /* FU */
+ { 7, "Line" }, /* IT */
+ { 19, "Line Capture" }, /* FU */
+ { 17, "Front Headphone" }, /* OT */
+ { 23, "Front Headphone Playback" }, /* FU */
+ { 8, "Mic" }, /* IT */
+ { 20, "Mic Capture" }, /* FU */
+ { 9, "Front Mic" }, /* IT */
+ { 21, "Front Mic Capture" }, /* FU */
+ { 24, "IEC958 Playback" }, /* FU */
+ {}
+};
+
+static const struct usbmix_connector_map trx40_mobo_connector_map[] = {
+ { 10, 16 }, /* (Back) Speaker */
+ { 11, 17 }, /* Front Headphone */
+ { 13, 7 }, /* Line */
+ { 14, 8 }, /* Mic */
+ { 15, 9 }, /* Front Mic */
+ {}
+};
+
+/* Rear panel + front mic on Gigabyte TRX40 Aorus Master with ALC1220-VB */
+static const struct usbmix_name_map aorus_master_alc1220vb_map[] = {
+ { 17, NULL }, /* OT, IEC958?, disabled */
+ { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */
+ { 16, "Line Out" }, /* OT */
+ { 22, "Line Out Playback" }, /* FU */
+ { 7, "Line" }, /* IT */
+ { 19, "Line Capture" }, /* FU */
+ { 8, "Mic" }, /* IT */
+ { 20, "Mic Capture" }, /* FU */
+ { 9, "Front Mic" }, /* IT */
+ { 21, "Front Mic Capture" }, /* FU */
+ {}
+};
+
/*
* Control map entries
*/
-static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
{
.id = USB_ID(0x041e, 0x3000),
.map = extigy_map,
@@ -482,6 +542,38 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
.id = USB_ID(0x05a7, 0x1020),
.map = bose_companion5_map,
},
+ { /* Gigabyte TRX40 Aorus Master (rear panel + front mic) */
+ .id = USB_ID(0x0414, 0xa001),
+ .map = aorus_master_alc1220vb_map,
+ },
+ { /* Gigabyte TRX40 Aorus Pro WiFi */
+ .id = USB_ID(0x0414, 0xa002),
+ .map = trx40_mobo_map,
+ .connector_map = trx40_mobo_connector_map,
+ },
+ { /* ASUS ROG Zenith II */
+ .id = USB_ID(0x0b05, 0x1916),
+ .map = asus_rog_map,
+ },
+ { /* ASUS ROG Strix */
+ .id = USB_ID(0x0b05, 0x1917),
+ .map = asus_rog_map,
+ },
+ { /* MSI TRX40 Creator */
+ .id = USB_ID(0x0db0, 0x0d64),
+ .map = trx40_mobo_map,
+ .connector_map = trx40_mobo_connector_map,
+ },
+ { /* MSI TRX40 */
+ .id = USB_ID(0x0db0, 0x543d),
+ .map = trx40_mobo_map,
+ .connector_map = trx40_mobo_connector_map,
+ },
+ { /* Asrock TRX40 Creator */
+ .id = USB_ID(0x26ce, 0x0a01),
+ .map = trx40_mobo_map,
+ .connector_map = trx40_mobo_connector_map,
+ },
{ 0 } /* terminator */
};
@@ -489,37 +581,37 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
* Control map entries for UAC3 BADD profiles
*/
-static struct usbmix_name_map uac3_badd_generic_io_map[] = {
+static const struct usbmix_name_map uac3_badd_generic_io_map[] = {
{ UAC3_BADD_FU_ID2, "Generic Out Playback" },
{ UAC3_BADD_FU_ID5, "Generic In Capture" },
{ 0 } /* terminator */
};
-static struct usbmix_name_map uac3_badd_headphone_map[] = {
+static const struct usbmix_name_map uac3_badd_headphone_map[] = {
{ UAC3_BADD_FU_ID2, "Headphone Playback" },
{ 0 } /* terminator */
};
-static struct usbmix_name_map uac3_badd_speaker_map[] = {
+static const struct usbmix_name_map uac3_badd_speaker_map[] = {
{ UAC3_BADD_FU_ID2, "Speaker Playback" },
{ 0 } /* terminator */
};
-static struct usbmix_name_map uac3_badd_microphone_map[] = {
+static const struct usbmix_name_map uac3_badd_microphone_map[] = {
{ UAC3_BADD_FU_ID5, "Mic Capture" },
{ 0 } /* terminator */
};
/* Covers also 'headset adapter' profile */
-static struct usbmix_name_map uac3_badd_headset_map[] = {
+static const struct usbmix_name_map uac3_badd_headset_map[] = {
{ UAC3_BADD_FU_ID2, "Headset Playback" },
{ UAC3_BADD_FU_ID5, "Headset Capture" },
{ UAC3_BADD_FU_ID7, "Sidetone Mixing" },
{ 0 } /* terminator */
};
-static struct usbmix_name_map uac3_badd_speakerphone_map[] = {
+static const struct usbmix_name_map uac3_badd_speakerphone_map[] = {
{ UAC3_BADD_FU_ID2, "Speaker Playback" },
{ UAC3_BADD_FU_ID5, "Mic Capture" },
{ 0 } /* terminator */
};
-static struct usbmix_ctl_map uac3_badd_usbmix_ctl_maps[] = {
+static const struct usbmix_ctl_map uac3_badd_usbmix_ctl_maps[] = {
{
.id = UAC3_FUNCTION_SUBCLASS_GENERIC_IO,
.map = uac3_badd_generic_io_map,
@@ -548,5 +640,10 @@ static struct usbmix_ctl_map uac3_badd_usbmix_ctl_maps[] = {
.id = UAC3_FUNCTION_SUBCLASS_SPEAKERPHONE,
.map = uac3_badd_speakerphone_map,
},
+ {
+ /* Sennheiser Communications Headset [PC 8] */
+ .id = USB_ID(0x1395, 0x0025),
+ .map = sennheiser_pc8_map,
+ },
{ 0 } /* terminator */
};
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 10c6971cf477..169679419b39 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -130,7 +130,7 @@ static int snd_create_std_mono_ctl(struct usb_mixer_interface *mixer,
* Create a set of standard UAC controls from a table
*/
static int snd_create_std_mono_table(struct usb_mixer_interface *mixer,
- struct std_mono_table *t)
+ const struct std_mono_table *t)
{
int err;
@@ -168,7 +168,8 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer,
return -ENOMEM;
}
kctl->private_free = snd_usb_mixer_elem_free;
- return snd_usb_mixer_add_control(list, kctl);
+ /* don't use snd_usb_mixer_add_control() here, this is a special list element */
+ return snd_usb_mixer_add_list(list, kctl, false);
}
/*
@@ -194,6 +195,7 @@ static const struct rc_config {
{ USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */
{ USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
{ USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
+ { USB_ID(0x041e, 0x3263), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
{ USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */
};
@@ -1397,7 +1399,7 @@ static int snd_c400_create_mixer(struct usb_mixer_interface *mixer)
* are valid; they present mono controls as L and R channels of
* stereo. So we provide a good mixer here.
*/
-static struct std_mono_table ebox44_table[] = {
+static const struct std_mono_table ebox44_table[] = {
{
.unitid = 4,
.control = 1,
@@ -1519,11 +1521,15 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
/* use known values for that card: interface#1 altsetting#1 */
iface = usb_ifnum_to_if(chip->dev, 1);
- if (!iface || iface->num_altsetting < 2)
- return -EINVAL;
+ if (!iface || iface->num_altsetting < 2) {
+ err = -EINVAL;
+ goto end;
+ }
alts = &iface->altsetting[1];
- if (get_iface_desc(alts)->bNumEndpoints < 1)
- return -EINVAL;
+ if (get_iface_desc(alts)->bNumEndpoints < 1) {
+ err = -EINVAL;
+ goto end;
+ }
ep = get_endpoint(alts, 0)->bEndpointAddress;
err = snd_usb_ctl_msg(chip->dev,
@@ -1702,7 +1708,7 @@ static struct snd_kcontrol_new snd_microii_mixer_spdif[] = {
static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
{
int err, i;
- static usb_mixer_elem_resume_func_t resume_funcs[] = {
+ static const usb_mixer_elem_resume_func_t resume_funcs[] = {
snd_microii_spdif_default_update,
NULL,
snd_microii_spdif_switch_update
diff --git a/sound/usb/mixer_scarlett.c b/sound/usb/mixer_scarlett.c
index 4aeb9488a0c9..2e93c0b2e8e3 100644
--- a/sound/usb/mixer_scarlett.c
+++ b/sound/usb/mixer_scarlett.c
@@ -633,7 +633,7 @@ static int add_output_ctls(struct usb_mixer_interface *mixer,
/********************** device-specific config *************************/
/* untested... */
-static struct scarlett_device_info s6i6_info = {
+static const struct scarlett_device_info s6i6_info = {
.matrix_in = 18,
.matrix_out = 8,
.input_len = 6,
@@ -675,7 +675,7 @@ static struct scarlett_device_info s6i6_info = {
};
/* untested... */
-static struct scarlett_device_info s8i6_info = {
+static const struct scarlett_device_info s8i6_info = {
.matrix_in = 18,
.matrix_out = 6,
.input_len = 8,
@@ -714,7 +714,7 @@ static struct scarlett_device_info s8i6_info = {
}
};
-static struct scarlett_device_info s18i6_info = {
+static const struct scarlett_device_info s18i6_info = {
.matrix_in = 18,
.matrix_out = 6,
.input_len = 18,
@@ -751,7 +751,7 @@ static struct scarlett_device_info s18i6_info = {
}
};
-static struct scarlett_device_info s18i8_info = {
+static const struct scarlett_device_info s18i8_info = {
.matrix_in = 18,
.matrix_out = 8,
.input_len = 18,
@@ -793,7 +793,7 @@ static struct scarlett_device_info s18i8_info = {
}
};
-static struct scarlett_device_info s18i20_info = {
+static const struct scarlett_device_info s18i20_info = {
.matrix_in = 18,
.matrix_out = 8,
.input_len = 18,
@@ -843,7 +843,7 @@ static struct scarlett_device_info s18i20_info = {
static int scarlett_controls_create_generic(struct usb_mixer_interface *mixer,
- struct scarlett_device_info *info)
+ const struct scarlett_device_info *info)
{
int i, err;
char mx[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
@@ -906,7 +906,7 @@ int snd_scarlett_controls_create(struct usb_mixer_interface *mixer)
{
int err, i, o;
char mx[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
- struct scarlett_device_info *info;
+ const struct scarlett_device_info *info;
struct usb_mixer_elem_info *elem;
static char sample_rate_buffer[4] = { '\x80', '\xbb', '\x00', '\x00' };
diff --git a/sound/usb/mixer_us16x08.c b/sound/usb/mixer_us16x08.c
index 26ed23b18b77..7db3032e723a 100644
--- a/sound/usb/mixer_us16x08.c
+++ b/sound/usb/mixer_us16x08.c
@@ -617,7 +617,7 @@ static int snd_us16x08_eq_put(struct snd_kcontrol *kcontrol,
static int snd_us16x08_meter_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- uinfo->count = 1;
+ uinfo->count = 34;
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->value.integer.max = 0x7FFF;
uinfo->value.integer.min = 0;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 8b91be394407..4c9ab611aa3e 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -334,6 +334,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
switch (subs->stream->chip->usb_id) {
case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
+ case USB_ID(0x22f0, 0x0006): /* Allen&Heath Qu-16 */
ep = 0x81;
ifnum = 3;
goto add_sync_ep_from_ifnum;
@@ -343,6 +344,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
ifnum = 2;
goto add_sync_ep_from_ifnum;
case USB_ID(0x2466, 0x8003): /* Fractal Audio Axe-Fx II */
+ case USB_ID(0x0499, 0x172a): /* Yamaha MODX */
ep = 0x86;
ifnum = 2;
goto add_sync_ep_from_ifnum;
@@ -350,6 +352,10 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
ep = 0x81;
ifnum = 2;
goto add_sync_ep_from_ifnum;
+ case USB_ID(0x1686, 0xf029): /* Zoom UAC-2 */
+ ep = 0x82;
+ ifnum = 2;
+ goto add_sync_ep_from_ifnum;
case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
ep = 0x81;
@@ -1387,6 +1393,12 @@ static void retire_capture_urb(struct snd_usb_substream *subs,
// continue;
}
bytes = urb->iso_frame_desc[i].actual_length;
+ if (subs->stream_offset_adj > 0) {
+ unsigned int adj = min(subs->stream_offset_adj, bytes);
+ cp += adj;
+ bytes -= adj;
+ subs->stream_offset_adj -= adj;
+ }
frames = bytes / stride;
if (!subs->txfr_quirk)
bytes = frames * stride;
@@ -1841,7 +1853,7 @@ void snd_usb_preallocate_buffer(struct snd_usb_substream *subs)
{
struct snd_pcm *pcm = subs->stream->pcm;
struct snd_pcm_substream *s = pcm->streams[subs->direction].substream;
- struct device *dev = subs->dev->bus->controller;
+ struct device *dev = subs->dev->bus->sysdev;
if (!snd_usb_use_vmalloc)
snd_pcm_lib_preallocate_pages(s, SNDRV_DMA_TYPE_DEV_SG,
diff --git a/sound/usb/proc.c b/sound/usb/proc.c
index 0ac89e294d31..28192b0b2548 100644
--- a/sound/usb/proc.c
+++ b/sound/usb/proc.c
@@ -74,7 +74,7 @@ void snd_usb_audio_create_proc(struct snd_usb_audio *chip)
static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct snd_info_buffer *buffer)
{
struct audioformat *fp;
- static char *sync_types[4] = {
+ static const char * const sync_types[4] = {
"NONE", "ASYNC", "ADAPTIVE", "SYNC"
};
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 90d4f61cc230..81304c2c1124 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -39,6 +39,26 @@
.idProduct = prod, \
.bInterfaceClass = USB_CLASS_VENDOR_SPEC
+/* HP Thunderbolt Dock Audio Headset */
+{
+ USB_DEVICE(0x03f0, 0x0269),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "HP",
+ .product_name = "Thunderbolt Dock Audio Headset",
+ .profile_name = "HP-Thunderbolt-Dock-Audio-Headset",
+ .ifnum = QUIRK_NO_INTERFACE
+ }
+},
+/* HP Thunderbolt Dock Audio Module */
+{
+ USB_DEVICE(0x03f0, 0x0567),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "HP",
+ .product_name = "Thunderbolt Dock Audio Module",
+ .profile_name = "HP-Thunderbolt-Dock-Audio-Module",
+ .ifnum = QUIRK_NO_INTERFACE
+ }
+},
/* FTDI devices */
{
USB_DEVICE(0x0403, 0xb8d8),
@@ -2479,6 +2499,16 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
+{
+ USB_DEVICE_VENDOR_SPEC(0x0944, 0x0204),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .vendor_name = "KORG, Inc.",
+ /* .product_name = "ToneLab EX", */
+ .ifnum = 3,
+ .type = QUIRK_MIDI_STANDARD_INTERFACE,
+ }
+},
+
/* AKAI devices */
{
USB_DEVICE(0x09e8, 0x0062),
@@ -3399,5 +3429,159 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.type = QUIRK_SETUP_FMT_AFTER_RESUME
}
},
+{
+ /*
+ * PIONEER DJ DDJ-RB
+ * PCM is 4 channels out, 2 dummy channels in @ 44.1 fixed
+ * The feedback for the output is the dummy input.
+ */
+ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000e),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 4,
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .endpoint = 0x01,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC|
+ USB_ENDPOINT_SYNC_ASYNC,
+ .rates = SNDRV_PCM_RATE_44100,
+ .rate_min = 44100,
+ .rate_max = 44100,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) { 44100 }
+ }
+ },
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 2,
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .endpoint = 0x82,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC|
+ USB_ENDPOINT_SYNC_ASYNC|
+ USB_ENDPOINT_USAGE_IMPLICIT_FB,
+ .rates = SNDRV_PCM_RATE_44100,
+ .rate_min = 44100,
+ .rate_max = 44100,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) { 44100 }
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+
+#define ALC1220_VB_DESKTOP(vend, prod) { \
+ USB_DEVICE(vend, prod), \
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { \
+ .vendor_name = "Realtek", \
+ .product_name = "ALC1220-VB-DT", \
+ .profile_name = "Realtek-ALC1220-VB-Desktop", \
+ .ifnum = QUIRK_NO_INTERFACE \
+ } \
+}
+ALC1220_VB_DESKTOP(0x0414, 0xa002), /* Gigabyte TRX40 Aorus Pro WiFi */
+ALC1220_VB_DESKTOP(0x0db0, 0x0d64), /* MSI TRX40 Creator */
+ALC1220_VB_DESKTOP(0x0db0, 0x543d), /* MSI TRX40 */
+ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
+#undef ALC1220_VB_DESKTOP
+
+/* Two entries for Gigabyte TRX40 Aorus Master:
+ * TRX40 Aorus Master has two USB-audio devices, one for the front headphone
+ * with an ESS SABRE9218 DAC chip, and another for the rest of the I/O (the
+ * rear panel and the front mic) with a Realtek ALC1220-VB.
+ * Here we provide two distinct names to make UCM profiles easier.
+ */
+{
+ USB_DEVICE(0x0414, 0xa000),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .vendor_name = "Gigabyte",
+ .product_name = "Aorus Master Front Headphone",
+ .profile_name = "Gigabyte-Aorus-Master-Front-Headphone",
+ .ifnum = QUIRK_NO_INTERFACE
+ }
+},
+{
+ USB_DEVICE(0x0414, 0xa001),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .vendor_name = "Gigabyte",
+ .product_name = "Aorus Master Main Audio",
+ .profile_name = "Gigabyte-Aorus-Master-Main-Audio",
+ .ifnum = QUIRK_NO_INTERFACE
+ }
+},
+
+/*
+ * MacroSilicon MS2109 based HDMI capture cards
+ *
+ * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch.
+ * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if
+ * they pretend to be 96kHz mono as a workaround for stereo being broken
+ * by that...
+ *
+ * They also have an issue with initial stream alignment that causes the
+ * channels to be swapped and out of phase, which is dealt with in quirks.c.
+ */
+{
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .idVendor = 0x534d,
+ .idProduct = 0x2109,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "MacroSilicon",
+ .product_name = "MS2109",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = &(const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ {
+ .ifnum = 3,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 3,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0,
+ .endpoint = 0x82,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC |
+ USB_ENDPOINT_SYNC_ASYNC,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
#undef USB_DEVICE_VENDOR_SPEC
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 5bbfd7577b33..b5f2b18b8b42 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -66,8 +66,12 @@ static int create_composite_quirk(struct snd_usb_audio *chip,
if (!iface)
continue;
if (quirk->ifnum != probed_ifnum &&
- !usb_interface_claimed(iface))
- usb_driver_claim_interface(driver, iface, (void *)-1L);
+ !usb_interface_claimed(iface)) {
+ err = usb_driver_claim_interface(driver, iface,
+ USB_AUDIO_IFACE_UNUSED);
+ if (err < 0)
+ return err;
+ }
}
return 0;
@@ -401,8 +405,12 @@ static int create_autodetect_quirks(struct snd_usb_audio *chip,
continue;
err = create_autodetect_quirk(chip, iface, driver);
- if (err >= 0)
- usb_driver_claim_interface(driver, iface, (void *)-1L);
+ if (err >= 0) {
+ err = usb_driver_claim_interface(driver, iface,
+ USB_AUDIO_IFACE_UNUSED);
+ if (err < 0)
+ return err;
+ }
}
return 0;
@@ -1166,6 +1174,9 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
set_format_emu_quirk(subs, fmt);
break;
+ case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
+ subs->stream_offset_adj = 2;
+ break;
}
}
@@ -1183,6 +1194,8 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
+ case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */
+ case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */
return true;
}
@@ -1203,6 +1216,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
static bool is_itf_usb_dsd_dac(unsigned int id)
{
switch (id) {
+ case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */
case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
@@ -1334,15 +1348,33 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
&& (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
msleep(20);
- /* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here,
- * otherwise requests like get/set frequency return as failed despite
- * actually succeeding.
+ /*
+ * Plantronics headsets (C320, C320-M, etc.) need a delay to avoid
+ * random microphone failures.
+ */
+ if (USB_ID_VENDOR(chip->usb_id) == 0x047f &&
+ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ msleep(20);
+
+ /* Zoom R16/24, many Logitech devices (at least H650e/H570e/BCC950),
+ * Jabra 550a, and Kingston HyperX need a tiny delay here,
+ * otherwise requests like get/set frequency return
+ * as failed despite actually succeeding.
*/
if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
- chip->usb_id == USB_ID(0x046d, 0x0a46) ||
- chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
+ USB_ID_VENDOR(chip->usb_id) == 0x046d || /* Logitech */
+ chip->usb_id == USB_ID(0x0b0e, 0x0349) ||
+ chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
(requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
usleep_range(1000, 2000);
+
+ /*
+ * Samsung USBC Headset (AKG) needs a tiny delay after each
+ * class-compliant request. (Model number: AAM625R or AAM627R)
+ */
+ if (chip->usb_id == USB_ID(0x04e8, 0xa051) &&
+ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ usleep_range(5000, 6000);
}
/*
@@ -1385,7 +1417,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
- case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */
+ case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */
case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
@@ -1451,7 +1483,9 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case 0x25ce: /* Mytek devices */
case 0x278b: /* Rotel? */
case 0x292b: /* Gustard/Ess based devices */
+ case 0x2972: /* FiiO devices */
case 0x2ab6: /* T+A devices */
+ case 0x3353: /* Khadas devices */
case 0x3842: /* EVGA */
case 0xc502: /* HiBy devices */
if (fp->dsd_raw)
@@ -1498,3 +1532,39 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip,
break;
}
}
+
+/*
+ * registration quirk:
+ * the registration is skipped if a device matches the given ID,
+ * unless the interface reaches the defined one. This is for delaying
+ * the registration until the last known interface, so that the card and
+ * devices appear at the same time.
+ */
+
+struct registration_quirk {
+ unsigned int usb_id; /* composed via USB_ID() */
+ unsigned int interface; /* the interface to trigger register */
+};
+
+#define REG_QUIRK_ENTRY(vendor, product, iface) \
+ { .usb_id = USB_ID(vendor, product), .interface = (iface) }
+
+static const struct registration_quirk registration_quirks[] = {
+ REG_QUIRK_ENTRY(0x0951, 0x16d8, 2), /* Kingston HyperX AMP */
+ REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */
+ REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */
+ { 0 } /* terminator */
+};
+
+/* return true if skipping registration */
+bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface)
+{
+ const struct registration_quirk *q;
+
+ for (q = registration_quirks; q->usb_id; q++)
+ if (chip->usb_id == q->usb_id)
+ return iface != q->interface;
+
+ /* Register as normal */
+ return false;
+}
diff --git a/sound/usb/quirks.h b/sound/usb/quirks.h
index a80e0ddd0736..1efa6c968532 100644
--- a/sound/usb/quirks.h
+++ b/sound/usb/quirks.h
@@ -46,4 +46,6 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip,
struct audioformat *fp,
int stream);
+bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface);
+
#endif /* __USBAUDIO_QUIRKS_H */
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 9d020bd0de17..9a950aaf5e35 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -99,6 +99,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as,
subs->tx_length_quirk = as->chip->tx_length_quirk;
subs->speed = snd_usb_get_speed(subs->dev);
subs->pkt_offset_adj = 0;
+ subs->stream_offset_adj = 0;
snd_usb_set_pcm_ops(as->pcm, stream);
@@ -197,16 +198,16 @@ static int usb_chmap_ctl_get(struct snd_kcontrol *kcontrol,
struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
struct snd_usb_substream *subs = info->private_data;
struct snd_pcm_chmap_elem *chmap = NULL;
- int i;
+ int i = 0;
- memset(ucontrol->value.integer.value, 0,
- sizeof(ucontrol->value.integer.value));
if (subs->cur_audiofmt)
chmap = subs->cur_audiofmt->chmap;
if (chmap) {
for (i = 0; i < chmap->channels; i++)
ucontrol->value.integer.value[i] = chmap->map[i];
}
+ for (; i < subs->channels_max; i++)
+ ucontrol->value.integer.value[i] = 0;
return 0;
}
@@ -244,7 +245,7 @@ static int add_chmap(struct snd_pcm *pcm, int stream,
static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
int protocol)
{
- static unsigned int uac1_maps[] = {
+ static const unsigned int uac1_maps[] = {
SNDRV_CHMAP_FL, /* left front */
SNDRV_CHMAP_FR, /* right front */
SNDRV_CHMAP_FC, /* center front */
@@ -259,7 +260,7 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
SNDRV_CHMAP_TC, /* top */
0 /* terminator */
};
- static unsigned int uac2_maps[] = {
+ static const unsigned int uac2_maps[] = {
SNDRV_CHMAP_FL, /* front left */
SNDRV_CHMAP_FR, /* front right */
SNDRV_CHMAP_FC, /* front center */
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 4a0a5a071d51..0c7ea78317fc 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -37,7 +37,7 @@ struct snd_usb_audio {
struct usb_interface *pm_intf;
u32 usb_id;
struct mutex mutex;
- unsigned int autosuspended:1;
+ unsigned int system_suspend;
atomic_t active;
atomic_t shutdown;
atomic_t usage_count;
@@ -68,6 +68,8 @@ struct snd_usb_audio {
struct usb_host_interface *ctrl_intf; /* the audio control interface */
};
+#define USB_AUDIO_IFACE_UNUSED ((void *)-1L)
+
#define usb_audio_err(chip, fmt, args...) \
dev_err(&(chip)->dev->dev, fmt, ##args)
#define usb_audio_warn(chip, fmt, args...) \
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index 2b833054e3b0..bdb28e0229e6 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -695,6 +695,8 @@ static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate)
us->submitted = 2*NOOF_SETRATE_URBS;
for (i = 0; i < NOOF_SETRATE_URBS; ++i) {
struct urb *urb = us->urb[i];
+ if (!urb)
+ continue;
if (urb->status) {
if (!err)
err = -ENODEV;
diff --git a/sound/usb/validate.c b/sound/usb/validate.c
index 5a3c4f7882b0..89a48d731719 100644
--- a/sound/usb/validate.c
+++ b/sound/usb/validate.c
@@ -233,7 +233,7 @@ static bool validate_midi_out_jack(const void *p,
#define FIXED(p, t, s) { .protocol = (p), .type = (t), .size = sizeof(s) }
#define FUNC(p, t, f) { .protocol = (p), .type = (t), .func = (f) }
-static struct usb_desc_validator audio_validators[] = {
+static const struct usb_desc_validator audio_validators[] = {
/* UAC1 */
FUNC(UAC_VERSION_1, UAC_HEADER, validate_uac1_header),
FIXED(UAC_VERSION_1, UAC_INPUT_TERMINAL,
@@ -288,7 +288,7 @@ static struct usb_desc_validator audio_validators[] = {
{ } /* terminator */
};
-static struct usb_desc_validator midi_validators[] = {
+static const struct usb_desc_validator midi_validators[] = {
FIXED(UAC_VERSION_ALL, USB_MS_HEADER,
struct usb_ms_header_descriptor),
FIXED(UAC_VERSION_ALL, USB_MS_MIDI_IN_JACK,
diff --git a/tools/arch/ia64/include/asm/barrier.h b/tools/arch/ia64/include/asm/barrier.h
index d808ee0e77b5..90f8bbd9aede 100644
--- a/tools/arch/ia64/include/asm/barrier.h
+++ b/tools/arch/ia64/include/asm/barrier.h
@@ -39,9 +39,6 @@
* sequential memory pages only.
*/
-/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
-#define ia64_mf() asm volatile ("mf" ::: "memory")
-
#define mb() ia64_mf()
#define rmb() mb()
#define wmb() mb()
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index ff0cc3c17141..1e7c619228a2 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -26,7 +26,7 @@ static void btf_dumper_ptr(const void *data, json_writer_t *jw,
bool is_plain_text)
{
if (is_plain_text)
- jsonw_printf(jw, "%p", data);
+ jsonw_printf(jw, "%p", *(void **)data);
else
jsonw_printf(jw, "%lu", *(unsigned long *)data);
}
diff --git a/tools/build/Build.include b/tools/build/Build.include
index 9ec01f4454f9..585486e40995 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -74,7 +74,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
# dependencies in the cmd file
if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \
@set -e; \
- $(echo-cmd) $(cmd_$(1)) && $(dep-cmd))
+ $(echo-cmd) $(cmd_$(1)); \
+ $(dep-cmd))
# if_changed - execute command if any prerequisite is newer than
# target, or command line has changed
diff --git a/tools/build/Makefile b/tools/build/Makefile
index 727050c40f09..8a55378e8b7c 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -15,10 +15,6 @@ endef
$(call allow-override,CC,$(CROSS_COMPILE)gcc)
$(call allow-override,LD,$(CROSS_COMPILE)ld)
-HOSTCC ?= gcc
-HOSTLD ?= ld
-HOSTAR ?= ar
-
export HOSTCC HOSTLD HOSTAR
ifeq ($(V),1)
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 42a787856cd8..7f91b6013ddc 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -7,7 +7,7 @@ endif
feature_check = $(eval $(feature_check_code))
define feature_check_code
- feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
+ feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CC="$(CC)" CXX="$(CXX)" CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
endef
feature_set = $(eval $(feature_set_code))
@@ -31,8 +31,11 @@ FEATURE_TESTS_BASIC := \
backtrace \
dwarf \
dwarf_getlocations \
+ eventfd \
fortify-source \
sync-compare-and-swap \
+ get_current_dir_name \
+ gettid \
glibc \
gtk2 \
gtk2-infobar \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index bf8a8ebcca1e..6df574750bc9 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -5,8 +5,10 @@ FILES= \
test-bionic.bin \
test-dwarf.bin \
test-dwarf_getlocations.bin \
+ test-eventfd.bin \
test-fortify-source.bin \
test-sync-compare-and-swap.bin \
+ test-get_current_dir_name.bin \
test-glibc.bin \
test-gtk2.bin \
test-gtk2-infobar.bin \
@@ -52,6 +54,7 @@ FILES= \
test-get_cpuid.bin \
test-sdt.bin \
test-cxx.bin \
+ test-gettid.bin \
test-jvmti.bin \
test-sched_getcpu.bin \
test-setns.bin \
@@ -62,8 +65,6 @@ FILES= \
FILES := $(addprefix $(OUTPUT),$(FILES))
-CC ?= $(CROSS_COMPILE)gcc
-CXX ?= $(CROSS_COMPILE)g++
PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config
LLVM_CONFIG ?= llvm-config
@@ -101,6 +102,12 @@ $(OUTPUT)test-bionic.bin:
$(OUTPUT)test-libelf.bin:
$(BUILD) -lelf
+$(OUTPUT)test-eventfd.bin:
+ $(BUILD)
+
+$(OUTPUT)test-get_current_dir_name.bin:
+ $(BUILD)
+
$(OUTPUT)test-glibc.bin:
$(BUILD)
@@ -256,6 +263,9 @@ $(OUTPUT)test-sdt.bin:
$(OUTPUT)test-cxx.bin:
$(BUILDXX) -std=gnu++11
+$(OUTPUT)test-gettid.bin:
+ $(BUILD)
+
$(OUTPUT)test-jvmti.bin:
$(BUILD)
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 8dc20a61341f..8282bbe547c4 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -34,6 +34,14 @@
# include "test-libelf-mmap.c"
#undef main
+#define main main_test_get_current_dir_name
+# include "test-get_current_dir_name.c"
+#undef main
+
+#define main main_test_gettid
+# include "test-gettid.c"
+#undef main
+
#define main main_test_glibc
# include "test-glibc.c"
#undef main
@@ -46,6 +54,10 @@
# include "test-dwarf_getlocations.c"
#undef main
+#define main main_test_eventfd
+# include "test-eventfd.c"
+#undef main
+
#define main main_test_libelf_getphdrnum
# include "test-libelf-getphdrnum.c"
#undef main
@@ -174,9 +186,12 @@ int main(int argc, char *argv[])
main_test_hello();
main_test_libelf();
main_test_libelf_mmap();
+ main_test_get_current_dir_name();
+ main_test_gettid();
main_test_glibc();
main_test_dwarf();
main_test_dwarf_getlocations();
+ main_test_eventfd();
main_test_libelf_getphdrnum();
main_test_libelf_gelf_getnote();
main_test_libelf_getshdrstrndx();
diff --git a/tools/build/feature/test-eventfd.c b/tools/build/feature/test-eventfd.c
new file mode 100644
index 000000000000..f4de7ef00ccb
--- /dev/null
+++ b/tools/build/feature/test-eventfd.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+
+#include <sys/eventfd.h>
+
+int main(void)
+{
+ return eventfd(0, EFD_NONBLOCK);
+}
diff --git a/tools/build/feature/test-get_current_dir_name.c b/tools/build/feature/test-get_current_dir_name.c
new file mode 100644
index 000000000000..573000f93212
--- /dev/null
+++ b/tools/build/feature/test-get_current_dir_name.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <stdlib.h>
+
+int main(void)
+{
+ free(get_current_dir_name());
+ return 0;
+}
diff --git a/tools/build/feature/test-gettid.c b/tools/build/feature/test-gettid.c
new file mode 100644
index 000000000000..ef24e42d3f1b
--- /dev/null
+++ b/tools/build/feature/test-gettid.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+#define _GNU_SOURCE
+#include <unistd.h>
+
+int main(void)
+{
+ return gettid();
+}
+
+#undef _GNU_SOURCE
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
index 6a73c06e069c..3dbf7e8b07a5 100644
--- a/tools/gpio/Makefile
+++ b/tools/gpio/Makefile
@@ -35,7 +35,7 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h
prepare: $(OUTPUT)include/linux/gpio.h
-GPIO_UTILS_IN := $(output)gpio-utils-in.o
+GPIO_UTILS_IN := $(OUTPUT)gpio-utils-in.o
$(GPIO_UTILS_IN): prepare FORCE
$(Q)$(MAKE) $(build)=gpio-utils
diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c
index 4bcb234c0fca..3da5462a0c7d 100644
--- a/tools/gpio/gpio-hammer.c
+++ b/tools/gpio/gpio-hammer.c
@@ -138,7 +138,14 @@ int main(int argc, char **argv)
device_name = optarg;
break;
case 'o':
- lines[i] = strtoul(optarg, NULL, 10);
+ /*
+ * Avoid overflow. Do not error out immediately; keep counting
+ * how many times '-o' was given so that the error message
+ * below can report the number accurately.
+ */
+ if (i < GPIOHANDLES_MAX)
+ lines[i] = strtoul(optarg, NULL, 10);
+
i++;
break;
case '?':
@@ -146,6 +153,14 @@ int main(int argc, char **argv)
return -1;
}
}
+
+ if (i >= GPIOHANDLES_MAX) {
+ fprintf(stderr,
+ "Only %d occurences of '-o' are allowed, %d were found\n",
+ GPIOHANDLES_MAX, i + 1);
+ return -1;
+ }
+
nlines = i;
if (!device_name || !nlines) {
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index bf4cd924aed5..13944978ada5 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1191,8 +1191,8 @@ union bpf_attr {
* Return
* The return value depends on the result of the test, and can be:
*
- * * 0, if the *skb* task belongs to the cgroup2.
- * * 1, if the *skb* task does not belong to the cgroup2.
+ * * 0, if current task belongs to the cgroup2.
+ * * 1, if current task does not belong to the cgroup2.
* * A negative error code, if an error occurred.
*
* int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index f35eb72739c0..a45e7b4f0316 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -1079,7 +1079,7 @@ union perf_mem_data_src {
#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
/* 1 free */
-#define PERF_MEM_SNOOPX_SHIFT 37
+#define PERF_MEM_SNOOPX_SHIFT 38
/* locked instruction */
#define PERF_MEM_LOCK_NA 0x01 /* not available */
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index bd021a0eeef8..4cc69675c2a9 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -90,6 +90,7 @@ struct fs {
const char * const *mounts;
char path[PATH_MAX];
bool found;
+ bool checked;
long magic;
};
@@ -111,31 +112,37 @@ static struct fs fs__entries[] = {
.name = "sysfs",
.mounts = sysfs__fs_known_mountpoints,
.magic = SYSFS_MAGIC,
+ .checked = false,
},
[FS__PROCFS] = {
.name = "proc",
.mounts = procfs__known_mountpoints,
.magic = PROC_SUPER_MAGIC,
+ .checked = false,
},
[FS__DEBUGFS] = {
.name = "debugfs",
.mounts = debugfs__known_mountpoints,
.magic = DEBUGFS_MAGIC,
+ .checked = false,
},
[FS__TRACEFS] = {
.name = "tracefs",
.mounts = tracefs__known_mountpoints,
.magic = TRACEFS_MAGIC,
+ .checked = false,
},
[FS__HUGETLBFS] = {
.name = "hugetlbfs",
.mounts = hugetlbfs__known_mountpoints,
.magic = HUGETLBFS_MAGIC,
+ .checked = false,
},
[FS__BPF_FS] = {
.name = "bpf",
.mounts = bpf_fs__known_mountpoints,
.magic = BPF_FS_MAGIC,
+ .checked = false,
},
};
@@ -158,6 +165,7 @@ static bool fs__read_mounts(struct fs *fs)
}
fclose(fp);
+ fs->checked = true;
return fs->found = found;
}
@@ -220,6 +228,7 @@ static bool fs__env_override(struct fs *fs)
return false;
fs->found = true;
+ fs->checked = true;
strncpy(fs->path, override_path, sizeof(fs->path) - 1);
fs->path[sizeof(fs->path) - 1] = '\0';
return true;
@@ -246,6 +255,14 @@ static const char *fs__mountpoint(int idx)
if (fs->found)
return (const char *)fs->path;
+ /* The mount point was already looked up but did not exist,
+ * so return NULL to avoid scanning again.
+ * This makes the found and not-found paths cost equivalent
+ * in case of multiple calls.
+ */
+ if (fs->checked)
+ return NULL;
+
return fs__get_mountpoint(fs);
}
diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h
index 92d03b8396b1..3b70003e7cfb 100644
--- a/tools/lib/api/fs/fs.h
+++ b/tools/lib/api/fs/fs.h
@@ -18,6 +18,18 @@
const char *name##__mount(void); \
bool name##__configured(void); \
+/*
+ * The xxxx__mountpoint() entry points find the first matching mount point for
+ * each of the filesystems listed below, where xxxx is the filesystem type.
+ *
+ * The interface is as follows:
+ *
+ * - If a mount point is found on first call, it is cached and used for all
+ * subsequent calls.
+ *
+ * - If a mount point is not found, NULL is returned on first call and all
+ * subsequent calls.
+ */
FS(sysfs)
FS(procfs)
FS(debugfs)
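
A hedged usage sketch of the xxxx__mountpoint() interface documented above, here for sysfs; it assumes the caller is built against tools/lib/api (include path api/fs/fs.h, linked with libapi), which is how perf itself consumes it:

#include <stdio.h>
#include <api/fs/fs.h>

int main(void)
{
        const char *sysfs = sysfs__mountpoint();  /* scans mounts on first call */

        if (!sysfs) {
                fprintf(stderr, "sysfs mount point not found\n");
                return 1;       /* later calls also return NULL, without rescanning */
        }

        /* The second call returns the cached path without rescanning. */
        printf("sysfs at %s (cached: %s)\n", sysfs, sysfs__mountpoint());
        return 0;
}
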
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 3624557550a1..6f57d38443c7 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -179,7 +179,7 @@ define do_install
if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
fi; \
- $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
+ $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
endef
install_lib: all_cmd
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 382e476629fb..c0fcc8af2a3e 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -2766,6 +2766,7 @@ process_dynamic_array_len(struct event_format *event, struct print_arg *arg,
if (read_expected(EVENT_DELIM, ")") < 0)
goto out_err;
+ free_token(token);
type = read_token(&token);
*tok = token;
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 20f67fcf378d..15f32f67cf34 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -7,9 +7,6 @@ ARCH := x86
endif
# always use the host compiler
-HOSTAR ?= ar
-HOSTCC ?= gcc
-HOSTLD ?= ld
AR = $(HOSTAR)
CC = $(HOSTCC)
LD = $(HOSTLD)
diff --git a/tools/objtool/arch/x86/include/asm/insn.h b/tools/objtool/arch/x86/include/asm/insn.h
index c2c01f84df75..3e0e18d376d2 100644
--- a/tools/objtool/arch/x86/include/asm/insn.h
+++ b/tools/objtool/arch/x86/include/asm/insn.h
@@ -208,6 +208,21 @@ static inline int insn_offset_immediate(struct insn *insn)
return insn_offset_displacement(insn) + insn->displacement.nbytes;
}
+/**
+ * for_each_insn_prefix() -- Iterate prefixes in the instruction
+ * @insn: Pointer to struct insn.
+ * @idx: Index storage.
+ * @prefix: Prefix byte.
+ *
+ * Iterate over the prefix bytes of the given @insn. Each prefix byte is stored
+ * in @prefix and the index is stored in @idx (note that @idx is just a cursor;
+ * do not change it).
+ * Since prefixes.nbytes can be bigger than 4 when some prefixes are repeated,
+ * it cannot be used as a loop bound over the prefixes.
+ */
+#define for_each_insn_prefix(insn, idx, prefix) \
+ for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
+
#define POP_SS_OPCODE 0x1f
#define MOV_SREG_OPCODE 0x8e
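
A standalone illustration of the loop-bound point made in the kernel-doc above: the cut-down struct insn here is a stand-in for the real one, and only the macro itself is copied from the patch.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct insn {
        struct {
                unsigned char bytes[4];
                unsigned char nbytes;   /* may exceed 4 if prefixes repeat */
        } prefixes;
};

#define for_each_insn_prefix(insn, idx, prefix) \
        for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)

int main(void)
{
        struct insn ins = { .prefixes = { .bytes = { 0xf2, 0x66 }, .nbytes = 6 } };
        struct insn *insn = &ins;
        unsigned char prefix;
        int idx;

        /* Stops after the two real prefixes, even though nbytes claims 6. */
        for_each_insn_prefix(insn, idx, prefix)
                printf("prefix[%d] = %#x\n", idx, prefix);
        return 0;
}
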
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index ecf5fc77f50b..c0ab27368a34 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -503,7 +503,7 @@ static int add_jump_destinations(struct objtool_file *file)
insn->type != INSN_JUMP_UNCONDITIONAL)
continue;
- if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
+ if (insn->offset == FAKE_JUMP_OFFSET)
continue;
rela = find_rela_by_dest_range(insn->sec, insn->offset,
@@ -801,6 +801,12 @@ static int add_special_section_alts(struct objtool_file *file)
}
if (special_alt->group) {
+ if (!special_alt->orig_len) {
+ WARN_FUNC("empty alternative entry",
+ orig_insn->sec, orig_insn->offset);
+ continue;
+ }
+
ret = handle_group_alt(file, special_alt, orig_insn,
&new_insn);
if (ret)
@@ -938,10 +944,7 @@ static struct rela *find_switch_table(struct objtool_file *file,
* it.
*/
for (;
- &insn->list != &file->insn_list &&
- insn->sec == func->sec &&
- insn->offset >= func->offset;
-
+ &insn->list != &file->insn_list && insn->func && insn->func->pfunc == func;
insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
@@ -1318,7 +1321,7 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s
struct cfi_reg *cfa = &state->cfa;
struct stack_op *op = &insn->stack_op;
- if (cfa->base != CFI_SP)
+ if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
return 0;
/* push */
@@ -2089,14 +2092,27 @@ static bool ignore_unreachable_insn(struct instruction *insn)
!strcmp(insn->sec->name, ".altinstr_aux"))
return true;
+ if (!insn->func)
+ return false;
+
+ /*
+ * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
+ * __builtin_unreachable(). The BUG() macro has an unreachable() after
+ * the UD2, which causes GCC's undefined trap logic to emit another UD2
+ * (or occasionally a JMP to UD2).
+ */
+ if (list_prev_entry(insn, list)->dead_end &&
+ (insn->type == INSN_BUG ||
+ (insn->type == INSN_JUMP_UNCONDITIONAL &&
+ insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
+ return true;
+
/*
* Check if this (or a subsequent) instruction is related to
* CONFIG_UBSAN or CONFIG_KASAN.
*
* End the search at 5 instructions to avoid going into the weeds.
*/
- if (!insn->func)
- return false;
for (i = 0; i < 5; i++) {
if (is_kasan_insn(insn) || is_ubsan_insn(insn))
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index b8f3cca8e58b..264d49fea814 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -226,8 +226,11 @@ static int read_symbols(struct elf *elf)
symtab = find_section_by_name(elf, ".symtab");
if (!symtab) {
- WARN("missing symbol table");
- return -1;
+ /*
+ * A missing symbol table is actually possible if it's an empty
+ * .o file. This can happen for thunk_64.o.
+ */
+ return 0;
}
symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index faa444270ee3..1a3e774941f8 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -78,7 +78,7 @@ int orc_dump(const char *_objname)
char *name;
size_t nr_sections;
Elf64_Addr orc_ip_addr = 0;
- size_t shstrtab_idx;
+ size_t shstrtab_idx, strtab_idx = 0;
Elf *elf;
Elf_Scn *scn;
GElf_Shdr sh;
@@ -139,6 +139,8 @@ int orc_dump(const char *_objname)
if (!strcmp(name, ".symtab")) {
symtab = data;
+ } else if (!strcmp(name, ".strtab")) {
+ strtab_idx = i;
} else if (!strcmp(name, ".orc_unwind")) {
orc = data->d_buf;
orc_size = sh.sh_size;
@@ -150,7 +152,7 @@ int orc_dump(const char *_objname)
}
}
- if (!symtab || !orc || !orc_ip)
+ if (!symtab || !strtab_idx || !orc || !orc_ip)
return 0;
if (orc_size % sizeof(*orc) != 0) {
@@ -171,21 +173,29 @@ int orc_dump(const char *_objname)
return -1;
}
- scn = elf_getscn(elf, sym.st_shndx);
- if (!scn) {
- WARN_ELF("elf_getscn");
- return -1;
- }
-
- if (!gelf_getshdr(scn, &sh)) {
- WARN_ELF("gelf_getshdr");
- return -1;
- }
-
- name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
- if (!name || !*name) {
- WARN_ELF("elf_strptr");
- return -1;
+ if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) {
+ scn = elf_getscn(elf, sym.st_shndx);
+ if (!scn) {
+ WARN_ELF("elf_getscn");
+ return -1;
+ }
+
+ if (!gelf_getshdr(scn, &sh)) {
+ WARN_ELF("gelf_getshdr");
+ return -1;
+ }
+
+ name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
+ if (!name) {
+ WARN_ELF("elf_strptr");
+ return -1;
+ }
+ } else {
+ name = elf_strptr(elf, strtab_idx, sym.st_name);
+ if (!name) {
+ WARN_ELF("elf_strptr");
+ return -1;
+ }
}
printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index 3f98dcfbc177..0b1ba8e7d18a 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -100,11 +100,6 @@ static int create_orc_entry(struct section *u_sec, struct section *ip_relasec,
struct orc_entry *orc;
struct rela *rela;
- if (!insn_sec->sym) {
- WARN("missing symbol for section %s", insn_sec->name);
- return -1;
- }
-
/* populate ORC data */
orc = (struct orc_entry *)u_sec->data->d_buf + idx;
memcpy(orc, o, sizeof(*orc));
@@ -117,8 +112,32 @@ static int create_orc_entry(struct section *u_sec, struct section *ip_relasec,
}
memset(rela, 0, sizeof(*rela));
- rela->sym = insn_sec->sym;
- rela->addend = insn_off;
+ if (insn_sec->sym) {
+ rela->sym = insn_sec->sym;
+ rela->addend = insn_off;
+ } else {
+ /*
+ * The Clang assembler doesn't produce section symbols, so we
+ * have to reference the function symbol instead:
+ */
+ rela->sym = find_symbol_containing(insn_sec, insn_off);
+ if (!rela->sym) {
+ /*
+ * Hack alert. This happens when we need to reference
+ * the NOP pad insn immediately after the function.
+ */
+ rela->sym = find_symbol_containing(insn_sec,
+ insn_off - 1);
+ }
+ if (!rela->sym) {
+ WARN("missing symbol for insn at offset 0x%lx\n",
+ insn_off);
+ return -1;
+ }
+
+ rela->addend = insn_off - rela->sym->offset;
+ }
+
rela->type = R_X86_64_PC32;
rela->offset = idx * sizeof(int);
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 246dee081efd..edf2be251788 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -33,6 +33,10 @@ OPTIONS
- a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
hexadecimal event descriptor.
+ - a symbolic or raw PMU event followed by an optional colon
+ and a list of event modifiers, e.g., cpu-cycles:p. See the
+ linkperf:perf-list[1] man page for details on event modifiers.
+
- a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where
'param1', 'param2', etc are defined as formats for the PMU in
/sys/bus/event_source/devices/<pmu>/format/*.
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index b10a90b6a718..239af8f71f79 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -39,6 +39,10 @@ report::
- a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
hexadecimal event descriptor.
+ - a symbolic or raw PMU event followed by an optional colon
+ and a list of event modifiers, e.g., cpu-cycles:p. See the
+ linkperf:perf-list[1] man page for details on event modifiers.
+
- a symbolically formed event like 'pmu/param1=0x3,param2/' where
param1 and param2 are defined as formats for the PMU in
/sys/bus/event_source/devices/<pmu>/format/*
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 510caedd7319..a328beb9f505 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -205,8 +205,17 @@ strip-libs = $(filter-out -l%,$(1))
PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
+# Python 3.8 changed the output of `python-config --ldflags` to not include the
+# '-lpythonX.Y' flag unless '--embed' is also passed. The feature check for
+# libpython fails if that flag is not included in LDFLAGS
+ifeq ($(shell $(PYTHON_CONFIG_SQ) --ldflags --embed 2>&1 1>/dev/null; echo $$?), 0)
+ PYTHON_CONFIG_LDFLAGS := --ldflags --embed
+else
+ PYTHON_CONFIG_LDFLAGS := --ldflags
+endif
+
ifdef PYTHON_CONFIG
- PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
+ PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) $(PYTHON_CONFIG_LDFLAGS) 2>/dev/null)
PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
@@ -301,6 +310,18 @@ ifndef NO_BIONIC
endif
endif
+ifeq ($(feature-eventfd), 1)
+ CFLAGS += -DHAVE_EVENTFD
+endif
+
+ifeq ($(feature-get_current_dir_name), 1)
+ CFLAGS += -DHAVE_GET_CURRENT_DIR_NAME
+endif
+
+ifeq ($(feature-gettid), 1)
+ CFLAGS += -DHAVE_GETTID
+endif
+
ifdef NO_LIBELF
NO_DWARF := 1
NO_DEMANGLE := 1
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 0be411695379..678aa7feb84d 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -148,10 +148,6 @@ endef
LD += $(EXTRA_LDFLAGS)
-HOSTCC ?= gcc
-HOSTLD ?= ld
-HOSTAR ?= ar
-
PKG_CONFIG = $(CROSS_COMPILE)pkg-config
LLVM_CONFIG ?= llvm-config
diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
index 0251dd348124..4864fc67d01b 100644
--- a/tools/perf/bench/mem-functions.c
+++ b/tools/perf/bench/mem-functions.c
@@ -222,12 +222,8 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *
return 0;
}
-static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
+static void memcpy_prefault(memcpy_t fn, size_t size, void *src, void *dst)
{
- u64 cycle_start = 0ULL, cycle_end = 0ULL;
- memcpy_t fn = r->fn.memcpy;
- int i;
-
/* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */
memset(src, 0, size);
@@ -236,6 +232,15 @@ static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, vo
* to not measure page fault overhead:
*/
fn(dst, src, size);
+}
+
+static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
+{
+ u64 cycle_start = 0ULL, cycle_end = 0ULL;
+ memcpy_t fn = r->fn.memcpy;
+ int i;
+
+ memcpy_prefault(fn, size, src, dst);
cycle_start = get_cycles();
for (i = 0; i < nr_loops; ++i)
@@ -251,11 +256,7 @@ static double do_memcpy_gettimeofday(const struct function *r, size_t size, void
memcpy_t fn = r->fn.memcpy;
int i;
- /*
- * We prefault the freshly allocated memory range here,
- * to not measure page fault overhead:
- */
- fn(dst, src, size);
+ memcpy_prefault(fn, size, src, dst);
BUG_ON(gettimeofday(&tv_start, NULL));
for (i = 0; i < nr_loops; ++i)
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 6e0189df2b3b..0cb7f7b731fb 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -620,7 +620,7 @@ static int report_lock_release_event(struct perf_evsel *evsel,
case SEQ_STATE_READ_ACQUIRED:
seq->read_count--;
BUG_ON(seq->read_count < 0);
- if (!seq->read_count) {
+ if (seq->read_count) {
ls->nr_release++;
goto end;
}
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 0bdb34fee9d8..215da628d60b 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -376,6 +376,9 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs)
for (k = 0; k < pev->ntevs; k++) {
struct probe_trace_event *tev = &pev->tevs[k];
+ /* Skipped events have no event name */
+ if (!tev->event)
+ continue;
/* We use tev's name for showing new events */
show_perf_probe_event(tev->group, tev->event, pev,
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 5312c761a5db..05eae94d09cb 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -447,8 +447,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
if (rep->time_str)
ret += fprintf(fp, " (time slices: %s)", rep->time_str);
- if (symbol_conf.show_ref_callgraph &&
- strstr(evname, "call-graph=no")) {
+ if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) {
ret += fprintf(fp, ", show reference callgraph");
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 6aae10ff954c..adabe9d4dc86 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -422,7 +422,7 @@ static void process_interval(void)
}
init_stats(&walltime_nsecs_stats);
- update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000);
+ update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
print_counters(&rs, 0, NULL);
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index d0733251a386..9caab84c6294 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -651,7 +651,9 @@ repeat:
delay_msecs = top->delay_secs * MSEC_PER_SEC;
set_term_quiet_input(&save);
/* trash return*/
- getc(stdin);
+ clearerr(stdin);
+ if (poll(&stdin_poll, 1, 0) > 0)
+ getc(stdin);
while (!done) {
perf_top__print_sym_table(top);
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index f7eb63cbbc65..88108598d6e9 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -45,10 +45,12 @@
static char jit_path[PATH_MAX];
static void *marker_addr;
+#ifndef HAVE_GETTID
static inline pid_t gettid(void)
{
return (pid_t)syscall(__NR_gettid);
}
+#endif
static int get_e_machine(struct jitheader *hdr)
{
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 38b5888ef7b3..31331c42b0e3 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -137,7 +137,7 @@ static char *fixregex(char *s)
return s;
/* allocate space for a new string */
- fixed = (char *) malloc(len + 1);
+ fixed = (char *) malloc(len + esc_count + 1);
if (!fixed)
return NULL;
@@ -858,7 +858,7 @@ static int get_maxfds(void)
struct rlimit rlim;
if (getrlimit(RLIMIT_NOFILE, &rlim) == 0)
- return min((int)rlim.rlim_max / 2, 512);
+ return min(rlim.rlim_max / 2, (rlim_t)512);
return 512;
}
@@ -1064,10 +1064,9 @@ static int process_one_file(const char *fpath, const struct stat *sb,
*/
int main(int argc, char *argv[])
{
- int rc;
+ int rc, ret = 0;
int maxfds;
char ldirname[PATH_MAX];
-
const char *arch;
const char *output_file;
const char *start_dirname;
@@ -1138,7 +1137,8 @@ int main(int argc, char *argv[])
/* Make build fail */
fclose(eventsfp);
free_arch_std_events();
- return 1;
+ ret = 1;
+ goto out_free_mapfile;
} else if (rc) {
goto empty_map;
}
@@ -1156,14 +1156,17 @@ int main(int argc, char *argv[])
/* Make build fail */
fclose(eventsfp);
free_arch_std_events();
- return 1;
+ ret = 1;
}
- return 0;
+
+ goto out_free_mapfile;
empty_map:
fclose(eventsfp);
create_empty_mapping(output_file);
free_arch_std_events();
- return 0;
+out_free_mapfile:
+ free(mapfile);
+ return ret;
}
diff --git a/tools/perf/python/tracepoint.py b/tools/perf/python/tracepoint.py
index eb76f6516247..461848c7f57d 100755
--- a/tools/perf/python/tracepoint.py
+++ b/tools/perf/python/tracepoint.py
@@ -1,4 +1,4 @@
-#! /usr/bin/python
+#! /usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# -*- python -*-
# -*- coding: utf-8 -*-
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index 6cf00650602e..697423ce3bdf 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -44,10 +44,13 @@ volatile long the_var;
#if defined (__x86_64__)
extern void __test_function(volatile long *ptr);
asm (
+ ".pushsection .text;"
".globl __test_function\n"
+ ".type __test_function, @function;"
"__test_function:\n"
"incq (%rdi)\n"
- "ret\n");
+ "ret\n"
+ ".popsection\n");
#else
static void __test_function(volatile long *ptr)
{
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index 7bedf8608fdd..3e183eef6f85 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -172,6 +172,7 @@ int test__pmu(struct test *test __maybe_unused, int subtest __maybe_unused)
ret = 0;
} while (0);
+ perf_pmu__del_formats(&formats);
test_format_dir_put(format);
return ret;
}
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 0e2d00d69e6e..66e46bc8d6f1 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -173,7 +173,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
.data = {1, 211, 212, 213},
};
u64 regs[64];
- const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
+ const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 };
const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
struct perf_sample sample = {
.ip = 101,
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 7cb99b433888..c2cc42daf924 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -14,7 +14,7 @@ add_probe_vfs_getname() {
if [ $had_vfs_getname -eq 1 ] ; then
line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
+ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
fi
}
diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh
index 22c9fc900c84..f8c44a85650b 100755
--- a/tools/perf/trace/beauty/arch_errno_names.sh
+++ b/tools/perf/trace/beauty/arch_errno_names.sh
@@ -91,7 +91,7 @@ EoHEADER
# in tools/perf/arch
archlist=""
for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | grep -v x86 | sort); do
- test -d arch/$arch && archlist="$archlist $arch"
+ test -d $toolsdir/perf/arch/$arch && archlist="$archlist $arch"
done
for arch in x86 $archlist generic; do
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 7efe15b9618d..4eaac6aaaefe 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -10,6 +10,7 @@ libperf-y += evlist.o
libperf-y += evsel.o
libperf-y += evsel_fprintf.o
libperf-y += find_bit.o
+libperf-y += get_current_dir_name.o
libperf-y += kallsyms.o
libperf-y += levenshtein.o
libperf-y += llvm-utils.o
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 40c93d8158b5..f7c4dcb9d582 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -256,10 +256,6 @@ static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
queue->set = true;
queue->tid = buffer->tid;
queue->cpu = buffer->cpu;
- } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
- pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
- queue->cpu, queue->tid, buffer->cpu, buffer->tid);
- return -EINVAL;
}
buffer->buffer_nr = queues->next_buffer_nr++;
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index f93846edc1e0..827d844f4efb 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -462,7 +462,7 @@ static void set_max_cpu_num(void)
/* get the highest possible cpu number for a sparse allocation */
ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
- if (ret == PATH_MAX) {
+ if (ret >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
goto out;
}
@@ -473,7 +473,7 @@ static void set_max_cpu_num(void)
/* get the highest present cpu number for a sparse allocation */
ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
- if (ret == PATH_MAX) {
+ if (ret >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
goto out;
}
@@ -501,7 +501,7 @@ static void set_max_node_num(void)
/* get the highest possible cpu number for a sparse allocation */
ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
- if (ret == PATH_MAX) {
+ if (ret >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
goto out;
}
@@ -586,7 +586,7 @@ int cpu__setup_cpunode_map(void)
return 0;
n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
- if (n == PATH_MAX) {
+ if (n >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
return -1;
}
@@ -601,7 +601,7 @@ int cpu__setup_cpunode_map(void)
continue;
n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
- if (n == PATH_MAX) {
+ if (n >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
continue;
}
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 938def6d0bb9..f540037eb705 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -278,14 +278,12 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
enum cs_etm_sample_type sample_type)
{
u32 et = 0;
- struct int_node *inode = NULL;
+ int cpu;
if (decoder->packet_count >= MAX_BUFFER - 1)
return OCSD_RESP_FATAL_SYS_ERR;
- /* Search the RB tree for the cpu associated with this traceID */
- inode = intlist__find(traceid_list, trace_chan_id);
- if (!inode)
+ if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
return OCSD_RESP_FATAL_SYS_ERR;
et = decoder->tail;
@@ -296,7 +294,7 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
decoder->packet_buffer[et].sample_type = sample_type;
decoder->packet_buffer[et].exc = false;
decoder->packet_buffer[et].exc_ret = false;
- decoder->packet_buffer[et].cpu = *((int *)inode->priv);
+ decoder->packet_buffer[et].cpu = cpu;
decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 7b5e15cc6b71..3275b8dc9344 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -87,10 +87,27 @@ struct cs_etm_queue {
struct cs_etm_packet *packet;
};
+/* RB tree for quick conversion between traceID and metadata pointers */
+static struct intlist *traceid_list;
+
static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
pid_t tid, u64 time_);
+int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
+{
+ struct int_node *inode;
+ u64 *metadata;
+
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return -EINVAL;
+
+ metadata = inode->priv;
+ *cpu = (int)metadata[CS_ETM_CPU];
+ return 0;
+}
+
static void cs_etm__packet_dump(const char *pkt_string)
{
const char *color = PERF_COLOR_BLUE;
@@ -230,7 +247,7 @@ static void cs_etm__free(struct perf_session *session)
cs_etm__free_events(session);
session->auxtrace = NULL;
- /* First remove all traceID/CPU# nodes for the RB tree */
+ /* First remove all traceID/metadata nodes for the RB tree */
intlist__for_each_entry_safe(inode, tmp, traceid_list)
intlist__remove(traceid_list, inode);
/* Then the RB tree itself */
@@ -1316,9 +1333,9 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
0xffffffff);
/*
- * Create an RB tree for traceID-CPU# tuple. Since the conversion has
- * to be made for each packet that gets decoded, optimizing access in
- * anything other than a sequential array is worth doing.
+ * Create an RB tree for traceID-metadata tuple. Since the conversion
+ * has to be made for each packet that gets decoded, optimizing access
+ * in anything other than a sequential array is worth doing.
*/
traceid_list = intlist__new(NULL);
if (!traceid_list) {
@@ -1384,8 +1401,8 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
err = -EINVAL;
goto err_free_metadata;
}
- /* All good, associate the traceID with the CPU# */
- inode->priv = &metadata[j][CS_ETM_CPU];
+ /* All good, associate the traceID with the metadata pointer */
+ inode->priv = metadata[j];
}
/*
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 37f8d48179ca..97c3152f5bfd 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -53,9 +53,6 @@ enum {
CS_ETMV4_PRIV_MAX,
};
-/* RB tree for quick conversion between traceID and CPUs */
-struct intlist *traceid_list;
-
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
@@ -69,6 +66,7 @@ static const u64 __perf_cs_etmv4_magic = 0x4040404040404040ULL;
#ifdef HAVE_CSTRACE_SUPPORT
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
+int cs_etm__get_cpu(u8 trace_chan_id, int *cpu);
#else
static inline int
cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
@@ -76,6 +74,12 @@ cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
{
return -1;
}
+
+static inline int cs_etm__get_cpu(u8 trace_chan_id __maybe_unused,
+ int *cpu __maybe_unused)
+{
+ return -1;
+}
#endif
#endif
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index cee717a3794f..56f86317694d 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -38,6 +38,7 @@ char dso__symtab_origin(const struct dso *dso)
[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D',
[DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
+ [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x',
[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
[DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
[DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
@@ -120,6 +121,21 @@ int dso__read_binary_type_filename(const struct dso *dso,
snprintf(filename + len, size - len, "%s", dso->long_name);
break;
+ case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
+ /*
+ * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in
+ * /usr/lib/debug/lib when it is expected to be in
+ * /usr/lib/debug/usr/lib
+ */
+ if (strlen(dso->long_name) < 9 ||
+ strncmp(dso->long_name, "/usr/lib/", 9)) {
+ ret = -1;
+ break;
+ }
+ len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
+ snprintf(filename + len, size - len, "%s", dso->long_name + 4);
+ break;
+
case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
{
const char *last_slash;
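
A standalone illustration of the "long_name + 4" path arithmetic in the MIXEDUP_UBUNTU_DEBUGINFO case added above; the library path is a made-up example and the plain "/usr/lib/debug" prefix stands in for __symbol__join_symfs():

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *long_name = "/usr/lib/x86_64-linux-gnu/libc.so.6";
        char filename[4096];

        if (strlen(long_name) < 9 || strncmp(long_name, "/usr/lib/", 9))
                return 1;       /* not a /usr/lib/ DSO, the quirk does not apply */

        /* Skipping the leading "/usr" (4 chars) maps /usr/lib/... to /lib/... */
        snprintf(filename, sizeof(filename), "%s%s",
                 "/usr/lib/debug", long_name + 4);

        printf("%s\n", filename);  /* /usr/lib/debug/lib/x86_64-linux-gnu/libc.so.6 */
        return 0;
}
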
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index c5380500bed4..0b474c7ef1e1 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -25,6 +25,7 @@ enum dso_binary_type {
DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
DSO_BINARY_TYPE__GUEST_KMODULE,
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 29e75c051d04..230e94bf7775 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -332,6 +332,7 @@ bool die_is_func_def(Dwarf_Die *dw_die)
int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
{
Dwarf_Addr base, end;
+ Dwarf_Attribute attr;
if (!addr)
return -EINVAL;
@@ -339,6 +340,13 @@ int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
if (dwarf_entrypc(dw_die, addr) == 0)
return 0;
+ /*
+ * Since dwarf_ranges() will return 0 if there is no
+ * DW_AT_ranges attribute, we should check for it first.
+ */
+ if (!dwarf_attr(dw_die, DW_AT_ranges, &attr))
+ return -ENOENT;
+
return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0;
}
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index aa9c7df120ca..538c935b40b8 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -912,11 +912,13 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
int err;
union perf_event *event;
- if (symbol_conf.kptr_restrict)
- return -1;
if (map == NULL)
return -1;
+ kmap = map__kmap(map);
+ if (!kmap->ref_reloc_sym)
+ return -1;
+
/*
* We should get this from /sys/kernel/sections/.text, but till that is
* available use this, and after it is use this as a fallback for older
@@ -939,7 +941,6 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
}
- kmap = map__kmap(map);
size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
size = PERF_ALIGN(size, sizeof(u64));
@@ -1659,6 +1660,8 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
}
al->sym = map__find_symbol(al->map, al->addr);
+ } else if (symbol_conf.dso_list) {
+ al->filtered |= (1 << HIST_FILTER__DSO);
}
if (symbol_conf.sym_list &&
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 4fad92213609..11a2aa80802d 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1290,6 +1290,9 @@ void perf_evsel__exit(struct perf_evsel *evsel)
thread_map__put(evsel->threads);
zfree(&evsel->group_name);
zfree(&evsel->name);
+ zfree(&evsel->pmu_name);
+ zfree(&evsel->per_pkg_mask);
+ zfree(&evsel->metric_events);
perf_evsel__object.fini(evsel);
}
diff --git a/tools/perf/util/expr.y b/tools/perf/util/expr.y
index 432b8560cf51..e7bd19c384ae 100644
--- a/tools/perf/util/expr.y
+++ b/tools/perf/util/expr.y
@@ -10,7 +10,8 @@
#define MAXIDLEN 256
%}
-%pure-parser
+%define api.pure full
+
%parse-param { double *final_val }
%parse-param { struct parse_ctx *ctx }
%parse-param { const char **pp }
diff --git a/tools/perf/util/get_current_dir_name.c b/tools/perf/util/get_current_dir_name.c
new file mode 100644
index 000000000000..267aa609a582
--- /dev/null
+++ b/tools/perf/util/get_current_dir_name.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+//
+#ifndef HAVE_GET_CURRENT_DIR_NAME
+#include "util.h"
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdlib.h>
+
+/* Android's 'bionic' library, for one, doesn't have this */
+
+char *get_current_dir_name(void)
+{
+ char pwd[PATH_MAX];
+
+ return getcwd(pwd, sizeof(pwd)) == NULL ? NULL : strdup(pwd);
+}
+#endif // HAVE_GET_CURRENT_DIR_NAME
diff --git a/tools/perf/util/intel-pt-decoder/insn.h b/tools/perf/util/intel-pt-decoder/insn.h
index 2669c9f748e4..1a01d0a41ab8 100644
--- a/tools/perf/util/intel-pt-decoder/insn.h
+++ b/tools/perf/util/intel-pt-decoder/insn.h
@@ -208,6 +208,21 @@ static inline int insn_offset_immediate(struct insn *insn)
return insn_offset_displacement(insn) + insn->displacement.nbytes;
}
+/**
+ * for_each_insn_prefix() -- Iterate prefixes in the instruction
+ * @insn: Pointer to struct insn.
+ * @idx: Index storage.
+ * @prefix: Prefix byte.
+ *
+ * Iterate over the prefix bytes of the given @insn. Each prefix byte is stored
+ * in @prefix and the index is stored in @idx (note that @idx is just a cursor;
+ * do not change it).
+ * Since prefixes.nbytes can be bigger than 4 when some prefixes are repeated,
+ * it cannot be used as a loop bound over the prefixes.
+ */
+#define for_each_insn_prefix(insn, idx, prefix) \
+ for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
+
#define POP_SS_OPCODE 0x1f
#define MOV_SREG_OPCODE 0x8e
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 4357141c7c92..e2a6c22959f2 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1063,6 +1063,8 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
decoder->set_fup_tx_flags = false;
decoder->tx_flags = decoder->fup_tx_flags;
decoder->state.type = INTEL_PT_TRANSACTION;
+ if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
+ decoder->state.type |= INTEL_PT_BRANCH;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.flags = decoder->fup_tx_flags;
@@ -1129,7 +1131,10 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
return 0;
if (err == -EAGAIN ||
intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
- if (intel_pt_fup_event(decoder))
+ bool no_tip = decoder->pkt_state != INTEL_PT_STATE_FUP;
+
+ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+ if (intel_pt_fup_event(decoder) && no_tip)
return 0;
return -EAGAIN;
}
@@ -1595,6 +1600,9 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
break;
case INTEL_PT_CYC:
+ intel_pt_calc_cyc_timestamp(decoder);
+ break;
+
case INTEL_PT_VMCS:
case INTEL_PT_MNT:
case INTEL_PT_PAD:
@@ -1780,17 +1788,13 @@ next:
}
if (decoder->set_fup_mwait)
no_tip = true;
+ if (no_tip)
+ decoder->pkt_state = INTEL_PT_STATE_FUP_NO_TIP;
+ else
+ decoder->pkt_state = INTEL_PT_STATE_FUP;
err = intel_pt_walk_fup(decoder);
- if (err != -EAGAIN) {
- if (err)
- return err;
- if (no_tip)
- decoder->pkt_state =
- INTEL_PT_STATE_FUP_NO_TIP;
- else
- decoder->pkt_state = INTEL_PT_STATE_FUP;
- return 0;
- }
+ if (err != -EAGAIN)
+ return err;
if (no_tip) {
no_tip = false;
break;
@@ -2375,15 +2379,11 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
err = intel_pt_walk_tip(decoder);
break;
case INTEL_PT_STATE_FUP:
- decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
err = intel_pt_walk_fup(decoder);
if (err == -EAGAIN)
err = intel_pt_walk_fup_tip(decoder);
- else if (!err)
- decoder->pkt_state = INTEL_PT_STATE_FUP;
break;
case INTEL_PT_STATE_FUP_NO_TIP:
- decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
err = intel_pt_walk_fup(decoder);
if (err == -EAGAIN)
err = intel_pt_walk_trace(decoder);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index ff2c41ea94c8..256b4755c087 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -505,8 +505,10 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
*ip += intel_pt_insn->length;
- if (to_ip && *ip == to_ip)
+ if (to_ip && *ip == to_ip) {
+ intel_pt_insn->length = 0;
goto out_no_cache;
+ }
if (*ip >= al.map->end)
break;
@@ -876,6 +878,8 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
if (queue->tid == -1 || pt->have_sched_switch) {
ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
+ if (ptq->tid == -1)
+ ptq->pid = -1;
thread__zput(ptq->thread);
}
@@ -891,6 +895,7 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
+ ptq->insn_len = 0;
if (ptq->state->flags & INTEL_PT_ABORT_TX) {
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
} else if (ptq->state->flags & INTEL_PT_ASYNC) {
@@ -1915,10 +1920,8 @@ static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
tid = sample->tid;
}
- if (tid == -1) {
- pr_err("context_switch event has no tid\n");
- return -EINVAL;
- }
+ if (tid == -1)
+ intel_pt_log("context_switch event has no tid\n");
intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index d3d3601f41cc..769d11575a7b 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -88,8 +88,7 @@ static inline bool replace_android_lib(const char *filename, char *newfilename)
if (!strncmp(filename, "/system/lib/", 12)) {
char *ndk, *app;
const char *arch;
- size_t ndk_length;
- size_t app_length;
+ int ndk_length, app_length;
ndk = getenv("NDK_ROOT");
app = getenv("APP_PLATFORM");
@@ -117,8 +116,8 @@ static inline bool replace_android_lib(const char *filename, char *newfilename)
if (new_length > PATH_MAX)
return false;
snprintf(newfilename, new_length,
- "%s/platforms/%s/arch-%s/usr/lib/%s",
- ndk, app, arch, libname);
+ "%.*s/platforms/%.*s/arch-%s/usr/lib/%s",
+ ndk_length, ndk, app_length, app, arch, libname);
return true;
}
diff --git a/tools/perf/util/mem2node.c b/tools/perf/util/mem2node.c
index c6fd81c02586..81c5a2e438b7 100644
--- a/tools/perf/util/mem2node.c
+++ b/tools/perf/util/mem2node.c
@@ -1,5 +1,6 @@
#include <errno.h>
#include <inttypes.h>
+#include <asm/bug.h>
#include <linux/bitmap.h>
#include "mem2node.h"
#include "util.h"
@@ -92,7 +93,7 @@ int mem2node__init(struct mem2node *map, struct perf_env *env)
/* Cut unused entries, due to merging. */
tmp_entries = realloc(entries, sizeof(*entries) * j);
- if (tmp_entries)
+ if (tmp_entries || WARN_ON_ONCE(j == 0))
entries = tmp_entries;
for (i = 0; i < j; i++) {
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 8b3dafe3fac3..6dcc6e1182a5 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -171,6 +171,7 @@ static int metricgroup__setup_events(struct list_head *groups,
if (!evsel) {
pr_debug("Cannot resolve %s: %s\n",
eg->metric_name, eg->metric_expr);
+ free(metric_events);
continue;
}
for (i = 0; i < eg->idnum; i++)
@@ -178,11 +179,13 @@ static int metricgroup__setup_events(struct list_head *groups,
me = metricgroup__lookup(metric_events_list, evsel, true);
if (!me) {
ret = -ENOMEM;
+ free(metric_events);
break;
}
expr = malloc(sizeof(struct metric_expr));
if (!expr) {
ret = -ENOMEM;
+ free(metric_events);
break;
}
expr->metric_expr = eg->metric_expr;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 95043cae5774..0eff0c3ba9ee 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1261,7 +1261,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
attr.type = pmu->type;
evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
if (evsel) {
- evsel->pmu_name = name;
+ evsel->pmu_name = name ? strdup(name) : NULL;
evsel->use_uncore_alias = use_uncore_alias;
return 0;
} else {
@@ -1302,7 +1302,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel->snapshot = info.snapshot;
evsel->metric_expr = info.metric_expr;
evsel->metric_name = info.metric_name;
- evsel->pmu_name = name;
+ evsel->pmu_name = name ? strdup(name) : NULL;
evsel->use_uncore_alias = use_uncore_alias;
}
@@ -1421,12 +1421,11 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list,
* event. That can be used to distinguish the leader from
* other members, even they have the same event name.
*/
- if ((leader != evsel) && (leader->pmu_name == evsel->pmu_name)) {
+ if ((leader != evsel) &&
+ !strcmp(leader->pmu_name, evsel->pmu_name)) {
is_leader = false;
continue;
}
- /* The name is always alias name */
- WARN_ON(strcmp(leader->name, evsel->name));
/* Store the leader event for each PMU */
leaders[nr_pmu++] = (uintptr_t) evsel;
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index da8fe57691b8..8d7578be708a 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -1,4 +1,4 @@
-%pure-parser
+%define api.pure full
%parse-param {void *_parse_state}
%parse-param {void *scanner}
%lex-param {void* scanner}
diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c
index e6599e290f46..e5ad120e7f69 100644
--- a/tools/perf/util/parse-regs-options.c
+++ b/tools/perf/util/parse-regs-options.c
@@ -41,7 +41,7 @@ parse_regs(const struct option *opt, const char *str, int unset)
}
fputc('\n', stderr);
/* just printing available regs */
- return -1;
+ goto error;
}
for (r = sample_reg_masks; r->name; r++) {
if (!strcasecmp(s, r->name))
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index c1acf04c9f7a..c42054f42e7e 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -1282,6 +1282,17 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
set_bit(b, bits);
}
+void perf_pmu__del_formats(struct list_head *formats)
+{
+ struct perf_pmu_format *fmt, *tmp;
+
+ list_for_each_entry_safe(fmt, tmp, formats, list) {
+ list_del(&fmt->list);
+ free(fmt->name);
+ free(fmt);
+ }
+}
+
static int sub_non_neg(int a, int b)
{
if (b > a)
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 76fecec7b3f9..21335425f2e4 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -79,6 +79,7 @@ int perf_pmu__new_format(struct list_head *list, char *name,
int config, unsigned long *bits);
void perf_pmu__set_format(unsigned long *bits, long from, long to);
int perf_pmu__format_parse(char *dir, struct list_head *head);
+void perf_pmu__del_formats(struct list_head *formats);
struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
diff --git a/tools/perf/util/print_binary.c b/tools/perf/util/print_binary.c
index 23e367063446..71aeaf6f45cc 100644
--- a/tools/perf/util/print_binary.c
+++ b/tools/perf/util/print_binary.c
@@ -50,7 +50,7 @@ int is_printable_array(char *p, unsigned int len)
len--;
- for (i = 0; i < len; i++) {
+ for (i = 0; i < len && p[i]; i++) {
if (!isprint(p[i]) && !isspace(p[i]))
return 0;
}
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index a22e1f538aea..4ac3c89bfac8 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -111,7 +111,7 @@ void exit_probe_symbol_maps(void)
symbol__exit();
}
-static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
+static struct ref_reloc_sym *kernel_get_ref_reloc_sym(struct map **pmap)
{
/* kmap->ref_reloc_sym should be set if host_machine is initialized */
struct kmap *kmap;
@@ -123,6 +123,10 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
kmap = map__kmap(map);
if (!kmap)
return NULL;
+
+ if (pmap)
+ *pmap = map;
+
return kmap->ref_reloc_sym;
}
@@ -134,7 +138,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
struct map *map;
/* ref_reloc_sym is just a label. Need a special fix*/
- reloc_sym = kernel_get_ref_reloc_sym();
+ reloc_sym = kernel_get_ref_reloc_sym(NULL);
if (reloc_sym && strcmp(name, reloc_sym->name) == 0)
*addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr;
else {
@@ -241,21 +245,22 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
static bool kprobe_blacklist__listed(unsigned long address);
static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
{
- u64 etext_addr = 0;
- int ret;
-
- /* Get the address of _etext for checking non-probable text symbol */
- ret = kernel_get_symbol_address_by_name("_etext", &etext_addr,
- false, false);
+ struct map *map;
+ bool ret = false;
- if (ret == 0 && etext_addr < address)
- pr_warning("%s is out of .text, skip it.\n", symbol);
- else if (kprobe_blacklist__listed(address))
+ map = kernel_get_module_map(NULL);
+ if (map) {
+ ret = address <= map->start || map->end < address;
+ if (ret)
+ pr_warning("%s is out of .text, skip it.\n", symbol);
+ map__put(map);
+ }
+ if (!ret && kprobe_blacklist__listed(address)) {
pr_warning("%s is blacklisted function, skip it.\n", symbol);
- else
- return false;
+ ret = true;
+ }
- return true;
+ return ret;
}
/*
@@ -751,6 +756,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
int ntevs)
{
struct ref_reloc_sym *reloc_sym;
+ struct map *map;
char *tmp;
int i, skipped = 0;
@@ -759,7 +765,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
return post_process_offline_probe_trace_events(tevs, ntevs,
symbol_conf.vmlinux_name);
- reloc_sym = kernel_get_ref_reloc_sym();
+ reloc_sym = kernel_get_ref_reloc_sym(&map);
if (!reloc_sym) {
pr_warning("Relocated base symbol is not found!\n");
return -EINVAL;
@@ -770,9 +776,13 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
continue;
if (tevs[i].point.retprobe && !kretprobe_offset_is_supported())
continue;
- /* If we found a wrong one, mark it by NULL symbol */
+ /*
+ * If we found a wrong one, mark it by NULL symbol.
+ * Since addresses in debuginfo are the same as in objdump output, we
+ * need to convert them to in-memory addresses.
+ */
if (kprobe_warn_out_range(tevs[i].point.symbol,
- tevs[i].point.address)) {
+ map__objdump_2mem(map, tevs[i].point.address))) {
tmp = NULL;
skipped++;
} else {
@@ -1753,8 +1763,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev)
fmt1_str = strtok_r(argv0_str, ":", &fmt);
fmt2_str = strtok_r(NULL, "/", &fmt);
fmt3_str = strtok_r(NULL, " \t", &fmt);
- if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL
- || fmt3_str == NULL) {
+ if (fmt1_str == NULL || fmt2_str == NULL || fmt3_str == NULL) {
semantic_error("Failed to parse event name: %s\n", argv[0]);
ret = -EINVAL;
goto out;
@@ -2888,7 +2897,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
/* Note that the symbols in the kmodule are not relocated */
if (!pev->uprobes && !pev->target &&
(!pp->retprobe || kretprobe_offset_is_supported())) {
- reloc_sym = kernel_get_ref_reloc_sym();
+ reloc_sym = kernel_get_ref_reloc_sym(NULL);
if (!reloc_sym) {
pr_warning("Relocated base symbol is not found!\n");
ret = -EINVAL;
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 876787de7959..4da4ec255246 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -114,6 +114,7 @@ enum dso_binary_type distro_dwarf_types[] = {
DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
@@ -1350,7 +1351,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
tf.ntevs = 0;
ret = debuginfo__find_probes(dbg, &tf.pf);
- if (ret < 0) {
+ if (ret < 0 || tf.ntevs == 0) {
for (i = 0; i < tf.ntevs; i++)
clear_probe_trace_event(&tf.tevs[i]);
zfree(tevs);
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 9569cc06e0a7..2814251df06b 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1493,7 +1493,6 @@ static void _free_command_line(wchar_t **command_line, int num)
static int python_start_script(const char *script, int argc, const char **argv)
{
struct tables *tables = &tables_global;
- PyMODINIT_FUNC (*initfunc)(void);
#if PY_MAJOR_VERSION < 3
const char **command_line;
#else
@@ -1508,20 +1507,18 @@ static int python_start_script(const char *script, int argc, const char **argv)
FILE *fp;
#if PY_MAJOR_VERSION < 3
- initfunc = initperf_trace_context;
command_line = malloc((argc + 1) * sizeof(const char *));
command_line[0] = script;
for (i = 1; i < argc + 1; i++)
command_line[i] = argv[i - 1];
+ PyImport_AppendInittab(name, initperf_trace_context);
#else
- initfunc = PyInit_perf_trace_context;
command_line = malloc((argc + 1) * sizeof(wchar_t *));
command_line[0] = Py_DecodeLocale(script, NULL);
for (i = 1; i < argc + 1; i++)
command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
+ PyImport_AppendInittab(name, PyInit_perf_trace_context);
#endif
-
- PyImport_AppendInittab(name, initfunc);
Py_Initialize();
#if PY_MAJOR_VERSION < 3
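The scripting-engine hunk above registers the perf_trace_context module with PyImport_AppendInittab() before Py_Initialize(), the ordering CPython documents for built-in module registration. A standalone sketch of that embedding pattern, using only the public Python 3 C API (the "demo" module and its method are invented for illustration):

    #include <Python.h>

    static PyObject *demo_hello(PyObject *self, PyObject *args)
    {
            (void)self; (void)args;
            Py_RETURN_NONE;
    }

    static PyMethodDef demo_methods[] = {
            { "hello", demo_hello, METH_NOARGS, "do nothing" },
            { NULL, NULL, 0, NULL },
    };

    static struct PyModuleDef demo_module = {
            PyModuleDef_HEAD_INIT, "demo", NULL, -1, demo_methods,
    };

    static PyObject *PyInit_demo(void)
    {
            return PyModule_Create(&demo_module);
    }

    int main(void)
    {
            /* Registration must happen before Py_Initialize(), as in the fix above. */
            PyImport_AppendInittab("demo", PyInit_demo);
            Py_Initialize();
            PyRun_SimpleString("import demo; demo.hello()");
            Py_Finalize();
            return 0;
    }

Link against libpython (for example with the flags from python3-config --embed --ldflags on Python 3.8 and later).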
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index f016d1b330e5..6a2037b52098 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -488,6 +488,7 @@ static void perf_event__mmap2_swap(union perf_event *event,
event->mmap2.maj = bswap_32(event->mmap2.maj);
event->mmap2.min = bswap_32(event->mmap2.min);
event->mmap2.ino = bswap_64(event->mmap2.ino);
+ event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
if (sample_id_all) {
void *data = &event->mmap2.filename;
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 46daa22b86e3..85ff4f68adc0 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -2690,7 +2690,7 @@ static char *prefix_if_not_in(const char *pre, char *str)
return str;
if (asprintf(&n, "%s,%s", pre, str) < 0)
- return NULL;
+ n = NULL;
free(str);
return n;
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index af3f9b9f1e8b..b8e77617fdc4 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -191,16 +191,30 @@ static void find_address_in_section(bfd *abfd, asection *section, void *data)
bfd_vma pc, vma;
bfd_size_type size;
struct a2l_data *a2l = data;
+ flagword flags;
if (a2l->found)
return;
- if ((bfd_get_section_flags(abfd, section) & SEC_ALLOC) == 0)
+#ifdef bfd_get_section_flags
+ flags = bfd_get_section_flags(abfd, section);
+#else
+ flags = bfd_section_flags(section);
+#endif
+ if ((flags & SEC_ALLOC) == 0)
return;
pc = a2l->addr;
+#ifdef bfd_get_section_vma
vma = bfd_get_section_vma(abfd, section);
+#else
+ vma = bfd_section_vma(section);
+#endif
+#ifdef bfd_get_section_size
size = bfd_get_section_size(section);
+#else
+ size = bfd_section_size(section);
+#endif
if (pc < vma || pc >= vma + size)
return;
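The srcline.c hunk above copes with binutils 2.34, where bfd_get_section_flags()/bfd_get_section_vma()/bfd_get_section_size() were renamed to bfd_section_flags()/bfd_section_vma()/bfd_section_size() and lost their bfd argument. One way to keep the #ifdef noise out of every call site is to wrap both spellings once; a sketch, assuming the old names are still visible as macros when the old API is present:

    /* bfd.h refuses to compile unless config.h, or these two macros, come first. */
    #define PACKAGE "bfd-compat-demo"
    #define PACKAGE_VERSION "1.0"
    #include <bfd.h>

    static inline flagword compat_section_flags(bfd *abfd, asection *sec)
    {
    #ifdef bfd_get_section_flags         /* binutils < 2.34 */
            return bfd_get_section_flags(abfd, sec);
    #else                                /* binutils >= 2.34 dropped the bfd argument */
            (void)abfd;
            return bfd_section_flags(sec);
    #endif
    }

    static inline bfd_vma compat_section_vma(bfd *abfd, asection *sec)
    {
    #ifdef bfd_get_section_vma
            return bfd_get_section_vma(abfd, sec);
    #else
            (void)abfd;
            return bfd_section_vma(sec);
    #endif
    }

    static inline bfd_size_type compat_section_size(asection *sec)
    {
    #ifdef bfd_get_section_size
            return bfd_get_section_size(sec);
    #else
            return bfd_section_size(sec);
    #endif
    }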
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 6917ba8a0024..b254bee701de 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -358,8 +358,10 @@ int perf_stat_process_counter(struct perf_stat_config *config,
* interval mode, otherwise overall avg running
* averages will be shown for each interval.
*/
- if (config->interval)
- init_stats(ps->res_stats);
+ if (config->interval) {
+ for (i = 0; i < 3; i++)
+ init_stats(&ps->res_stats[i]);
+ }
if (counter->per_pkg)
zero_per_pkg(counter);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index a701a8a48f00..166c621e0223 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1421,6 +1421,7 @@ struct kcore_copy_info {
u64 first_symbol;
u64 last_symbol;
u64 first_module;
+ u64 first_module_symbol;
u64 last_module_symbol;
size_t phnum;
struct list_head phdrs;
@@ -1497,6 +1498,8 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
return 0;
if (strchr(name, '[')) {
+ if (!kci->first_module_symbol || start < kci->first_module_symbol)
+ kci->first_module_symbol = start;
if (start > kci->last_module_symbol)
kci->last_module_symbol = start;
return 0;
@@ -1694,6 +1697,10 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
kci->etext += page_size;
}
+ if (kci->first_module_symbol &&
+ (!kci->first_module || kci->first_module_symbol < kci->first_module))
+ kci->first_module = kci->first_module_symbol;
+
kci->first_module = round_down(kci->first_module, page_size);
if (kci->last_module_symbol) {
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 91404bacc3df..b6f8349abc67 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -65,6 +65,7 @@ static enum dso_binary_type binary_type_symtab[] = {
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
@@ -1419,6 +1420,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
+ case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
return !kmod && dso->kernel == DSO_TYPE_USER;
diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
index ed0205cc7942..1fd175bb4600 100644
--- a/tools/perf/util/symbol_fprintf.c
+++ b/tools/perf/util/symbol_fprintf.c
@@ -66,7 +66,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
for (nd = rb_first(&dso->symbol_names); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
- fprintf(fp, "%s\n", pos->sym.name);
+ ret += fprintf(fp, "%s\n", pos->sym.name);
}
return ret;
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 5eb1b2469bba..12324325ea0b 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -381,6 +381,7 @@ static int read_saved_cmdline(struct tep_handle *pevent)
pr_debug("error reading saved cmdlines\n");
goto out;
}
+ buf[ret] = '\0';
parse_saved_cmdline(pevent, buf, size);
ret = 0;
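The one-line fix above NUL-terminates the saved-cmdlines buffer before it is handed to the string parser, since a read length alone says nothing about termination. A hedged sketch of the same idea as a helper (read_cstring() is a name invented here, not a perf API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Read exactly 'size' bytes from 'fp' and return a heap buffer that is
     * guaranteed to end in '\0', so str*()-style parsers cannot overrun it.
     * Returns NULL on allocation failure or short read. */
    static char *read_cstring(FILE *fp, size_t size)
    {
            char *buf = malloc(size + 1);

            if (!buf)
                    return NULL;
            if (fread(buf, 1, size, fp) != size) {
                    free(buf);
                    return NULL;
            }
            buf[size] = '\0';
            return buf;
    }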
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index dc58254a2b69..2efec9e77753 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -22,7 +22,7 @@ static inline void *zalloc(size_t size)
return calloc(1, size);
}
-#define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
+#define zfree(ptr) ({ free((void *)*ptr); *ptr = NULL; })
struct dirent;
struct nsinfo;
@@ -57,6 +57,10 @@ int fetch_kernel_version(unsigned int *puint,
const char *perf_tip(const char *dirpath);
+#ifndef HAVE_GET_CURRENT_DIR_NAME
+char *get_current_dir_name(void);
+#endif
+
#ifndef HAVE_SCHED_GETCPU_SUPPORT
int sched_getcpu(void);
#endif
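The zfree() change above casts the pointee to (void *) so the macro also works on const-qualified pointer members without a discarded-qualifier warning. A tiny self-contained illustration (struct record is invented for the example):

    #include <stdlib.h>
    #include <string.h>

    /* Same shape as the tools/perf macro above: free the pointee, then NULL it. */
    #define zfree(ptr) ({ free((void *)*ptr); *ptr = NULL; })

    struct record {
            const char *name;            /* const-qualified: free(*ptr) alone would warn */
    };

    int main(void)
    {
            struct record r = { .name = strdup("demo") };

            zfree(&r.name);
            return r.name == NULL ? 0 : 1;
    }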
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
index fc116c060b98..32ff7baf39df 100644
--- a/tools/power/acpi/Makefile.config
+++ b/tools/power/acpi/Makefile.config
@@ -57,7 +57,6 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM}
CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
CROSS_COMPILE ?= $(CROSS)
LD = $(CC)
-HOSTCC = gcc
# check if compiler option is supported
cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
index 2fa3c5757bcb..dbed3d213bf1 100755
--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
@@ -10,11 +10,11 @@ then this utility enables and collects trace data for a user specified interval
and generates performance plots.
Prerequisites:
- Python version 2.7.x
+ Python version 2.7.x or higher
gnuplot 5.0 or higher
- gnuplot-py 1.8
+ gnuplot-py 1.8 or higher
(Most of the distributions have these required packages. They may be called
- gnuplot-py, phython-gnuplot. )
+ gnuplot-py, python-gnuplot or python3-gnuplot, gnuplot-nox, ... )
HWP (Hardware P-States are disabled)
Kernel config for Linux trace is enabled
@@ -180,7 +180,7 @@ def plot_pstate_cpu_with_sample():
g_plot('set xlabel "Samples"')
g_plot('set ylabel "P-State"')
g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now()))
- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
@@ -197,7 +197,7 @@ def plot_pstate_cpu():
# the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file.
# plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s'
#
- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
@@ -211,7 +211,7 @@ def plot_load_cpu():
g_plot('set ylabel "CPU load (percent)"')
g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now()))
- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
@@ -225,7 +225,7 @@ def plot_frequency_cpu():
g_plot('set ylabel "CPU Frequency (GHz)"')
g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now()))
- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
@@ -240,7 +240,7 @@ def plot_duration_cpu():
g_plot('set ylabel "Timer Duration (MilliSeconds)"')
g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now()))
- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
@@ -254,7 +254,7 @@ def plot_scaled_cpu():
g_plot('set ylabel "Scaled Busy (Unitless)"')
g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now()))
- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
@@ -268,7 +268,7 @@ def plot_boost_cpu():
g_plot('set ylabel "CPU IO Boost (percent)"')
g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now()))
- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
@@ -282,7 +282,7 @@ def plot_ghz_cpu():
g_plot('set ylabel "TSC Frequency (GHz)"')
g_plot('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now()))
- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
+ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ)
g_plot('title_list = "{}"'.format(title_list))
g_plot(plot_str)
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 8fc6b1ca47dc..42dbe05b1807 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -60,6 +60,16 @@ $(call allow-override,LD,$(CROSS_COMPILE)ld)
$(call allow-override,CXX,$(CROSS_COMPILE)g++)
$(call allow-override,STRIP,$(CROSS_COMPILE)strip)
+ifneq ($(LLVM),)
+HOSTAR ?= llvm-ar
+HOSTCC ?= clang
+HOSTLD ?= ld.lld
+else
+HOSTAR ?= ar
+HOSTCC ?= gcc
+HOSTLD ?= ld
+endif
+
ifeq ($(CC_NO_CLANG), 1)
EXTRA_WARNINGS += -Wstrict-aliasing=3
endif
diff --git a/tools/testing/ktest/compare-ktest-sample.pl b/tools/testing/ktest/compare-ktest-sample.pl
index 4118eb4a842d..ebea21d0a1be 100755
--- a/tools/testing/ktest/compare-ktest-sample.pl
+++ b/tools/testing/ktest/compare-ktest-sample.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# SPDX-License-Identifier: GPL-2.0
open (IN,"ktest.pl");
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 3118fc0d149b..406401f1acc2 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -4177,7 +4177,12 @@ sub do_send_mail {
$mail_command =~ s/\$SUBJECT/$subject/g;
$mail_command =~ s/\$MESSAGE/$message/g;
- run_command $mail_command;
+ my $ret = run_command $mail_command;
+ if (!$ret && defined($file)) {
+ # try again without the file
+ $message .= "\n\n*** FAILED TO SEND LOG ***\n\n";
+ do_send_email($subject, $message);
+ }
}
sub send_email {
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 9b552c0fc47d..4e202217fae1 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1017,6 +1017,8 @@ static void __run_parallel(int tasks, void (*fn)(int task, void *data),
pid_t pid[tasks];
int i;
+ fflush(stdout);
+
for (i = 0; i < tasks; i++) {
pid[i] = fork();
if (pid[i] == 0) {
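The fflush(stdout) added above drains the stdio buffer before fork(); otherwise every child inherits the pending bytes and the same output appears once per process. A minimal reproduction of the effect the fix avoids (a sketch, not part of the selftest):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
            printf("queued line\n");     /* fully buffered when stdout is a pipe or file */
            /* fflush(stdout); */        /* with this, the line is written exactly once */
            if (fork() == 0)
                    exit(0);             /* child flushes its inherited copy of the buffer */
            wait(NULL);
            return 0;                    /* parent flushes its own copy: the line shows up twice */
    }

Run it with stdout redirected to a file to see the duplicate; uncommenting the fflush() restores a single line, which is exactly what the test_maps.c change does before spawning its workers.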
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index d59642e70f56..6b46db61c6d3 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
# Copyright (C) 2017 Netronome Systems, Inc.
#
@@ -787,6 +787,7 @@ try:
start_test("Test disabling TC offloads is rejected while filters installed...")
ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...")
+ sim.set_ethtool_tc_offloads(True)
start_test("Test qdisc removal frees things...")
sim.tc_flush_filters()
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 89f8b0dae7ef..bad3505d66e0 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -1118,6 +1118,7 @@ static int extract_build_id(char *build_id, size_t size)
len = size;
memcpy(build_id, line, len);
build_id[len] = '\0';
+ free(line);
return 0;
err:
fclose(fp);
diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/test_select_reuseport.c
index cdbbdab2725f..b14d25bfa830 100644
--- a/tools/testing/selftests/bpf/test_select_reuseport.c
+++ b/tools/testing/selftests/bpf/test_select_reuseport.c
@@ -616,13 +616,13 @@ static void cleanup_per_test(void)
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_update_elem(result_map, &i, &zero, BPF_ANY);
- RET_IF(err, "reset elem in result_map",
- "i:%u err:%d errno:%d\n", i, err, errno);
+ CHECK(err, "reset elem in result_map",
+ "i:%u err:%d errno:%d\n", i, err, errno);
}
err = bpf_map_update_elem(linum_map, &zero, &zero, BPF_ANY);
- RET_IF(err, "reset line number in linum_map", "err:%d errno:%d\n",
- err, errno);
+ CHECK(err, "reset line number in linum_map", "err:%d errno:%d\n",
+ err, errno);
for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++)
close(sk_fds[i]);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 9db5a7378f40..e1e4b6ab83f7 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2002,29 +2002,27 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
},
{
- "check skb->hash byte load not permitted 1",
+ "check skb->hash byte load permitted 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 1),
BPF_EXIT_INSN(),
},
- .errstr = "invalid bpf_context access",
- .result = REJECT,
+ .result = ACCEPT,
},
{
- "check skb->hash byte load not permitted 2",
+ "check skb->hash byte load permitted 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 2),
BPF_EXIT_INSN(),
},
- .errstr = "invalid bpf_context access",
- .result = REJECT,
+ .result = ACCEPT,
},
{
- "check skb->hash byte load not permitted 3",
+ "check skb->hash byte load permitted 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -2036,8 +2034,7 @@ static struct bpf_test tests[] = {
#endif
BPF_EXIT_INSN(),
},
- .errstr = "invalid bpf_context access",
- .result = REJECT,
+ .result = ACCEPT,
},
{
"check cb access: byte, wrong type",
@@ -2149,7 +2146,7 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
},
{
- "check skb->hash half load not permitted",
+ "check skb->hash half load permitted 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -2161,6 +2158,37 @@ static struct bpf_test tests[] = {
#endif
BPF_EXIT_INSN(),
},
+ .result = ACCEPT,
+ },
+ {
+ "check skb->hash half load not permitted, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check skb->hash half load not permitted, unaligned 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+#endif
+ BPF_EXIT_INSN(),
+ },
.errstr = "invalid bpf_context access",
.result = REJECT,
},
@@ -2448,6 +2476,7 @@ static struct bpf_test tests[] = {
},
.result = REJECT,
.errstr = "invalid stack off=-79992 size=8",
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
},
{
"PTR_TO_STACK store/load - out of bounds high",
@@ -2836,7 +2865,7 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
},
{
- "unpriv: adding of fp",
+ "unpriv: adding of fp, reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_1, 0),
@@ -2844,6 +2873,21 @@ static struct bpf_test tests[] = {
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "unpriv: adding of fp, imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -7813,7 +7857,7 @@ static struct bpf_test tests[] = {
BPF_JMP_IMM(BPF_JA, 0, 0, -7),
},
.fixup_map1 = { 4 },
- .errstr = "R0 invalid mem access 'inv'",
+ .errstr = "unbounded min value",
.result = REJECT,
},
{
@@ -7894,6 +7938,7 @@ static struct bpf_test tests[] = {
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
.result = REJECT,
},
{
@@ -8266,6 +8311,7 @@ static struct bpf_test tests[] = {
},
.fixup_map1 = { 3 },
.errstr = "pointer offset 1073741822",
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.result = REJECT
},
{
@@ -8287,6 +8333,7 @@ static struct bpf_test tests[] = {
},
.fixup_map1 = { 3 },
.errstr = "pointer offset -1073741822",
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.result = REJECT
},
{
@@ -8458,6 +8505,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN()
},
.errstr = "fp pointer offset 1073741822",
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
.result = REJECT
},
{
@@ -9739,8 +9787,9 @@ static struct bpf_test tests[] = {
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
- .result = REJECT,
+ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
.errstr = "R0 tried to subtract pointer from scalar",
+ .result = REJECT,
},
{
"check deducing bounds from const, 2",
@@ -9753,6 +9802,8 @@ static struct bpf_test tests[] = {
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 1,
},
@@ -9764,32 +9815,37 @@ static struct bpf_test tests[] = {
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
- .result = REJECT,
+ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
.errstr = "R0 tried to subtract pointer from scalar",
+ .result = REJECT,
},
{
"check deducing bounds from const, 4",
.insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R6 has pointer with unsupported alu operation",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
"check deducing bounds from const, 5",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
- .result = REJECT,
+ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
.errstr = "R0 tried to subtract pointer from scalar",
+ .result = REJECT,
},
{
"check deducing bounds from const, 6",
@@ -9800,8 +9856,9 @@ static struct bpf_test tests[] = {
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
- .result = REJECT,
+ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
.errstr = "R0 tried to subtract pointer from scalar",
+ .result = REJECT,
},
{
"check deducing bounds from const, 7",
@@ -9813,8 +9870,9 @@ static struct bpf_test tests[] = {
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
- .result = REJECT,
+ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
.errstr = "dereference of modified ctx ptr",
+ .result = REJECT,
},
{
"check deducing bounds from const, 8",
@@ -9826,8 +9884,9 @@ static struct bpf_test tests[] = {
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
- .result = REJECT,
+ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
.errstr = "dereference of modified ctx ptr",
+ .result = REJECT,
},
{
"check deducing bounds from const, 9",
@@ -9837,8 +9896,9 @@ static struct bpf_test tests[] = {
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
- .result = REJECT,
+ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
.errstr = "R0 tried to subtract pointer from scalar",
+ .result = REJECT,
},
{
"check deducing bounds from const, 10",
@@ -9850,8 +9910,8 @@ static struct bpf_test tests[] = {
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
- .result = REJECT,
.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
+ .result = REJECT,
},
{
"bpf_exit with invalid return code. test1",
@@ -12182,17 +12242,17 @@ static struct bpf_test tests[] = {
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2),
BPF_MOV64_IMM(BPF_REG_4, 256),
BPF_EMIT_CALL(BPF_FUNC_get_stack),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
- BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
+ BPF_JMP_REG(BPF_JSLT, BPF_REG_8, BPF_REG_1, 16),
BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
@@ -12202,7 +12262,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index 075cb0c73014..90418d79ef67 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -95,7 +95,7 @@ int cg_read_strcmp(const char *cgroup, const char *control,
/* Handle the case of comparing against empty string */
if (!expected)
- size = 32;
+ return -1;
else
size = strlen(expected) + 1;
diff --git a/tools/testing/selftests/ftrace/settings b/tools/testing/selftests/ftrace/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/ftrace/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
index 27a54a17da65..f4e92afab14b 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
@@ -30,7 +30,7 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$'
ftrace_filter_check 'schedule*' '^schedule.*$'
# filter by *mid*end
-ftrace_filter_check '*aw*lock' '.*aw.*lock$'
+ftrace_filter_check '*pin*lock' '.*pin.*lock$'
# filter by start*mid*
ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
index 4c156aeab6b8..5ec4d9e18806 100644
--- a/tools/testing/selftests/ipc/msgque.c
+++ b/tools/testing/selftests/ipc/msgque.c
@@ -137,7 +137,7 @@ int dump_queue(struct msgque_data *msgque)
for (kern_id = 0; kern_id < 256; kern_id++) {
ret = msgctl(kern_id, MSG_STAT, &ds);
if (ret < 0) {
- if (errno == -EINVAL)
+ if (errno == EINVAL)
continue;
printf("Failed to get stats for IPC queue with id %d\n",
kern_id);
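The msgque.c fix compares errno against EINVAL rather than -EINVAL: user-space syscall wrappers return -1 and store the positive error code in errno, while negative codes such as -EINVAL exist only inside the kernel. A trivial sketch of the convention:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            if (open("/no/such/path", O_RDONLY) < 0) {
                    /* errno holds a positive code (here ENOENT), never a negated one */
                    printf("open failed: errno=%d (%s)\n", errno, strerror(errno));
                    return errno == ENOENT ? 0 : 1;
            }
            return 1;
    }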
diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh
index 0a76314b4414..1f118916a83e 100755
--- a/tools/testing/selftests/kmod/kmod.sh
+++ b/tools/testing/selftests/kmod/kmod.sh
@@ -505,18 +505,23 @@ function test_num()
fi
}
-function get_test_count()
+function get_test_data()
{
test_num $1
- TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}')
+ local field_num=$(echo $1 | sed 's/^0*//')
+ echo $ALL_TESTS | awk '{print $'$field_num'}'
+}
+
+function get_test_count()
+{
+ TEST_DATA=$(get_test_data $1)
LAST_TWO=${TEST_DATA#*:*}
echo ${LAST_TWO%:*}
}
function get_test_enabled()
{
- test_num $1
- TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}')
+ TEST_DATA=$(get_test_data $1)
echo ${TEST_DATA#*:*:}
}
diff --git a/tools/testing/selftests/kvm/include/x86.h b/tools/testing/selftests/kvm/include/x86.h
index 42c3596815b8..a7667a613bbc 100644
--- a/tools/testing/selftests/kvm/include/x86.h
+++ b/tools/testing/selftests/kvm/include/x86.h
@@ -59,7 +59,7 @@ enum x86_register {
struct desc64 {
uint16_t limit0;
uint16_t base0;
- unsigned base1:8, s:1, type:4, dpl:2, p:1;
+ unsigned base1:8, type:4, s:1, dpl:2, p:1;
unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
uint32_t base3;
uint32_t zero1;
diff --git a/tools/testing/selftests/kvm/lib/x86.c b/tools/testing/selftests/kvm/lib/x86.c
index 4d35eba73dc9..800fe36064f9 100644
--- a/tools/testing/selftests/kvm/lib/x86.c
+++ b/tools/testing/selftests/kvm/lib/x86.c
@@ -449,11 +449,12 @@ static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
desc->limit0 = segp->limit & 0xFFFF;
desc->base0 = segp->base & 0xFFFF;
desc->base1 = segp->base >> 16;
- desc->s = segp->s;
desc->type = segp->type;
+ desc->s = segp->s;
desc->dpl = segp->dpl;
desc->p = segp->present;
desc->limit1 = segp->limit >> 16;
+ desc->avl = segp->avl;
desc->l = segp->l;
desc->db = segp->db;
desc->g = segp->g;
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 0ef203ec59fd..a5d40653a921 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -1,6 +1,10 @@
# This mimics the top-level Makefile. We do it explicitly here so that this
# Makefile can operate with or without the kbuild infrastructure.
+ifneq ($(LLVM),)
+CC := clang
+else
CC := $(CROSS_COMPILE)gcc
+endif
ifeq (0,$(MAKELEVEL))
OUTPUT := $(shell pwd)
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 67048f922ff2..a5ba149761bf 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -987,7 +987,6 @@ ipv6_addr_metric_test()
check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260"
log_test $? 0 "Set metric with peer route on local side"
- log_test $? 0 "User specified metric on local address"
check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260"
log_test $? 0 "Set metric with peer route on peer side"
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
index 197e769c2ed1..f8cda822c1ce 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
@@ -86,11 +86,20 @@ test_ip6gretap()
test_gretap_stp()
{
+ # Sometimes after mirror installation, the neighbor's state is not valid.
+ # The reason is that there is no SW datapath activity related to the
+ # neighbor for the remote GRE address. Therefore whether the corresponding
+ # neighbor will be valid is a matter of luck, and the test is thus racy.
+ # Set the neighbor's state to permanent, so it is always valid.
+ ip neigh replace 192.0.2.130 lladdr $(mac_get $h3) \
+ nud permanent dev br2
full_test_span_gre_stp gt4 $swp3.555 "mirror to gretap"
}
test_ip6gretap_stp()
{
+ ip neigh replace 2001:db8:2::2 lladdr $(mac_get $h3) \
+ nud permanent dev br2
full_test_span_gre_stp gt6 $swp3.555 "mirror to ip6gretap"
}
diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
index 406cc70c571d..c539591937a1 100644
--- a/tools/testing/selftests/net/msg_zerocopy.c
+++ b/tools/testing/selftests/net/msg_zerocopy.c
@@ -125,9 +125,8 @@ static int do_setcpu(int cpu)
CPU_ZERO(&mask);
CPU_SET(cpu, &mask);
if (sched_setaffinity(0, sizeof(mask), &mask))
- error(1, 0, "setaffinity %d", cpu);
-
- if (cfg_verbose)
+ fprintf(stderr, "cpu: unable to pin, may increase variance.\n");
+ else if (cfg_verbose)
fprintf(stderr, "cpu: %u\n", cpu);
return 0;
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index bd9b9632c72b..f496ba3b1cd3 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -364,7 +364,8 @@ static int test_datapath(uint16_t typeflags, int port_off,
int fds[2], fds_udp[2][2], ret;
fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n",
- typeflags, PORT_BASE, PORT_BASE + port_off);
+ typeflags, (uint16_t)PORT_BASE,
+ (uint16_t)(PORT_BASE + port_off));
fds[0] = sock_fanout_open(typeflags, 0);
fds[1] = sock_fanout_open(typeflags, 0);
diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
index dd4162fc0419..c6428f1ac22f 100644
--- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
@@ -114,6 +114,7 @@ static struct option long_options[] = {
{ "tcp", no_argument, 0, 't' },
{ "udp", no_argument, 0, 'u' },
{ "ip", no_argument, 0, 'i' },
+ { NULL, 0, NULL, 0 },
};
static int next_port = 19999;
@@ -327,8 +328,7 @@ int main(int argc, char **argv)
bool all_tests = true;
int arg_index = 0;
int failures = 0;
- int s, t;
- char opt;
+ int s, t, opt;
while ((opt = getopt_long(argc, argv, "", long_options,
&arg_index)) != -1) {
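The rxtimestamp.c changes above address two classic getopt_long() pitfalls: the long_options[] array needs an all-zero terminator, and the option variable must be an int so the -1 end marker is representable (plain char may be unsigned). A minimal sketch of the corrected usage (the option names are arbitrary):

    #include <getopt.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
            static const struct option long_options[] = {
                    { "tcp", no_argument, NULL, 't' },
                    { "udp", no_argument, NULL, 'u' },
                    { NULL,  0,           NULL,  0  },   /* terminator getopt_long() stops at */
            };
            int opt;                                     /* int, not char: must hold -1 */

            while ((opt = getopt_long(argc, argv, "", long_options, NULL)) != -1)
                    printf("got option '%c'\n", opt);
            return 0;
    }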
diff --git a/tools/testing/selftests/networking/timestamping/timestamping.c b/tools/testing/selftests/networking/timestamping/timestamping.c
index 5cdfd743447b..900ed4b47899 100644
--- a/tools/testing/selftests/networking/timestamping/timestamping.c
+++ b/tools/testing/selftests/networking/timestamping/timestamping.c
@@ -332,10 +332,16 @@ int main(int argc, char **argv)
int val;
socklen_t len;
struct timeval next;
+ size_t if_len;
if (argc < 2)
usage(0);
interface = argv[1];
+ if_len = strlen(interface);
+ if (if_len >= IFNAMSIZ) {
+ printf("interface name exceeds IFNAMSIZ\n");
+ exit(1);
+ }
for (i = 2; i < argc; i++) {
if (!strcasecmp(argv[i], "SO_TIMESTAMP"))
@@ -369,12 +375,12 @@ int main(int argc, char **argv)
bail("socket");
memset(&device, 0, sizeof(device));
- strncpy(device.ifr_name, interface, sizeof(device.ifr_name));
+ memcpy(device.ifr_name, interface, if_len + 1);
if (ioctl(sock, SIOCGIFADDR, &device) < 0)
bail("getting interface IP address");
memset(&hwtstamp, 0, sizeof(hwtstamp));
- strncpy(hwtstamp.ifr_name, interface, sizeof(hwtstamp.ifr_name));
+ memcpy(hwtstamp.ifr_name, interface, if_len + 1);
hwtstamp.ifr_data = (void *)&hwconfig;
memset(&hwconfig, 0, sizeof(hwconfig));
hwconfig.tx_type =
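The timestamping.c hunk above rejects over-long interface names up front and then copies the name with memcpy(), including its terminating NUL, instead of relying on strncpy()'s silent truncation. A self-contained helper in the same spirit (ifreq_set_name() is a name made up for this sketch):

    #include <net/if.h>
    #include <string.h>

    /* Copy 'name' into ifr->ifr_name. Returns 0 on success, -1 if the name
     * plus its terminating NUL does not fit in the IFNAMSIZ-byte field. */
    static int ifreq_set_name(struct ifreq *ifr, const char *name)
    {
            size_t len = strlen(name);

            if (len >= IFNAMSIZ)
                    return -1;
            memset(ifr->ifr_name, 0, IFNAMSIZ);
            memcpy(ifr->ifr_name, name, len + 1);   /* len + 1 copies the NUL as well */
            return 0;
    }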
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
index 08cbfbbc7029..17ca36403d04 100755
--- a/tools/testing/selftests/ntb/ntb_test.sh
+++ b/tools/testing/selftests/ntb/ntb_test.sh
@@ -250,7 +250,7 @@ function get_files_count()
split_remote $LOC
if [[ "$REMOTE" == "" ]]; then
- echo $(ls -1 "$LOC"/${NAME}* 2>/dev/null | wc -l)
+ echo $(ls -1 "$VPATH"/${NAME}* 2>/dev/null | wc -l)
else
echo $(ssh "$REMOTE" "ls -1 \"$VPATH\"/${NAME}* | \
wc -l" 2> /dev/null)
diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
index 169a8b9719fb..4f8335e0c985 100644
--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
@@ -384,7 +384,6 @@ int test_alignment_handler_integer(void)
LOAD_DFORM_TEST(ldu);
LOAD_XFORM_TEST(ldx);
LOAD_XFORM_TEST(ldux);
- LOAD_DFORM_TEST(lmw);
STORE_DFORM_TEST(stb);
STORE_XFORM_TEST(stbx);
STORE_DFORM_TEST(stbu);
@@ -403,7 +402,11 @@ int test_alignment_handler_integer(void)
STORE_XFORM_TEST(stdx);
STORE_DFORM_TEST(stdu);
STORE_XFORM_TEST(stdux);
+
+#ifdef __BIG_ENDIAN__
+ LOAD_DFORM_TEST(lmw);
STORE_DFORM_TEST(stmw);
+#endif
return rc;
}
diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
index 87f1f0252299..9ec7674697b1 100644
--- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c
+++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
@@ -23,6 +23,7 @@
#include <limits.h>
#include <sys/time.h>
#include <sys/syscall.h>
+#include <sys/sysinfo.h>
#include <sys/types.h>
#include <sys/shm.h>
#include <linux/futex.h>
@@ -108,8 +109,9 @@ static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu)
static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
{
- int pid;
- cpu_set_t cpuset;
+ int pid, ncpus;
+ cpu_set_t *cpuset;
+ size_t size;
pid = fork();
if (pid == -1) {
@@ -120,14 +122,23 @@ static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
if (pid)
return;
- CPU_ZERO(&cpuset);
- CPU_SET(cpu, &cpuset);
+ ncpus = get_nprocs();
+ size = CPU_ALLOC_SIZE(ncpus);
+ cpuset = CPU_ALLOC(ncpus);
+ if (!cpuset) {
+ perror("malloc");
+ exit(1);
+ }
+ CPU_ZERO_S(size, cpuset);
+ CPU_SET_S(cpu, size, cpuset);
- if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) {
+ if (sched_setaffinity(0, size, cpuset)) {
perror("sched_setaffinity");
+ CPU_FREE(cpuset);
exit(1);
}
+ CPU_FREE(cpuset);
fn(arg);
exit(0);
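The context_switch.c change above (and the pick_online_cpu() rewrite further down in utils.c) moves from a fixed cpu_set_t, which tops out at CPU_SETSIZE processors, to a dynamically sized set. A compact sketch of the allocation pattern (pin_to_cpu() is an illustrative name, not a selftest helper):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdlib.h>
    #include <sys/sysinfo.h>

    /* Pin the calling thread to 'cpu' without assuming the machine has fewer
     * than CPU_SETSIZE processors. Returns 0 on success, -1 on failure. */
    static int pin_to_cpu(int cpu)
    {
            int ncpus = get_nprocs_conf();
            size_t size = CPU_ALLOC_SIZE(ncpus);
            cpu_set_t *set = CPU_ALLOC(ncpus);
            int err = -1;

            if (!set)
                    return -1;
            CPU_ZERO_S(size, set);
            CPU_SET_S(cpu, size, set);
            if (sched_setaffinity(0, size, set) == 0)
                    err = 0;
            CPU_FREE(set);
            return err;
    }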
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
index 94110b1dcd3d..031baa43646f 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
@@ -91,8 +91,6 @@ int back_to_back_ebbs(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
index 7c57a8d79535..361e0be9df9a 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
@@ -42,8 +42,6 @@ int cycles(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
index ecf5ee3283a3..fe7d0dc2a1a2 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
@@ -99,8 +99,6 @@ int cycles_with_freeze(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
printf("EBBs while frozen %d\n", ebbs_while_frozen);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
index c0faba520b35..b9b30f974b5e 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
@@ -71,8 +71,6 @@ int cycles_with_mmcr2(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
index 46681fec549b..2694ae161a84 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
@@ -396,8 +396,6 @@ int ebb_child(union pipe read_pipe, union pipe write_pipe)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
index a991d2ea8d0a..174e4f4dae6c 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
@@ -38,8 +38,6 @@ static int victim_child(union pipe read_pipe, union pipe write_pipe)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
FAIL_IF(ebb_state.stats.ebb_count == 0);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
index 2ed7ad33f7a3..dddb95938304 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
@@ -75,7 +75,6 @@ static int test_body(void)
ebb_freeze_pmcs();
ebb_global_disable();
- count_pmc(4, sample_period);
mtspr(SPRN_PMC4, 0xdead);
dump_summary_ebb_state();
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
index 6ff8c8ff27d6..035c02273cd4 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
@@ -70,13 +70,6 @@ int multi_counter(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
- count_pmc(2, sample_period);
- count_pmc(3, sample_period);
- count_pmc(4, sample_period);
- count_pmc(5, sample_period);
- count_pmc(6, sample_period);
-
dump_ebb_state();
for (i = 0; i < 6; i++)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
index 037cb6154f36..3e9d4ac965c8 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
@@ -61,8 +61,6 @@ static int cycles_child(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_summary_ebb_state();
event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
index c5fa64790c22..d90891fe96a3 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
@@ -82,8 +82,6 @@ static int test_body(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
if (mmcr0_mismatch)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
index 30e1ac62e8cb..8ca92b9ee5b0 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
@@ -76,8 +76,6 @@ int pmc56_overflow(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(2, sample_period);
-
dump_ebb_state();
printf("PMC5/6 overflow %d\n", pmc56_overflowed);
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
index bdbbbe8431e0..3694613f418f 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
@@ -44,7 +44,7 @@ struct shared_info {
unsigned long amr2;
/* AMR value that ptrace should refuse to write to the child. */
- unsigned long amr3;
+ unsigned long invalid_amr;
/* IAMR value the parent expects to read from the child. */
unsigned long expected_iamr;
@@ -57,8 +57,8 @@ struct shared_info {
* (even though they're valid ones) because userspace doesn't have
* access to those registers.
*/
- unsigned long new_iamr;
- unsigned long new_uamor;
+ unsigned long invalid_iamr;
+ unsigned long invalid_uamor;
};
static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights)
@@ -66,11 +66,6 @@ static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights)
return syscall(__NR_pkey_alloc, flags, init_access_rights);
}
-static int sys_pkey_free(int pkey)
-{
- return syscall(__NR_pkey_free, pkey);
-}
-
static int child(struct shared_info *info)
{
unsigned long reg;
@@ -100,28 +95,32 @@ static int child(struct shared_info *info)
info->amr1 |= 3ul << pkeyshift(pkey1);
info->amr2 |= 3ul << pkeyshift(pkey2);
- info->amr3 |= info->amr2 | 3ul << pkeyshift(pkey3);
+ /*
+ * Invalid amr value where we try to force write
+ * things which are denied by the uamor setting.
+ */
+ info->invalid_amr = info->amr2 | (~0x0UL & ~info->expected_uamor);
+ /*
+ * if PKEY_DISABLE_EXECUTE succeeded we should update the expected_iamr
+ */
if (disable_execute)
info->expected_iamr |= 1ul << pkeyshift(pkey1);
else
info->expected_iamr &= ~(1ul << pkeyshift(pkey1));
- info->expected_iamr &= ~(1ul << pkeyshift(pkey2) | 1ul << pkeyshift(pkey3));
-
- info->expected_uamor |= 3ul << pkeyshift(pkey1) |
- 3ul << pkeyshift(pkey2);
- info->new_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2);
- info->new_uamor |= 3ul << pkeyshift(pkey1);
+ /*
+ * We allocated pkey2 and pkey3 above. Clear the IAMR bits.
+ */
+ info->expected_iamr &= ~(1ul << pkeyshift(pkey2));
+ info->expected_iamr &= ~(1ul << pkeyshift(pkey3));
/*
- * We won't use pkey3. We just want a plausible but invalid key to test
- * whether ptrace will let us write to AMR bits we are not supposed to.
- *
- * This also tests whether the kernel restores the UAMOR permissions
- * after a key is freed.
+ * Create an IAMR value different from expected value.
+ * Kernel will reject an IAMR and UAMOR change.
*/
- sys_pkey_free(pkey3);
+ info->invalid_iamr = info->expected_iamr | (1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2));
+ info->invalid_uamor = info->expected_uamor & ~(0x3ul << pkeyshift(pkey1));
printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n",
user_write, info->amr1, pkey1, pkey2, pkey3);
@@ -196,9 +195,9 @@ static int parent(struct shared_info *info, pid_t pid)
PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync);
PARENT_FAIL_IF(ret, &info->child_sync);
- info->amr1 = info->amr2 = info->amr3 = regs[0];
- info->expected_iamr = info->new_iamr = regs[1];
- info->expected_uamor = info->new_uamor = regs[2];
+ info->amr1 = info->amr2 = regs[0];
+ info->expected_iamr = regs[1];
+ info->expected_uamor = regs[2];
/* Wake up child so that it can set itself up. */
ret = prod_child(&info->child_sync);
@@ -234,10 +233,10 @@ static int parent(struct shared_info *info, pid_t pid)
return ret;
/* Write invalid AMR value in child. */
- ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->amr3, 1);
+ ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->invalid_amr, 1);
PARENT_FAIL_IF(ret, &info->child_sync);
- printf("%-30s AMR: %016lx\n", ptrace_write_running, info->amr3);
+ printf("%-30s AMR: %016lx\n", ptrace_write_running, info->invalid_amr);
/* Wake up child so that it can verify it didn't change. */
ret = prod_child(&info->child_sync);
@@ -249,7 +248,7 @@ static int parent(struct shared_info *info, pid_t pid)
/* Try to write to IAMR. */
regs[0] = info->amr1;
- regs[1] = info->new_iamr;
+ regs[1] = info->invalid_iamr;
ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 2);
PARENT_FAIL_IF(!ret, &info->child_sync);
@@ -257,7 +256,7 @@ static int parent(struct shared_info *info, pid_t pid)
ptrace_write_running, regs[0], regs[1]);
/* Try to write to IAMR and UAMOR. */
- regs[2] = info->new_uamor;
+ regs[2] = info->invalid_uamor;
ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 3);
PARENT_FAIL_IF(!ret, &info->child_sync);
diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
index aa8fc1e6365b..ba0959d454b3 100644
--- a/tools/testing/selftests/powerpc/utils.c
+++ b/tools/testing/selftests/powerpc/utils.c
@@ -13,6 +13,7 @@
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
+#include <sys/sysinfo.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <unistd.h>
@@ -83,28 +84,40 @@ void *get_auxv_entry(int type)
int pick_online_cpu(void)
{
- cpu_set_t mask;
- int cpu;
+ int ncpus, cpu = -1;
+ cpu_set_t *mask;
+ size_t size;
+
+ ncpus = get_nprocs_conf();
+ size = CPU_ALLOC_SIZE(ncpus);
+ mask = CPU_ALLOC(ncpus);
+ if (!mask) {
+ perror("malloc");
+ return -1;
+ }
- CPU_ZERO(&mask);
+ CPU_ZERO_S(size, mask);
- if (sched_getaffinity(0, sizeof(mask), &mask)) {
+ if (sched_getaffinity(0, size, mask)) {
perror("sched_getaffinity");
- return -1;
+ goto done;
}
/* We prefer a primary thread, but skip 0 */
- for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8)
- if (CPU_ISSET(cpu, &mask))
- return cpu;
+ for (cpu = 8; cpu < ncpus; cpu += 8)
+ if (CPU_ISSET_S(cpu, size, mask))
+ goto done;
/* Search for anything, but in reverse */
- for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--)
- if (CPU_ISSET(cpu, &mask))
- return cpu;
+ for (cpu = ncpus - 1; cpu >= 0; cpu--)
+ if (CPU_ISSET_S(cpu, size, mask))
+ goto done;
printf("No cpus in affinity mask?!\n");
- return -1;
+
+done:
+ CPU_FREE(mask);
+ return cpu;
}
bool is_ppc64le(void)
diff --git a/tools/testing/selftests/proc/proc-loadavg-001.c b/tools/testing/selftests/proc/proc-loadavg-001.c
index fcff7047000d..8edaafc2b92f 100644
--- a/tools/testing/selftests/proc/proc-loadavg-001.c
+++ b/tools/testing/selftests/proc/proc-loadavg-001.c
@@ -14,7 +14,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Test that /proc/loadavg correctly reports last pid in pid namespace. */
-#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <sys/types.h>
diff --git a/tools/testing/selftests/proc/proc-self-syscall.c b/tools/testing/selftests/proc/proc-self-syscall.c
index 5ab5f4810e43..7b9018fad092 100644
--- a/tools/testing/selftests/proc/proc-self-syscall.c
+++ b/tools/testing/selftests/proc/proc-self-syscall.c
@@ -13,7 +13,6 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c
index 30e2b7849089..e7ceabed7f51 100644
--- a/tools/testing/selftests/proc/proc-uptime-002.c
+++ b/tools/testing/selftests/proc/proc-uptime-002.c
@@ -15,7 +15,6 @@
*/
// Test that values in /proc/uptime increment monotonically
// while shifting across CPUs.
-#define _GNU_SOURCE
#undef NDEBUG
#include <assert.h>
#include <unistd.h>
diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py
index 52fa539dc662..3d8350d6bfe1 100755
--- a/tools/testing/selftests/tc-testing/tdc_batch.py
+++ b/tools/testing/selftests/tc-testing/tdc_batch.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
"""
tdc_batch.py - a script to generate TC batch file
diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c
index 637b6d0ac0d0..11b2301f3aa3 100644
--- a/tools/testing/selftests/vm/mlock2-tests.c
+++ b/tools/testing/selftests/vm/mlock2-tests.c
@@ -67,59 +67,6 @@ out:
return ret;
}
-static uint64_t get_pageflags(unsigned long addr)
-{
- FILE *file;
- uint64_t pfn;
- unsigned long offset;
-
- file = fopen("/proc/self/pagemap", "r");
- if (!file) {
- perror("fopen pagemap");
- _exit(1);
- }
-
- offset = addr / getpagesize() * sizeof(pfn);
-
- if (fseek(file, offset, SEEK_SET)) {
- perror("fseek pagemap");
- _exit(1);
- }
-
- if (fread(&pfn, sizeof(pfn), 1, file) != 1) {
- perror("fread pagemap");
- _exit(1);
- }
-
- fclose(file);
- return pfn;
-}
-
-static uint64_t get_kpageflags(unsigned long pfn)
-{
- uint64_t flags;
- FILE *file;
-
- file = fopen("/proc/kpageflags", "r");
- if (!file) {
- perror("fopen kpageflags");
- _exit(1);
- }
-
- if (fseek(file, pfn * sizeof(flags), SEEK_SET)) {
- perror("fseek kpageflags");
- _exit(1);
- }
-
- if (fread(&flags, sizeof(flags), 1, file) != 1) {
- perror("fread kpageflags");
- _exit(1);
- }
-
- fclose(file);
- return flags;
-}
-
#define VMFLAGS "VmFlags:"
static bool is_vmflag_set(unsigned long addr, const char *vmflag)
@@ -159,19 +106,13 @@ out:
#define RSS "Rss:"
#define LOCKED "lo"
-static bool is_vma_lock_on_fault(unsigned long addr)
+static unsigned long get_value_for_name(unsigned long addr, const char *name)
{
- bool ret = false;
- bool locked;
- FILE *smaps = NULL;
- unsigned long vma_size, vma_rss;
char *line = NULL;
- char *value;
size_t size = 0;
-
- locked = is_vmflag_set(addr, LOCKED);
- if (!locked)
- goto out;
+ char *value_ptr;
+ FILE *smaps = NULL;
+ unsigned long value = -1UL;
smaps = seek_to_smaps_entry(addr);
if (!smaps) {
@@ -180,112 +121,70 @@ static bool is_vma_lock_on_fault(unsigned long addr)
}
while (getline(&line, &size, smaps) > 0) {
- if (!strstr(line, SIZE)) {
+ if (!strstr(line, name)) {
free(line);
line = NULL;
size = 0;
continue;
}
- value = line + strlen(SIZE);
- if (sscanf(value, "%lu kB", &vma_size) < 1) {
+ value_ptr = line + strlen(name);
+ if (sscanf(value_ptr, "%lu kB", &value) < 1) {
printf("Unable to parse smaps entry for Size\n");
goto out;
}
break;
}
- while (getline(&line, &size, smaps) > 0) {
- if (!strstr(line, RSS)) {
- free(line);
- line = NULL;
- size = 0;
- continue;
- }
-
- value = line + strlen(RSS);
- if (sscanf(value, "%lu kB", &vma_rss) < 1) {
- printf("Unable to parse smaps entry for Rss\n");
- goto out;
- }
- break;
- }
-
- ret = locked && (vma_rss < vma_size);
out:
- free(line);
if (smaps)
fclose(smaps);
- return ret;
+ free(line);
+ return value;
}
-#define PRESENT_BIT 0x8000000000000000ULL
-#define PFN_MASK 0x007FFFFFFFFFFFFFULL
-#define UNEVICTABLE_BIT (1UL << 18)
-
-static int lock_check(char *map)
+static bool is_vma_lock_on_fault(unsigned long addr)
{
- unsigned long page_size = getpagesize();
- uint64_t page1_flags, page2_flags;
+ bool locked;
+ unsigned long vma_size, vma_rss;
- page1_flags = get_pageflags((unsigned long)map);
- page2_flags = get_pageflags((unsigned long)map + page_size);
+ locked = is_vmflag_set(addr, LOCKED);
+ if (!locked)
+ return false;
- /* Both pages should be present */
- if (((page1_flags & PRESENT_BIT) == 0) ||
- ((page2_flags & PRESENT_BIT) == 0)) {
- printf("Failed to make both pages present\n");
- return 1;
- }
+ vma_size = get_value_for_name(addr, SIZE);
+ vma_rss = get_value_for_name(addr, RSS);
- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
- page2_flags = get_kpageflags(page2_flags & PFN_MASK);
+ /* only one page is faulted in */
+ return (vma_rss < vma_size);
+}
- /* Both pages should be unevictable */
- if (((page1_flags & UNEVICTABLE_BIT) == 0) ||
- ((page2_flags & UNEVICTABLE_BIT) == 0)) {
- printf("Failed to make both pages unevictable\n");
- return 1;
- }
+#define PRESENT_BIT 0x8000000000000000ULL
+#define PFN_MASK 0x007FFFFFFFFFFFFFULL
+#define UNEVICTABLE_BIT (1UL << 18)
- if (!is_vmflag_set((unsigned long)map, LOCKED)) {
- printf("VMA flag %s is missing on page 1\n", LOCKED);
- return 1;
- }
+static int lock_check(unsigned long addr)
+{
+ bool locked;
+ unsigned long vma_size, vma_rss;
- if (!is_vmflag_set((unsigned long)map + page_size, LOCKED)) {
- printf("VMA flag %s is missing on page 2\n", LOCKED);
- return 1;
- }
+ locked = is_vmflag_set(addr, LOCKED);
+ if (!locked)
+ return false;
- return 0;
+ vma_size = get_value_for_name(addr, SIZE);
+ vma_rss = get_value_for_name(addr, RSS);
+
+ return (vma_rss == vma_size);
}
static int unlock_lock_check(char *map)
{
- unsigned long page_size = getpagesize();
- uint64_t page1_flags, page2_flags;
-
- page1_flags = get_pageflags((unsigned long)map);
- page2_flags = get_pageflags((unsigned long)map + page_size);
- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
- page2_flags = get_kpageflags(page2_flags & PFN_MASK);
-
- if ((page1_flags & UNEVICTABLE_BIT) || (page2_flags & UNEVICTABLE_BIT)) {
- printf("A page is still marked unevictable after unlock\n");
- return 1;
- }
-
if (is_vmflag_set((unsigned long)map, LOCKED)) {
printf("VMA flag %s is present on page 1 after unlock\n", LOCKED);
return 1;
}
- if (is_vmflag_set((unsigned long)map + page_size, LOCKED)) {
- printf("VMA flag %s is present on page 2 after unlock\n", LOCKED);
- return 1;
- }
-
return 0;
}
@@ -311,7 +210,7 @@ static int test_mlock_lock()
goto unmap;
}
- if (lock_check(map))
+ if (!lock_check((unsigned long)map))
goto unmap;
/* Now unlock and recheck attributes */
@@ -330,64 +229,18 @@ out:
static int onfault_check(char *map)
{
- unsigned long page_size = getpagesize();
- uint64_t page1_flags, page2_flags;
-
- page1_flags = get_pageflags((unsigned long)map);
- page2_flags = get_pageflags((unsigned long)map + page_size);
-
- /* Neither page should be present */
- if ((page1_flags & PRESENT_BIT) || (page2_flags & PRESENT_BIT)) {
- printf("Pages were made present by MLOCK_ONFAULT\n");
- return 1;
- }
-
*map = 'a';
- page1_flags = get_pageflags((unsigned long)map);
- page2_flags = get_pageflags((unsigned long)map + page_size);
-
- /* Only page 1 should be present */
- if ((page1_flags & PRESENT_BIT) == 0) {
- printf("Page 1 is not present after fault\n");
- return 1;
- } else if (page2_flags & PRESENT_BIT) {
- printf("Page 2 was made present\n");
- return 1;
- }
-
- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
-
- /* Page 1 should be unevictable */
- if ((page1_flags & UNEVICTABLE_BIT) == 0) {
- printf("Failed to make faulted page unevictable\n");
- return 1;
- }
-
if (!is_vma_lock_on_fault((unsigned long)map)) {
printf("VMA is not marked for lock on fault\n");
return 1;
}
- if (!is_vma_lock_on_fault((unsigned long)map + page_size)) {
- printf("VMA is not marked for lock on fault\n");
- return 1;
- }
-
return 0;
}
static int unlock_onfault_check(char *map)
{
unsigned long page_size = getpagesize();
- uint64_t page1_flags;
-
- page1_flags = get_pageflags((unsigned long)map);
- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
-
- if (page1_flags & UNEVICTABLE_BIT) {
- printf("Page 1 is still marked unevictable after unlock\n");
- return 1;
- }
if (is_vma_lock_on_fault((unsigned long)map) ||
is_vma_lock_on_fault((unsigned long)map + page_size)) {
@@ -445,7 +298,6 @@ static int test_lock_onfault_of_present()
char *map;
int ret = 1;
unsigned long page_size = getpagesize();
- uint64_t page1_flags, page2_flags;
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
@@ -465,17 +317,6 @@ static int test_lock_onfault_of_present()
goto unmap;
}
- page1_flags = get_pageflags((unsigned long)map);
- page2_flags = get_pageflags((unsigned long)map + page_size);
- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
- page2_flags = get_kpageflags(page2_flags & PFN_MASK);
-
- /* Page 1 should be unevictable */
- if ((page1_flags & UNEVICTABLE_BIT) == 0) {
- printf("Failed to make present page unevictable\n");
- goto unmap;
- }
-
if (!is_vma_lock_on_fault((unsigned long)map) ||
!is_vma_lock_on_fault((unsigned long)map + page_size)) {
printf("VMA with present pages is not marked lock on fault\n");
@@ -507,7 +348,7 @@ static int test_munlockall()
goto out;
}
- if (lock_check(map))
+ if (!lock_check((unsigned long)map))
goto unmap;
if (munlockall()) {
@@ -549,7 +390,7 @@ static int test_munlockall()
goto out;
}
- if (lock_check(map))
+ if (!lock_check((unsigned long)map))
goto unmap;
if (munlockall()) {
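
The mlock2-tests refactor above drops the /proc pagemap and kpageflags probing and instead derives lock state from /proc/<pid>/smaps fields such as Size: and Rss:. As a rough, self-contained illustration of that approach (not part of the patch; the helper name and prefix matching are hypothetical simplifications of get_value_for_name()), reading one "<Name>: <value> kB" field from smaps can look like this:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: scan /proc/self/smaps for the first line that
 * starts with @name (e.g. "Size:" or "Rss:") and return its value in kB,
 * or -1UL if the field is not found. */
static unsigned long read_smaps_field(const char *name)
{
	unsigned long value = -1UL;
	char *line = NULL;
	size_t size = 0;
	FILE *smaps;

	smaps = fopen("/proc/self/smaps", "r");
	if (!smaps) {
		perror("fopen smaps");
		return value;
	}

	while (getline(&line, &size, smaps) > 0) {
		if (!strncmp(line, name, strlen(name))) {
			/* the value follows the field name: "<name> <value> kB" */
			if (sscanf(line + strlen(name), "%lu kB", &value) < 1)
				value = -1UL;
			break;
		}
	}

	free(line);
	fclose(smaps);
	return value;
}

int main(void)
{
	printf("first Size: %lu kB, first Rss: %lu kB\n",
	       read_smaps_field("Size:"), read_smaps_field("Rss:"));
	return 0;
}

Unlike the selftest, this sketch reads the first matching field of the whole smaps file rather than seeking to the VMA entry for a given address, so it only demonstrates the parsing step.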
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index 5d546dcdbc80..b8778960da10 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -24,6 +24,7 @@
#define _GNU_SOURCE
#include <errno.h>
#include <linux/futex.h>
+#include <time.h>
#include <sys/time.h>
#include <sys/syscall.h>
#include <string.h>
@@ -612,10 +613,10 @@ int alloc_random_pkey(void)
int nr_alloced = 0;
int random_index;
memset(alloced_pkeys, 0, sizeof(alloced_pkeys));
+ srand((unsigned int)time(NULL));
/* allocate every possible key and make a note of which ones we got */
max_nr_pkey_allocs = NR_PKEYS;
- max_nr_pkey_allocs = 1;
for (i = 0; i < max_nr_pkey_allocs; i++) {
int new_pkey = alloc_pkey();
if (new_pkey < 0)
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
index 6f22238f3217..12aaa063196e 100644
--- a/tools/testing/selftests/x86/ptrace_syscall.c
+++ b/tools/testing/selftests/x86/ptrace_syscall.c
@@ -414,8 +414,12 @@ int main()
#if defined(__i386__) && (!defined(__GLIBC__) || __GLIBC__ > 2 || __GLIBC_MINOR__ >= 16)
vsyscall32 = (void *)getauxval(AT_SYSINFO);
- printf("[RUN]\tCheck AT_SYSINFO return regs\n");
- test_sys32_regs(do_full_vsyscall32);
+ if (vsyscall32) {
+ printf("[RUN]\tCheck AT_SYSINFO return regs\n");
+ test_sys32_regs(do_full_vsyscall32);
+ } else {
+ printf("[SKIP]\tAT_SYSINFO is not available\n");
+ }
#endif
test_ptrace_syscall_restart();
diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c
index 43fcab367fb0..74e6b3fc2d09 100644
--- a/tools/testing/selftests/x86/syscall_nt.c
+++ b/tools/testing/selftests/x86/syscall_nt.c
@@ -67,6 +67,7 @@ static void do_it(unsigned long extraflags)
set_eflags(get_eflags() | extraflags);
syscall(SYS_getpid);
flags = get_eflags();
+ set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
if ((flags & extraflags) == extraflags) {
printf("[OK]\tThe syscall worked and flags are still set\n");
} else {
diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
index b0f7489d069d..84d0fa85939e 100644
--- a/tools/usb/usbip/libsrc/usbip_host_common.c
+++ b/tools/usb/usbip/libsrc/usbip_host_common.c
@@ -35,7 +35,7 @@
#include "list.h"
#include "sysfs_utils.h"
-struct udev *udev_context;
+extern struct udev *udev_context;
static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
{
diff --git a/tools/vm/Makefile b/tools/vm/Makefile
index 20f6cf04377f..9860622cbb15 100644
--- a/tools/vm/Makefile
+++ b/tools/vm/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for vm tools
#
+include ../scripts/Makefile.include
+
TARGETS=page-types slabinfo page_owner_sort
LIB_DIR = ../lib/api
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index 18d6d5124397..92c9ad6c0182 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -44,6 +44,26 @@ static const u8 return_offsets[8][2] = {
[7] = { 4, 4 }, /* FIQ, unused */
};
+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ if (kvm_arm_vcpu_loaded(vcpu)) {
+ kvm_arch_vcpu_put(vcpu);
+ return true;
+ }
+
+ preempt_enable();
+ return false;
+}
+
+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
+{
+ if (loaded) {
+ kvm_arch_vcpu_load(vcpu, smp_processor_id());
+ preempt_enable();
+ }
+}
+
/*
* When an exception is taken, most CPSR fields are left unchanged in the
* handler. However, some are explicitly overridden (e.g. M[4:0]).
@@ -166,7 +186,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
+ bool loaded = pre_fault_synchronize(vcpu);
+
prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
+ post_fault_synchronize(vcpu, loaded);
}
/*
@@ -179,6 +202,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
u32 vect_offset;
u32 *far, *fsr;
bool is_lpae;
+ bool loaded;
+
+ loaded = pre_fault_synchronize(vcpu);
if (is_pabt) {
vect_offset = 12;
@@ -202,6 +228,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
/* no need to shuffle FS[4] into DFSR[10] as its 0 */
*fsr = DFSR_FSC_EXTABT_nLPAE;
}
+
+ post_fault_synchronize(vcpu, loaded);
}
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index d982650deb33..39706799ecdf 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -574,6 +574,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
vcpu->arch.has_run_once = true;
+ kvm_arm_vcpu_init_debug(vcpu);
+
if (likely(irqchip_in_kernel(kvm))) {
/*
* Map the VGIC hardware resources before running a vcpu the
diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c
index d31f267961e7..25c0e47d57cb 100644
--- a/virt/kvm/arm/hyp/aarch32.c
+++ b/virt/kvm/arm/hyp/aarch32.c
@@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
*/
void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
+ u32 pc = *vcpu_pc(vcpu);
bool is_thumb;
is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
if (is_thumb && !is_wide_instr)
- *vcpu_pc(vcpu) += 2;
+ pc += 2;
else
- *vcpu_pc(vcpu) += 4;
+ pc += 4;
+
+ *vcpu_pc(vcpu) = pc;
+
kvm_adjust_itstate(vcpu);
}
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index 878e0edb2e1b..ff0a1c608371 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -142,7 +142,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
bool sign_extend;
bool sixty_four;
- if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+ if (kvm_vcpu_abt_iss1tw(vcpu)) {
/* page table accesses IO mem: tell guest to fix its TTBR */
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
return 1;
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index a5bc10d30618..11103b75c596 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -323,7 +323,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
* destroying the VM), otherwise another faulting VCPU may come in and mess
* with things behind our backs.
*/
-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size,
+ bool may_block)
{
pgd_t *pgd;
phys_addr_t addr = start, end = start + size;
@@ -348,11 +349,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
* If the range is too large, release the kvm->mmu_lock
* to prevent starvation and lockup detector warnings.
*/
- if (next != end)
+ if (may_block && next != end)
cond_resched_lock(&kvm->mmu_lock);
} while (pgd++, addr = next, addr != end);
}
+static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+{
+ __unmap_stage2_range(kvm, start, size, true);
+}
+
static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
phys_addr_t addr, phys_addr_t end)
{
@@ -1276,6 +1282,9 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
+ if (kvm_vcpu_abt_iss1tw(vcpu))
+ return true;
+
if (kvm_vcpu_trap_is_iabt(vcpu))
return false;
@@ -1490,7 +1499,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
unsigned long flags = 0;
write_fault = kvm_is_write_fault(vcpu);
- exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
+ exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
VM_BUG_ON(write_fault && exec_fault);
if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
@@ -1820,18 +1829,20 @@ static int handle_hva_to_gpa(struct kvm *kvm,
static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
- unmap_stage2_range(kvm, gpa, size);
+ bool may_block = *(bool *)data;
+
+ __unmap_stage2_range(kvm, gpa, size, may_block);
return 0;
}
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end, bool blockable)
{
if (!kvm->arch.pgd)
return 0;
trace_kvm_unmap_hva_range(start, end);
- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &blockable);
return 0;
}
@@ -2069,7 +2080,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* Prevent userspace from creating a memory region outside of the IPA
* space addressable by the KVM guest IPA space.
*/
- if (memslot->base_gfn + memslot->npages >=
+ if (memslot->base_gfn + memslot->npages >
(KVM_PHYS_SIZE >> PAGE_SHIFT))
return -EFAULT;
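
The arm/mmu.c change above threads a may_block flag from the MMU-notifier caller down to __unmap_stage2_range() by smuggling it through the handler's opaque data pointer. A minimal stand-alone sketch of that plumbing pattern (generic C with made-up names; it mirrors only the flag passing, not the KVM page-table walk):

#include <stdbool.h>
#include <stdio.h>

/* Handler type mirroring handle_hva_to_gpa(): a range plus an opaque
 * pointer that lets callers pass extra per-call state through. */
typedef int (*range_handler_t)(unsigned long start, unsigned long end,
			       void *data);

static int walk_ranges(unsigned long start, unsigned long end,
		       range_handler_t handler, void *data)
{
	/* A real walker would split [start, end) per memslot or page-table
	 * level; here we invoke the handler once for the whole range. */
	return handler(start, end, data);
}

static int unmap_handler(unsigned long start, unsigned long end, void *data)
{
	bool may_block = *(bool *)data;

	printf("unmapping [%#lx, %#lx), may_block=%d\n", start, end, may_block);
	/* Only a walker allowed to block would reschedule mid-walk here. */
	return 0;
}

int main(void)
{
	bool blockable = true;

	/* The caller's flag travels through the opaque pointer, just as
	 * kvm_unmap_hva_range() passes &blockable in the patch above. */
	return walk_ranges(0x1000, 0x3000, unmap_handler, &blockable);
}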
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index cd75df25fe14..2fc1777da50d 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -187,6 +187,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
break;
default:
kfree(dist->spis);
+ dist->spis = NULL;
return -EINVAL;
}
}
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 9295addea7ec..f139b1c62ca3 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -107,14 +107,21 @@ out_unlock:
* We "cache" the configuration table entries in our struct vgic_irq's.
* However we only have those structs for mapped IRQs, so we read in
* the respective config data from memory here upon mapping the LPI.
+ *
+ * Should any of these fail, behave as if we couldn't create the LPI
+ * by dropping the refcount and returning the error.
*/
ret = update_lpi_config(kvm, irq, NULL, false);
- if (ret)
+ if (ret) {
+ vgic_put_irq(kvm, irq);
return ERR_PTR(ret);
+ }
ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
- if (ret)
+ if (ret) {
+ vgic_put_irq(kvm, irq);
return ERR_PTR(ret);
+ }
return irq;
}
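
The vgic-its fix above makes the LPI creation path drop the reference it has just taken whenever a later initialisation step fails, instead of leaking it. A generic, self-contained sketch of that error-path discipline (hypothetical object and refcount names, not the vgic code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	int refcount;
};

static void get_ref(struct object *obj) { obj->refcount++; }
static void put_ref(struct object *obj)
{
	if (--obj->refcount == 0)
		free(obj);
}

/* Stand-ins for follow-up initialisation steps that may fail. */
static int load_config(struct object *obj)  { (void)obj; return 0; }
static int sync_pending(struct object *obj) { (void)obj; return -EIO; }

static struct object *create_object(void)
{
	struct object *obj = calloc(1, sizeof(*obj));
	int ret;

	if (!obj)
		return NULL;
	get_ref(obj);		/* reference handed to the caller on success */

	ret = load_config(obj);
	if (ret)
		goto err_put;	/* behave as if the object was never created */

	ret = sync_pending(obj);
	if (ret)
		goto err_put;

	return obj;

err_put:
	put_ref(obj);		/* drop the reference taken above */
	errno = -ret;
	return NULL;
}

int main(void)
{
	struct object *obj = create_object();

	printf("create_object: %s\n", obj ? "ok" : "failed, reference dropped");
	if (obj)
		put_ref(obj);
	return 0;
}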
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index a2a175b08b17..cbb38a0d1b25 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -226,6 +226,23 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
return extract_bytes(value, addr & 7, len);
}
+static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+ int target_vcpu_id = vcpu->vcpu_id;
+ u64 value;
+
+ value = (u64)(mpidr & GENMASK(23, 0)) << 32;
+ value |= ((target_vcpu_id & 0xffff) << 8);
+
+ if (vgic_has_its(vcpu->kvm))
+ value |= GICR_TYPER_PLPIS;
+
+ /* reporting of the Last bit is not supported for userspace */
+ return extract_bytes(value, addr & 7, len);
+}
+
static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
@@ -532,8 +549,9 @@ static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
VGIC_ACCESS_32bit),
- REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
- vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
+ REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
+ vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
+ vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 762f81900529..9d06a1f8e6c0 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -381,7 +381,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
- intid > VGIC_NR_PRIVATE_IRQS)
+ intid >= VGIC_NR_PRIVATE_IRQS)
kvm_arm_halt_guest(vcpu->kvm);
}
@@ -389,7 +389,7 @@ static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
- intid > VGIC_NR_PRIVATE_IRQS)
+ intid >= VGIC_NR_PRIVATE_IRQS)
kvm_arm_resume_guest(vcpu->kvm);
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4e499b78569b..1ecb27b3421a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -52,9 +52,9 @@
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/kthread.h>
+#include <linux/io.h>
#include <asm/processor.h>
-#include <asm/io.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
@@ -141,10 +141,9 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;
-__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end, bool blockable)
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end)
{
- return 0;
}
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
@@ -170,6 +169,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
*/
if (pfn_valid(pfn))
return PageReserved(pfn_to_page(pfn)) &&
+ !is_zero_pfn(pfn) &&
!kvm_is_zone_device_pfn(pfn);
return true;
@@ -366,6 +366,18 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
return container_of(mn, struct kvm, mmu_notifier);
}
+static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+ srcu_read_unlock(&kvm->srcu, idx);
+}
+
static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address,
@@ -390,7 +402,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int need_tlb_flush = 0, idx;
- int ret;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
@@ -400,19 +411,15 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
* count is also read inside the mmu_lock critical section.
*/
kvm->mmu_notifier_count++;
- need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
- need_tlb_flush |= kvm->tlbs_dirty;
+ need_tlb_flush = kvm_unmap_hva_range(kvm, start, end, blockable);
/* we've to flush the tlb before the pages can be freed */
- if (need_tlb_flush)
+ if (need_tlb_flush || kvm->tlbs_dirty)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
-
- ret = kvm_arch_mmu_notifier_invalidate_range(kvm, start, end, blockable);
-
srcu_read_unlock(&kvm->srcu, idx);
- return ret;
+ return 0;
}
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
@@ -521,6 +528,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
.flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
+ .invalidate_range = kvm_mmu_notifier_invalidate_range,
.invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
.invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
@@ -1705,6 +1713,153 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(gfn_to_page);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+{
+ if (pfn == 0)
+ return;
+
+ if (cache)
+ cache->pfn = cache->gfn = 0;
+
+ if (dirty)
+ kvm_release_pfn_dirty(pfn);
+ else
+ kvm_release_pfn_clean(pfn);
+}
+
+static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
+ struct gfn_to_pfn_cache *cache, u64 gen)
+{
+ kvm_release_pfn(cache->pfn, cache->dirty, cache);
+
+ cache->pfn = gfn_to_pfn_memslot(slot, gfn);
+ cache->gfn = gfn;
+ cache->dirty = false;
+ cache->generation = gen;
+}
+
+static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
+ struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache,
+ bool atomic)
+{
+ kvm_pfn_t pfn;
+ void *hva = NULL;
+ struct page *page = KVM_UNMAPPED_PAGE;
+ struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
+ u64 gen = slots->generation;
+
+ if (!map)
+ return -EINVAL;
+
+ if (cache) {
+ if (!cache->pfn || cache->gfn != gfn ||
+ cache->generation != gen) {
+ if (atomic)
+ return -EAGAIN;
+ kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
+ }
+ pfn = cache->pfn;
+ } else {
+ if (atomic)
+ return -EAGAIN;
+ pfn = gfn_to_pfn_memslot(slot, gfn);
+ }
+ if (is_error_noslot_pfn(pfn))
+ return -EINVAL;
+
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (atomic)
+ hva = kmap_atomic(page);
+ else
+ hva = kmap(page);
+#ifdef CONFIG_HAS_IOMEM
+ } else if (!atomic) {
+ hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+ } else {
+ return -EINVAL;
+#endif
+ }
+
+ if (!hva)
+ return -EFAULT;
+
+ map->page = page;
+ map->hva = hva;
+ map->pfn = pfn;
+ map->gfn = gfn;
+
+ return 0;
+}
+
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache, bool atomic)
+{
+ return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
+ cache, atomic);
+}
+EXPORT_SYMBOL_GPL(kvm_map_gfn);
+
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+{
+ return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
+ NULL, false);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_map);
+
+static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
+ struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache,
+ bool dirty, bool atomic)
+{
+ if (!map)
+ return;
+
+ if (!map->hva)
+ return;
+
+ if (map->page != KVM_UNMAPPED_PAGE) {
+ if (atomic)
+ kunmap_atomic(map->hva);
+ else
+ kunmap(map->page);
+ }
+#ifdef CONFIG_HAS_IOMEM
+ else if (!atomic)
+ memunmap(map->hva);
+ else
+ WARN_ONCE(1, "Unexpected unmapping in atomic context");
+#endif
+
+ if (dirty)
+ mark_page_dirty_in_slot(memslot, map->gfn);
+
+ if (cache)
+ cache->dirty |= dirty;
+ else
+ kvm_release_pfn(map->pfn, dirty, NULL);
+
+ map->hva = NULL;
+ map->page = NULL;
+}
+
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
+{
+ __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
+ cache, dirty, atomic);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
+
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+ __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
+ dirty, false);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
+
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
kvm_pfn_t pfn;
@@ -3689,7 +3844,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev)
{
- int i;
+ int i, j;
struct kvm_io_bus *new_bus, *bus;
bus = kvm_get_bus(kvm, bus_idx);
@@ -3706,17 +3861,20 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
sizeof(struct kvm_io_range)), GFP_KERNEL);
- if (!new_bus) {
+ if (new_bus) {
+ memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+ new_bus->dev_count--;
+ memcpy(new_bus->range + i, bus->range + i + 1,
+ (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+ } else {
pr_err("kvm: failed to shrink bus, removing it completely\n");
- goto broken;
+ for (j = 0; j < bus->dev_count; j++) {
+ if (j == i)
+ continue;
+ kvm_iodevice_destructor(bus->range[j].dev);
+ }
}
- memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
- new_bus->dev_count--;
- memcpy(new_bus->range + i, bus->range + i + 1,
- (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
-
-broken:
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);